the-Lin commited on
Commit
31024d9
·
verified ·
1 Parent(s): 2716787

Delete llava-med4_CXR

Browse files
llava-med4_CXR/config.json DELETED
@@ -1,193 +0,0 @@
1
- {
2
- "_name_or_path": "/vast/users/xiaodan/haokunlin/Continual_LLaVA/llava/output/llava-v1.5-7b",
3
- "architectures": [
4
- "LlavaLlamaForCausalLM"
5
- ],
6
- "attention_bias": false,
7
- "attention_dropout": 0.0,
8
- "bos_token_id": 1,
9
- "dataset_type": "llava-med",
10
- "dataset_type_map": {
11
- "dataset": {
12
- "GQA": [
13
- 24,
14
- 32
15
- ],
16
- "Grounding": [
17
- 40,
18
- 48
19
- ],
20
- "ImageNet": [
21
- 16,
22
- 24
23
- ],
24
- "OCRVQA": [
25
- 56,
26
- 64
27
- ],
28
- "ScienceQA": [
29
- 0,
30
- 8
31
- ],
32
- "TextVQA": [
33
- 8,
34
- 16
35
- ],
36
- "VQAv2": [
37
- 48,
38
- 56
39
- ],
40
- "VizWiz": [
41
- 32,
42
- 40
43
- ]
44
- },
45
- "domain": {
46
- "chartqa": [
47
- 0,
48
- 8
49
- ],
50
- "docvqa": [
51
- 8,
52
- 16
53
- ],
54
- "iconqa": [
55
- 16,
56
- 24
57
- ],
58
- "medicalqa": [
59
- 24,
60
- 32
61
- ]
62
- },
63
- "llava-med": {
64
- "CT": [
65
- 8,
66
- 16
67
- ],
68
- "CXR": [
69
- 0,
70
- 8
71
- ],
72
- "MRI": [
73
- 16,
74
- 24
75
- ]
76
- },
77
- "newdomain": {
78
- "GeoChat_Instruct": [
79
- 0,
80
- 8
81
- ],
82
- "agri": [
83
- 40,
84
- 48
85
- ],
86
- "art": [
87
- 24,
88
- 32
89
- ],
90
- "astro": [
91
- 32,
92
- 40
93
- ],
94
- "atom": [
95
- 16,
96
- 24
97
- ],
98
- "chem": [
99
- 48,
100
- 56
101
- ],
102
- "climate": [
103
- 56,
104
- 64
105
- ],
106
- "llava_med": [
107
- 8,
108
- 16
109
- ]
110
- },
111
- "vqa_rad": {
112
- "abd": [
113
- 0,
114
- 8
115
- ],
116
- "chest": [
117
- 8,
118
- 16
119
- ],
120
- "head": [
121
- 16,
122
- 24
123
- ]
124
- }
125
- },
126
- "disable_task_id": false,
127
- "eos_token_id": 2,
128
- "freeze_mm_mlp_adapter": false,
129
- "freeze_mm_vision_resampler": false,
130
- "groups": 4,
131
- "hidden_act": "silu",
132
- "hidden_size": 4096,
133
- "image_aspect_ratio": "pad",
134
- "initializer_range": 0.02,
135
- "intermediate_size": 11008,
136
- "low_rank": 8,
137
- "max_length": 4096,
138
- "max_position_embeddings": 4096,
139
- "mm_hidden_size": 1024,
140
- "mm_patch_merge_type": "flat",
141
- "mm_projector_lr": null,
142
- "mm_projector_type": "mlp2x_gelu",
143
- "mm_resampler_type": null,
144
- "mm_use_im_patch_token": false,
145
- "mm_use_im_start_end": false,
146
- "mm_vision_select_feature": "patch",
147
- "mm_vision_select_layer": -2,
148
- "mm_vision_tower": "/vast/users/xiaodan/haokunlin/Continual_LLaVA/llava/output/clip-vit-large-patch14-336",
149
- "model_name": "/vast/users/xiaodan/haokunlin/Continual_LLaVA/llava/output/llava-v1.5-7b",
150
- "model_type": "llava_llama",
151
- "num_attention_heads": 32,
152
- "num_hidden_layers": 32,
153
- "num_key_value_heads": 32,
154
- "pad_token_id": 0,
155
- "pool_size": 24,
156
- "pool_train_keys": true,
157
- "pool_train_weights": true,
158
- "preprare_retreival_version": "firstq",
159
- "pretraining_tp": 1,
160
- "random_dropout": null,
161
- "retriever_state_dict": "/vast/users/xiaodan/haokunlin/Continual_LLaVA/llava/output/prompt-key/merged_prompt_key4.pth",
162
- "rms_norm_eps": 1e-05,
163
- "rope_scaling": null,
164
- "rope_theta": 10000.0,
165
- "similarity_type": "cosine",
166
- "task": "CXR",
167
- "task_pool_index_range": {
168
- "CT": [
169
- 8,
170
- 16
171
- ],
172
- "CXR": [
173
- 0,
174
- 8
175
- ],
176
- "MRI": [
177
- 16,
178
- 24
179
- ]
180
- },
181
- "tie_word_embeddings": false,
182
- "tokenizer_model_max_length": 2048,
183
- "tokenizer_padding_side": "right",
184
- "torch_dtype": "bfloat16",
185
- "transformers_version": "4.37.2",
186
- "tune_mm_mlp_adapter": false,
187
- "tune_mm_vision_resampler": false,
188
- "unfreeze_mm_vision_tower": false,
189
- "use_cache": true,
190
- "use_mm_proj": true,
191
- "vocab_size": 32000,
192
- "weight_topk": 4
193
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
llava-med4_CXR/generation_config.json DELETED
@@ -1,7 +0,0 @@
1
- {
2
- "bos_token_id": 1,
3
- "eos_token_id": 2,
4
- "max_length": 4096,
5
- "pad_token_id": 0,
6
- "transformers_version": "4.37.2"
7
- }
 
 
 
 
 
 
 
 
llava-med4_CXR/model-00001-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c0249d255cd0d67de360b4a1f578904b0baeea9c9b326a6818c2248f856c35d0
3
- size 4938985352
 
 
 
 
llava-med4_CXR/model-00002-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:216448014a22b547b66308b4d8b5a1e21ccd2e5aa1a4500a204d645df9c3eddd
3
- size 4947390880
 
 
 
 
llava-med4_CXR/model-00003-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5d9dd36ed8d8c21571fecda76855c84385cd328b0b12e3933c67e750843a80f1
3
- size 4998097112
 
 
 
 
llava-med4_CXR/model-00004-of-00004.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:96f92911aac2b397e2d5ed88bd0f3d8e91b12f8b5a83de63d2edcebed69931c8
3
- size 268469384
 
 
 
 
llava-med4_CXR/model.safetensors.index.json DELETED
@@ -1,916 +0,0 @@
1
- {
2
- "metadata": {
3
- "total_size": 15152818688
4
- },
5
- "weight_map": {
6
- "lm_head.weight": "model-00003-of-00004.safetensors",
7
- "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
- "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
- "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
- "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
- "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
- "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
- "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
14
- "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
15
- "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
16
- "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
17
- "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
18
- "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
19
- "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
20
- "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
21
- "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
22
- "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
23
- "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
24
- "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
25
- "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
26
- "model.layers.10.input_layernorm.weight": "model-00001-of-00004.safetensors",
27
- "model.layers.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
28
- "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
29
- "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
30
- "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
31
- "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
32
- "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
33
- "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
34
- "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
35
- "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
36
- "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
37
- "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
38
- "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
39
- "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
40
- "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
41
- "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
42
- "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
43
- "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
44
- "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
- "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
- "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
- "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
- "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
- "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
50
- "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
51
- "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
52
- "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
53
- "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
54
- "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
55
- "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
56
- "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
57
- "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
58
- "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
59
- "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
60
- "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
61
- "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
62
- "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
63
- "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
64
- "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
65
- "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
66
- "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
67
- "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
68
- "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
69
- "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
70
- "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
71
- "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
72
- "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
73
- "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
74
- "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
75
- "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
76
- "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
77
- "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
78
- "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
79
- "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
- "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
- "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
- "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
- "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
- "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
- "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
86
- "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
87
- "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
88
- "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
89
- "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
90
- "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
91
- "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
92
- "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
93
- "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
94
- "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
95
- "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
96
- "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
97
- "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
98
- "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
99
- "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
100
- "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
101
- "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
102
- "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
103
- "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
104
- "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
105
- "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
106
- "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
107
- "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
108
- "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
109
- "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
110
- "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
111
- "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
112
- "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
113
- "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
114
- "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
115
- "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
- "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
117
- "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
118
- "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
119
- "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
120
- "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
121
- "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
122
- "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
123
- "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
124
- "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
125
- "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
126
- "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
127
- "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
128
- "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
129
- "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
130
- "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
131
- "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
132
- "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
133
- "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
134
- "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
135
- "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
136
- "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
137
- "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
138
- "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
139
- "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
140
- "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
141
- "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
142
- "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
143
- "model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors",
144
- "model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
145
- "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
146
- "model.layers.22.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
147
- "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
148
- "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
149
- "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
150
- "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
151
- "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
152
- "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
153
- "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
154
- "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
155
- "model.layers.23.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
156
- "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
157
- "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
158
- "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
159
- "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
160
- "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
161
- "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
162
- "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
163
- "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
164
- "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
165
- "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
166
- "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
167
- "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
168
- "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
169
- "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
170
- "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
171
- "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
172
- "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
173
- "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
174
- "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
175
- "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
176
- "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
177
- "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
178
- "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
179
- "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
180
- "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
181
- "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
182
- "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
183
- "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
184
- "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
185
- "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
186
- "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
187
- "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
- "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
- "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
- "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
- "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
- "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
- "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
194
- "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
195
- "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
196
- "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
197
- "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
198
- "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
199
- "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
200
- "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
201
- "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
202
- "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
203
- "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
204
- "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
205
- "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
206
- "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
207
- "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
208
- "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
209
- "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
210
- "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
211
- "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
212
- "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
213
- "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
214
- "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
215
- "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
216
- "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
217
- "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
218
- "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
219
- "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
220
- "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
221
- "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
222
- "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
223
- "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
224
- "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
- "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
- "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
- "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
- "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
- "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
230
- "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
231
- "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
232
- "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
233
- "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
234
- "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
235
- "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
236
- "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
237
- "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
238
- "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
239
- "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
240
- "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
241
- "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
242
- "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
243
- "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
244
- "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
245
- "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
246
- "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
247
- "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
248
- "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
249
- "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
250
- "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
251
- "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
252
- "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
253
- "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
254
- "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
255
- "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
256
- "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
257
- "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
258
- "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
259
- "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
260
- "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
261
- "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
262
- "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
263
- "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
264
- "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
265
- "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
266
- "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
267
- "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
268
- "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
269
- "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
270
- "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
271
- "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
272
- "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
273
- "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
274
- "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
275
- "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
276
- "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
277
- "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
278
- "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
279
- "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
280
- "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
281
- "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
282
- "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
283
- "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
284
- "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
285
- "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
286
- "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
287
- "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
288
- "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
289
- "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
290
- "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
291
- "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
292
- "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
293
- "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
294
- "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
295
- "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
296
- "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
297
- "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
298
- "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
299
- "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
300
- "model.norm.weight": "model-00003-of-00004.safetensors",
301
- "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
302
- "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
303
- "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
304
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
305
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
306
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
307
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
308
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
309
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
310
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
311
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
312
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
313
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
314
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
315
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
316
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
317
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
318
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
319
- "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
320
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
321
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
322
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
323
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
324
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
325
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
326
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
327
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
328
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
329
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
330
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
331
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
332
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
333
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
334
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
335
- "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
336
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
337
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
338
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
339
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
340
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
341
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
342
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
343
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
344
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
345
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
346
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
347
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
348
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
349
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
350
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
351
- "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
352
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
353
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
354
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
355
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
356
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
357
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
358
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
359
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
360
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
361
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
362
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
363
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
364
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
365
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
366
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
367
- "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
368
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
369
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
370
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
371
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
372
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
373
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
374
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
375
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
376
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
377
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
378
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
379
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
380
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
381
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
382
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
383
- "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
384
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
385
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
386
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
387
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
388
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
389
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
390
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
391
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
392
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
393
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
394
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
395
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
396
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
397
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
398
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
399
- "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
400
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
401
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
402
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
403
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
404
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
405
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
406
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
407
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
408
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
409
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
410
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
411
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
412
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
413
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
414
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
415
- "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
416
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
417
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
418
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
419
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
420
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
421
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
422
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
423
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
424
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
425
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
426
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
427
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
428
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
429
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
430
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
431
- "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
432
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
433
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
434
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
435
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
436
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
437
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
438
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
439
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
440
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
441
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
442
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
443
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
444
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
445
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
446
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
447
- "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
448
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
449
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
450
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
451
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
452
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
453
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
454
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
455
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
456
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
457
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
458
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
459
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
460
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
461
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
462
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
463
- "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
464
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
465
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
466
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
467
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
468
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
469
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
470
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
471
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
472
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
473
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
474
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
475
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
476
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
477
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
478
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
479
- "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
480
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
481
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
482
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
483
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
484
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
485
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
486
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
487
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
488
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
489
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
490
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
491
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
492
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
493
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
494
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
495
- "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
496
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
497
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
498
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
499
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
500
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
501
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
502
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
503
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
504
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
505
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
506
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
507
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
508
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
509
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
510
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
511
- "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
512
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
513
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
514
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
515
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
516
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
517
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
518
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
519
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
520
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
521
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
522
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
523
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
524
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
525
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
526
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
527
- "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
528
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
529
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
530
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
531
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
532
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
533
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
534
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
535
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
536
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
537
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
538
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
539
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
540
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
541
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
542
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
543
- "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
544
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
545
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
546
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
547
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
548
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
549
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
550
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
551
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
552
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
553
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
554
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
555
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
556
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
557
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
558
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
559
- "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
560
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
561
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
562
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
563
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
564
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
565
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
566
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
567
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
568
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
569
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
570
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
571
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
572
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
573
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
574
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
575
- "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
576
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
577
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
578
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
579
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
580
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
581
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
582
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
583
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
584
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
585
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
586
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
587
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
588
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
589
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
590
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
591
- "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
592
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
593
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
594
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
595
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
596
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
597
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
598
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
599
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
600
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
601
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
602
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
603
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
604
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
605
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
606
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
607
- "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
608
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
609
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
610
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
611
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
612
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
613
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
614
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
615
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
616
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
617
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
618
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
619
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
620
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
621
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
622
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
623
- "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
624
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
625
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
626
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
627
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
628
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
629
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
630
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
631
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
632
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
633
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
634
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
635
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
636
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
637
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
638
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
639
- "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
640
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
641
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
642
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
643
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
644
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
645
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
646
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
647
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
648
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
649
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
650
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
651
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
652
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
653
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
654
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
655
- "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
656
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
657
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
658
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
659
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
660
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
661
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
662
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
663
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
664
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
665
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
666
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
667
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
668
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
669
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
670
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
671
- "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
672
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
673
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
674
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
675
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
676
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
677
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
678
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
679
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
680
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
681
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
682
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
683
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
684
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
685
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
686
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
687
- "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
688
- "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
689
- "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
690
- "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
691
- "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors",
692
- "retriever.bert.embeddings.LayerNorm.bias": "model-00003-of-00004.safetensors",
693
- "retriever.bert.embeddings.LayerNorm.weight": "model-00003-of-00004.safetensors",
694
- "retriever.bert.embeddings.position_embeddings.weight": "model-00003-of-00004.safetensors",
695
- "retriever.bert.embeddings.token_type_embeddings.weight": "model-00003-of-00004.safetensors",
696
- "retriever.bert.embeddings.word_embeddings.weight": "model-00003-of-00004.safetensors",
697
- "retriever.bert.encoder.layer.0.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
698
- "retriever.bert.encoder.layer.0.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
699
- "retriever.bert.encoder.layer.0.attention.output.dense.bias": "model-00003-of-00004.safetensors",
700
- "retriever.bert.encoder.layer.0.attention.output.dense.weight": "model-00003-of-00004.safetensors",
701
- "retriever.bert.encoder.layer.0.attention.self.key.bias": "model-00003-of-00004.safetensors",
702
- "retriever.bert.encoder.layer.0.attention.self.key.weight": "model-00003-of-00004.safetensors",
703
- "retriever.bert.encoder.layer.0.attention.self.query.bias": "model-00003-of-00004.safetensors",
704
- "retriever.bert.encoder.layer.0.attention.self.query.weight": "model-00003-of-00004.safetensors",
705
- "retriever.bert.encoder.layer.0.attention.self.value.bias": "model-00003-of-00004.safetensors",
706
- "retriever.bert.encoder.layer.0.attention.self.value.weight": "model-00003-of-00004.safetensors",
707
- "retriever.bert.encoder.layer.0.intermediate.dense.bias": "model-00003-of-00004.safetensors",
708
- "retriever.bert.encoder.layer.0.intermediate.dense.weight": "model-00003-of-00004.safetensors",
709
- "retriever.bert.encoder.layer.0.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
710
- "retriever.bert.encoder.layer.0.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
711
- "retriever.bert.encoder.layer.0.output.dense.bias": "model-00003-of-00004.safetensors",
712
- "retriever.bert.encoder.layer.0.output.dense.weight": "model-00003-of-00004.safetensors",
713
- "retriever.bert.encoder.layer.1.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
714
- "retriever.bert.encoder.layer.1.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
715
- "retriever.bert.encoder.layer.1.attention.output.dense.bias": "model-00003-of-00004.safetensors",
716
- "retriever.bert.encoder.layer.1.attention.output.dense.weight": "model-00003-of-00004.safetensors",
717
- "retriever.bert.encoder.layer.1.attention.self.key.bias": "model-00003-of-00004.safetensors",
718
- "retriever.bert.encoder.layer.1.attention.self.key.weight": "model-00003-of-00004.safetensors",
719
- "retriever.bert.encoder.layer.1.attention.self.query.bias": "model-00003-of-00004.safetensors",
720
- "retriever.bert.encoder.layer.1.attention.self.query.weight": "model-00003-of-00004.safetensors",
721
- "retriever.bert.encoder.layer.1.attention.self.value.bias": "model-00003-of-00004.safetensors",
722
- "retriever.bert.encoder.layer.1.attention.self.value.weight": "model-00003-of-00004.safetensors",
723
- "retriever.bert.encoder.layer.1.intermediate.dense.bias": "model-00003-of-00004.safetensors",
724
- "retriever.bert.encoder.layer.1.intermediate.dense.weight": "model-00003-of-00004.safetensors",
725
- "retriever.bert.encoder.layer.1.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
726
- "retriever.bert.encoder.layer.1.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
727
- "retriever.bert.encoder.layer.1.output.dense.bias": "model-00003-of-00004.safetensors",
728
- "retriever.bert.encoder.layer.1.output.dense.weight": "model-00003-of-00004.safetensors",
729
- "retriever.bert.encoder.layer.10.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
730
- "retriever.bert.encoder.layer.10.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
731
- "retriever.bert.encoder.layer.10.attention.output.dense.bias": "model-00003-of-00004.safetensors",
732
- "retriever.bert.encoder.layer.10.attention.output.dense.weight": "model-00003-of-00004.safetensors",
733
- "retriever.bert.encoder.layer.10.attention.self.key.bias": "model-00003-of-00004.safetensors",
734
- "retriever.bert.encoder.layer.10.attention.self.key.weight": "model-00003-of-00004.safetensors",
735
- "retriever.bert.encoder.layer.10.attention.self.query.bias": "model-00003-of-00004.safetensors",
736
- "retriever.bert.encoder.layer.10.attention.self.query.weight": "model-00003-of-00004.safetensors",
737
- "retriever.bert.encoder.layer.10.attention.self.value.bias": "model-00003-of-00004.safetensors",
738
- "retriever.bert.encoder.layer.10.attention.self.value.weight": "model-00003-of-00004.safetensors",
739
- "retriever.bert.encoder.layer.10.intermediate.dense.bias": "model-00003-of-00004.safetensors",
740
- "retriever.bert.encoder.layer.10.intermediate.dense.weight": "model-00003-of-00004.safetensors",
741
- "retriever.bert.encoder.layer.10.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
742
- "retriever.bert.encoder.layer.10.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
743
- "retriever.bert.encoder.layer.10.output.dense.bias": "model-00003-of-00004.safetensors",
744
- "retriever.bert.encoder.layer.10.output.dense.weight": "model-00003-of-00004.safetensors",
745
- "retriever.bert.encoder.layer.11.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
746
- "retriever.bert.encoder.layer.11.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
747
- "retriever.bert.encoder.layer.11.attention.output.dense.bias": "model-00003-of-00004.safetensors",
748
- "retriever.bert.encoder.layer.11.attention.output.dense.weight": "model-00003-of-00004.safetensors",
749
- "retriever.bert.encoder.layer.11.attention.self.key.bias": "model-00003-of-00004.safetensors",
750
- "retriever.bert.encoder.layer.11.attention.self.key.weight": "model-00003-of-00004.safetensors",
751
- "retriever.bert.encoder.layer.11.attention.self.query.bias": "model-00003-of-00004.safetensors",
752
- "retriever.bert.encoder.layer.11.attention.self.query.weight": "model-00003-of-00004.safetensors",
753
- "retriever.bert.encoder.layer.11.attention.self.value.bias": "model-00003-of-00004.safetensors",
754
- "retriever.bert.encoder.layer.11.attention.self.value.weight": "model-00003-of-00004.safetensors",
755
- "retriever.bert.encoder.layer.11.intermediate.dense.bias": "model-00003-of-00004.safetensors",
756
- "retriever.bert.encoder.layer.11.intermediate.dense.weight": "model-00003-of-00004.safetensors",
757
- "retriever.bert.encoder.layer.11.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
758
- "retriever.bert.encoder.layer.11.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
759
- "retriever.bert.encoder.layer.11.output.dense.bias": "model-00003-of-00004.safetensors",
760
- "retriever.bert.encoder.layer.11.output.dense.weight": "model-00003-of-00004.safetensors",
761
- "retriever.bert.encoder.layer.2.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
762
- "retriever.bert.encoder.layer.2.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
763
- "retriever.bert.encoder.layer.2.attention.output.dense.bias": "model-00003-of-00004.safetensors",
764
- "retriever.bert.encoder.layer.2.attention.output.dense.weight": "model-00003-of-00004.safetensors",
765
- "retriever.bert.encoder.layer.2.attention.self.key.bias": "model-00003-of-00004.safetensors",
766
- "retriever.bert.encoder.layer.2.attention.self.key.weight": "model-00003-of-00004.safetensors",
767
- "retriever.bert.encoder.layer.2.attention.self.query.bias": "model-00003-of-00004.safetensors",
768
- "retriever.bert.encoder.layer.2.attention.self.query.weight": "model-00003-of-00004.safetensors",
769
- "retriever.bert.encoder.layer.2.attention.self.value.bias": "model-00003-of-00004.safetensors",
770
- "retriever.bert.encoder.layer.2.attention.self.value.weight": "model-00003-of-00004.safetensors",
771
- "retriever.bert.encoder.layer.2.intermediate.dense.bias": "model-00003-of-00004.safetensors",
772
- "retriever.bert.encoder.layer.2.intermediate.dense.weight": "model-00003-of-00004.safetensors",
773
- "retriever.bert.encoder.layer.2.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
774
- "retriever.bert.encoder.layer.2.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
775
- "retriever.bert.encoder.layer.2.output.dense.bias": "model-00003-of-00004.safetensors",
776
- "retriever.bert.encoder.layer.2.output.dense.weight": "model-00003-of-00004.safetensors",
777
- "retriever.bert.encoder.layer.3.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
778
- "retriever.bert.encoder.layer.3.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
779
- "retriever.bert.encoder.layer.3.attention.output.dense.bias": "model-00003-of-00004.safetensors",
780
- "retriever.bert.encoder.layer.3.attention.output.dense.weight": "model-00003-of-00004.safetensors",
781
- "retriever.bert.encoder.layer.3.attention.self.key.bias": "model-00003-of-00004.safetensors",
782
- "retriever.bert.encoder.layer.3.attention.self.key.weight": "model-00003-of-00004.safetensors",
783
- "retriever.bert.encoder.layer.3.attention.self.query.bias": "model-00003-of-00004.safetensors",
784
- "retriever.bert.encoder.layer.3.attention.self.query.weight": "model-00003-of-00004.safetensors",
785
- "retriever.bert.encoder.layer.3.attention.self.value.bias": "model-00003-of-00004.safetensors",
786
- "retriever.bert.encoder.layer.3.attention.self.value.weight": "model-00003-of-00004.safetensors",
787
- "retriever.bert.encoder.layer.3.intermediate.dense.bias": "model-00003-of-00004.safetensors",
788
- "retriever.bert.encoder.layer.3.intermediate.dense.weight": "model-00003-of-00004.safetensors",
789
- "retriever.bert.encoder.layer.3.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
790
- "retriever.bert.encoder.layer.3.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
791
- "retriever.bert.encoder.layer.3.output.dense.bias": "model-00003-of-00004.safetensors",
792
- "retriever.bert.encoder.layer.3.output.dense.weight": "model-00003-of-00004.safetensors",
793
- "retriever.bert.encoder.layer.4.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
794
- "retriever.bert.encoder.layer.4.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
795
- "retriever.bert.encoder.layer.4.attention.output.dense.bias": "model-00003-of-00004.safetensors",
796
- "retriever.bert.encoder.layer.4.attention.output.dense.weight": "model-00003-of-00004.safetensors",
797
- "retriever.bert.encoder.layer.4.attention.self.key.bias": "model-00003-of-00004.safetensors",
798
- "retriever.bert.encoder.layer.4.attention.self.key.weight": "model-00003-of-00004.safetensors",
799
- "retriever.bert.encoder.layer.4.attention.self.query.bias": "model-00003-of-00004.safetensors",
800
- "retriever.bert.encoder.layer.4.attention.self.query.weight": "model-00003-of-00004.safetensors",
801
- "retriever.bert.encoder.layer.4.attention.self.value.bias": "model-00003-of-00004.safetensors",
802
- "retriever.bert.encoder.layer.4.attention.self.value.weight": "model-00003-of-00004.safetensors",
803
- "retriever.bert.encoder.layer.4.intermediate.dense.bias": "model-00003-of-00004.safetensors",
804
- "retriever.bert.encoder.layer.4.intermediate.dense.weight": "model-00003-of-00004.safetensors",
805
- "retriever.bert.encoder.layer.4.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
806
- "retriever.bert.encoder.layer.4.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
807
- "retriever.bert.encoder.layer.4.output.dense.bias": "model-00003-of-00004.safetensors",
808
- "retriever.bert.encoder.layer.4.output.dense.weight": "model-00003-of-00004.safetensors",
809
- "retriever.bert.encoder.layer.5.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
810
- "retriever.bert.encoder.layer.5.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
811
- "retriever.bert.encoder.layer.5.attention.output.dense.bias": "model-00003-of-00004.safetensors",
812
- "retriever.bert.encoder.layer.5.attention.output.dense.weight": "model-00003-of-00004.safetensors",
813
- "retriever.bert.encoder.layer.5.attention.self.key.bias": "model-00003-of-00004.safetensors",
814
- "retriever.bert.encoder.layer.5.attention.self.key.weight": "model-00003-of-00004.safetensors",
815
- "retriever.bert.encoder.layer.5.attention.self.query.bias": "model-00003-of-00004.safetensors",
816
- "retriever.bert.encoder.layer.5.attention.self.query.weight": "model-00003-of-00004.safetensors",
817
- "retriever.bert.encoder.layer.5.attention.self.value.bias": "model-00003-of-00004.safetensors",
818
- "retriever.bert.encoder.layer.5.attention.self.value.weight": "model-00003-of-00004.safetensors",
819
- "retriever.bert.encoder.layer.5.intermediate.dense.bias": "model-00003-of-00004.safetensors",
820
- "retriever.bert.encoder.layer.5.intermediate.dense.weight": "model-00003-of-00004.safetensors",
821
- "retriever.bert.encoder.layer.5.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
822
- "retriever.bert.encoder.layer.5.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
823
- "retriever.bert.encoder.layer.5.output.dense.bias": "model-00003-of-00004.safetensors",
824
- "retriever.bert.encoder.layer.5.output.dense.weight": "model-00003-of-00004.safetensors",
825
- "retriever.bert.encoder.layer.6.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
826
- "retriever.bert.encoder.layer.6.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
827
- "retriever.bert.encoder.layer.6.attention.output.dense.bias": "model-00003-of-00004.safetensors",
828
- "retriever.bert.encoder.layer.6.attention.output.dense.weight": "model-00003-of-00004.safetensors",
829
- "retriever.bert.encoder.layer.6.attention.self.key.bias": "model-00003-of-00004.safetensors",
830
- "retriever.bert.encoder.layer.6.attention.self.key.weight": "model-00003-of-00004.safetensors",
831
- "retriever.bert.encoder.layer.6.attention.self.query.bias": "model-00003-of-00004.safetensors",
832
- "retriever.bert.encoder.layer.6.attention.self.query.weight": "model-00003-of-00004.safetensors",
833
- "retriever.bert.encoder.layer.6.attention.self.value.bias": "model-00003-of-00004.safetensors",
834
- "retriever.bert.encoder.layer.6.attention.self.value.weight": "model-00003-of-00004.safetensors",
835
- "retriever.bert.encoder.layer.6.intermediate.dense.bias": "model-00003-of-00004.safetensors",
836
- "retriever.bert.encoder.layer.6.intermediate.dense.weight": "model-00003-of-00004.safetensors",
837
- "retriever.bert.encoder.layer.6.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
838
- "retriever.bert.encoder.layer.6.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
839
- "retriever.bert.encoder.layer.6.output.dense.bias": "model-00003-of-00004.safetensors",
840
- "retriever.bert.encoder.layer.6.output.dense.weight": "model-00003-of-00004.safetensors",
841
- "retriever.bert.encoder.layer.7.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
842
- "retriever.bert.encoder.layer.7.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
843
- "retriever.bert.encoder.layer.7.attention.output.dense.bias": "model-00003-of-00004.safetensors",
844
- "retriever.bert.encoder.layer.7.attention.output.dense.weight": "model-00003-of-00004.safetensors",
845
- "retriever.bert.encoder.layer.7.attention.self.key.bias": "model-00003-of-00004.safetensors",
846
- "retriever.bert.encoder.layer.7.attention.self.key.weight": "model-00003-of-00004.safetensors",
847
- "retriever.bert.encoder.layer.7.attention.self.query.bias": "model-00003-of-00004.safetensors",
848
- "retriever.bert.encoder.layer.7.attention.self.query.weight": "model-00003-of-00004.safetensors",
849
- "retriever.bert.encoder.layer.7.attention.self.value.bias": "model-00003-of-00004.safetensors",
850
- "retriever.bert.encoder.layer.7.attention.self.value.weight": "model-00003-of-00004.safetensors",
851
- "retriever.bert.encoder.layer.7.intermediate.dense.bias": "model-00003-of-00004.safetensors",
852
- "retriever.bert.encoder.layer.7.intermediate.dense.weight": "model-00003-of-00004.safetensors",
853
- "retriever.bert.encoder.layer.7.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
854
- "retriever.bert.encoder.layer.7.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
855
- "retriever.bert.encoder.layer.7.output.dense.bias": "model-00003-of-00004.safetensors",
856
- "retriever.bert.encoder.layer.7.output.dense.weight": "model-00003-of-00004.safetensors",
857
- "retriever.bert.encoder.layer.8.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
858
- "retriever.bert.encoder.layer.8.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
859
- "retriever.bert.encoder.layer.8.attention.output.dense.bias": "model-00003-of-00004.safetensors",
860
- "retriever.bert.encoder.layer.8.attention.output.dense.weight": "model-00003-of-00004.safetensors",
861
- "retriever.bert.encoder.layer.8.attention.self.key.bias": "model-00003-of-00004.safetensors",
862
- "retriever.bert.encoder.layer.8.attention.self.key.weight": "model-00003-of-00004.safetensors",
863
- "retriever.bert.encoder.layer.8.attention.self.query.bias": "model-00003-of-00004.safetensors",
864
- "retriever.bert.encoder.layer.8.attention.self.query.weight": "model-00003-of-00004.safetensors",
865
- "retriever.bert.encoder.layer.8.attention.self.value.bias": "model-00003-of-00004.safetensors",
866
- "retriever.bert.encoder.layer.8.attention.self.value.weight": "model-00003-of-00004.safetensors",
867
- "retriever.bert.encoder.layer.8.intermediate.dense.bias": "model-00003-of-00004.safetensors",
868
- "retriever.bert.encoder.layer.8.intermediate.dense.weight": "model-00003-of-00004.safetensors",
869
- "retriever.bert.encoder.layer.8.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
870
- "retriever.bert.encoder.layer.8.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
871
- "retriever.bert.encoder.layer.8.output.dense.bias": "model-00003-of-00004.safetensors",
872
- "retriever.bert.encoder.layer.8.output.dense.weight": "model-00003-of-00004.safetensors",
873
- "retriever.bert.encoder.layer.9.attention.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
874
- "retriever.bert.encoder.layer.9.attention.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
875
- "retriever.bert.encoder.layer.9.attention.output.dense.bias": "model-00003-of-00004.safetensors",
876
- "retriever.bert.encoder.layer.9.attention.output.dense.weight": "model-00003-of-00004.safetensors",
877
- "retriever.bert.encoder.layer.9.attention.self.key.bias": "model-00003-of-00004.safetensors",
878
- "retriever.bert.encoder.layer.9.attention.self.key.weight": "model-00003-of-00004.safetensors",
879
- "retriever.bert.encoder.layer.9.attention.self.query.bias": "model-00003-of-00004.safetensors",
880
- "retriever.bert.encoder.layer.9.attention.self.query.weight": "model-00003-of-00004.safetensors",
881
- "retriever.bert.encoder.layer.9.attention.self.value.bias": "model-00003-of-00004.safetensors",
882
- "retriever.bert.encoder.layer.9.attention.self.value.weight": "model-00003-of-00004.safetensors",
883
- "retriever.bert.encoder.layer.9.intermediate.dense.bias": "model-00003-of-00004.safetensors",
884
- "retriever.bert.encoder.layer.9.intermediate.dense.weight": "model-00003-of-00004.safetensors",
885
- "retriever.bert.encoder.layer.9.output.LayerNorm.bias": "model-00003-of-00004.safetensors",
886
- "retriever.bert.encoder.layer.9.output.LayerNorm.weight": "model-00003-of-00004.safetensors",
887
- "retriever.bert.encoder.layer.9.output.dense.bias": "model-00003-of-00004.safetensors",
888
- "retriever.bert.encoder.layer.9.output.dense.weight": "model-00003-of-00004.safetensors",
889
- "retriever.bert.pooler.dense.bias": "model-00003-of-00004.safetensors",
890
- "retriever.bert.pooler.dense.weight": "model-00003-of-00004.safetensors",
891
- "retriever.cross_attn.in_proj_bias": "model-00004-of-00004.safetensors",
892
- "retriever.cross_attn.in_proj_weight": "model-00004-of-00004.safetensors",
893
- "retriever.cross_attn.out_proj.bias": "model-00004-of-00004.safetensors",
894
- "retriever.cross_attn.out_proj.weight": "model-00004-of-00004.safetensors",
895
- "retriever.cross_key_proj.weight": "model-00003-of-00004.safetensors",
896
- "retriever.cross_out_proj.weight": "model-00004-of-00004.safetensors",
897
- "retriever.cross_query_proj.weight": "model-00003-of-00004.safetensors",
898
- "retriever.cross_value_proj.weight": "model-00004-of-00004.safetensors",
899
- "retriever.current_weight": "model-00003-of-00004.safetensors",
900
- "retriever.hyper_U.0.bias": "model-00003-of-00004.safetensors",
901
- "retriever.hyper_U.0.weight": "model-00003-of-00004.safetensors",
902
- "retriever.hyper_U.2.bias": "model-00003-of-00004.safetensors",
903
- "retriever.hyper_U.2.weight": "model-00003-of-00004.safetensors",
904
- "retriever.hyper_V": "model-00003-of-00004.safetensors",
905
- "retriever.keys": "model-00003-of-00004.safetensors",
906
- "retriever.previous_weights.0": "model-00003-of-00004.safetensors",
907
- "retriever.previous_weights.1": "model-00003-of-00004.safetensors",
908
- "retriever.previous_weights.2": "model-00003-of-00004.safetensors",
909
- "retriever.previous_weights.3": "model-00003-of-00004.safetensors",
910
- "retriever.previous_weights.4": "model-00003-of-00004.safetensors",
911
- "retriever.previous_weights.5": "model-00003-of-00004.safetensors",
912
- "retriever.previous_weights.6": "model-00003-of-00004.safetensors",
913
- "retriever.retrieve_lora": "model-00003-of-00004.safetensors",
914
- "retriever.weight_offset_components": "model-00003-of-00004.safetensors"
915
- }
916
- }
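Note on the deleted model.safetensors.index.json: its weight_map pairs every parameter name (LLaMA backbone, CLIP vision tower, mm projector, and the retriever/prompt-key modules listed above) with one of the four safetensors shards. The snippet below is only an illustrative sketch of how such an index is typically consumed with the safetensors library; the path and tensor name are taken from the diff above, but the code itself is not part of this repository.

import json
from safetensors import safe_open

# Load the shard index: weight_map maps parameter names to shard filenames.
with open("llava-med4_CXR/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # e.g. "model-00003-of-00004.safetensors"

# Open only the shard that holds this tensor instead of reading all four files.
with safe_open(f"llava-med4_CXR/{shard}", framework="pt", device="cpu") as st:
    tensor = st.get_tensor(name)
print(name, tuple(tensor.shape))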
llava-med4_CXR/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "bos_token": {
3
- "content": "<s>",
4
- "lstrip": false,
5
- "normalized": false,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "</s>",
11
- "lstrip": false,
12
- "normalized": false,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": "<unk>",
17
- "unk_token": {
18
- "content": "<unk>",
19
- "lstrip": false,
20
- "normalized": false,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
- }
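Note on the deleted special_tokens_map.json: it keeps the usual LLaMA tokens, with <s> as BOS, </s> as EOS, <unk> as the unknown token, and the same <unk> string reused as the padding token. A small, hedged sketch of reading that mapping directly (plain JSON inspection, not code from this repository):

import json

with open("llava-med4_CXR/special_tokens_map.json") as f:
    special = json.load(f)

# bos/eos/unk are full token dictionaries; pad_token is just the string "<unk>",
# i.e. padding reuses the unknown token instead of introducing a dedicated pad token.
print(special["bos_token"]["content"], special["eos_token"]["content"])
print(special["unk_token"]["content"], special["pad_token"])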
llava-med4_CXR/tokenizer.model DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
- size 499723
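Note on the deleted tokenizer.model: it is tracked with Git LFS, so the diff shows only the three-line pointer (spec version, sha256 oid, size in bytes), not the SentencePiece model itself. A hedged sketch of verifying a downloaded copy against that pointer; the hash and size come from the pointer above, while the check itself is illustrative and not part of this repository:

import hashlib
from pathlib import Path

EXPECTED_SHA256 = "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
EXPECTED_SIZE = 499723  # bytes, per the LFS pointer

data = Path("llava-med4_CXR/tokenizer.model").read_bytes()
assert len(data) == EXPECTED_SIZE, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "sha256 does not match the LFS pointer"
print("tokenizer.model matches its Git LFS pointer")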
llava-med4_CXR/tokenizer_config.json DELETED
@@ -1,42 +0,0 @@
1
- {
2
- "add_bos_token": true,
3
- "add_eos_token": false,
4
- "added_tokens_decoder": {
5
- "0": {
6
- "content": "<unk>",
7
- "lstrip": false,
8
- "normalized": false,
9
- "rstrip": false,
10
- "single_word": false,
11
- "special": true
12
- },
13
- "1": {
14
- "content": "<s>",
15
- "lstrip": false,
16
- "normalized": false,
17
- "rstrip": false,
18
- "single_word": false,
19
- "special": true
20
- },
21
- "2": {
22
- "content": "</s>",
23
- "lstrip": false,
24
- "normalized": false,
25
- "rstrip": false,
26
- "single_word": false,
27
- "special": true
28
- }
29
- },
30
- "bos_token": "<s>",
31
- "clean_up_tokenization_spaces": false,
32
- "eos_token": "</s>",
33
- "legacy": false,
34
- "model_max_length": 2048,
35
- "pad_token": "<unk>",
36
- "padding_side": "right",
37
- "sp_model_kwargs": {},
38
- "spaces_between_special_tokens": false,
39
- "tokenizer_class": "LlamaTokenizer",
40
- "unk_token": "<unk>",
41
- "use_default_system_prompt": false
42
- }
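Note on the deleted tokenizer_config.json: it configures a slow LlamaTokenizer that prepends <s> but never appends </s> (add_bos_token true, add_eos_token false), caps sequences at 2048 tokens, and right-pads with <unk>. The sketch below shows how those settings surface through the standard transformers API, assuming the deleted tokenizer files are restored locally; it is illustrative usage, not code from this repository.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("llava-med4_CXR", use_fast=False)

ids = tok("What does this chest X-ray show?")["input_ids"]
print(ids[0] == tok.bos_token_id)    # True: <s> is prepended (add_bos_token)
print(ids[-1] == tok.eos_token_id)   # False: </s> is not appended (add_eos_token false)
print(tok.model_max_length, tok.padding_side)  # 2048 right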
llava-med4_CXR/trainer_state.json DELETED
@@ -1,798 +0,0 @@
1
- {
2
- "best_metric": null,
3
- "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
- "eval_steps": 500,
6
- "global_step": 128,
7
- "is_hyper_param_search": false,
8
- "is_local_process_zero": true,
9
- "is_world_process_zero": true,
10
- "log_history": [
11
- {
12
- "epoch": 0.01,
13
- "learning_rate": 5e-06,
14
- "loss": 0.9504,
15
- "step": 1
16
- },
17
- {
18
- "epoch": 0.02,
19
- "learning_rate": 1e-05,
20
- "loss": 0.901,
21
- "step": 2
22
- },
23
- {
24
- "epoch": 0.02,
25
- "learning_rate": 1.5000000000000002e-05,
26
- "loss": 0.8897,
27
- "step": 3
28
- },
29
- {
30
- "epoch": 0.03,
31
- "learning_rate": 2e-05,
32
- "loss": 0.8943,
33
- "step": 4
34
- },
35
- {
36
- "epoch": 0.04,
37
- "learning_rate": 1.9996790752964305e-05,
38
- "loss": 0.9301,
39
- "step": 5
40
- },
41
- {
42
- "epoch": 0.05,
43
- "learning_rate": 1.998716507171053e-05,
44
- "loss": 0.8246,
45
- "step": 6
46
- },
47
- {
48
- "epoch": 0.05,
49
- "learning_rate": 1.9971129134476474e-05,
50
- "loss": 0.8817,
51
- "step": 7
52
- },
53
- {
54
- "epoch": 0.06,
55
- "learning_rate": 1.994869323391895e-05,
56
- "loss": 0.8112,
57
- "step": 8
58
- },
59
- {
60
- "epoch": 0.07,
61
- "learning_rate": 1.991987177050743e-05,
62
- "loss": 0.8144,
63
- "step": 9
64
- },
65
- {
66
- "epoch": 0.08,
67
- "learning_rate": 1.9884683243281117e-05,
68
- "loss": 0.8268,
69
- "step": 10
70
- },
71
- {
72
- "epoch": 0.09,
73
- "learning_rate": 1.9843150237975343e-05,
74
- "loss": 0.7232,
75
- "step": 11
76
- },
77
- {
78
- "epoch": 0.09,
79
- "learning_rate": 1.9795299412524948e-05,
80
- "loss": 0.7665,
81
- "step": 12
82
- },
83
- {
84
- "epoch": 0.1,
85
- "learning_rate": 1.9741161479953872e-05,
86
- "loss": 0.7476,
87
- "step": 13
88
- },
89
- {
90
- "epoch": 0.11,
91
- "learning_rate": 1.9680771188662044e-05,
92
- "loss": 0.8077,
93
- "step": 14
94
- },
95
- {
96
- "epoch": 0.12,
97
- "learning_rate": 1.9614167300122126e-05,
98
- "loss": 0.7163,
99
- "step": 15
100
- },
101
- {
102
- "epoch": 0.12,
103
- "learning_rate": 1.954139256400049e-05,
104
- "loss": 0.7014,
105
- "step": 16
106
- },
107
- {
108
- "epoch": 0.13,
109
- "learning_rate": 1.9462493690718373e-05,
110
- "loss": 0.7475,
111
- "step": 17
112
- },
113
- {
114
- "epoch": 0.14,
115
- "learning_rate": 1.9377521321470806e-05,
116
- "loss": 0.7403,
117
- "step": 18
118
- },
119
- {
120
- "epoch": 0.15,
121
- "learning_rate": 1.9286529995722624e-05,
122
- "loss": 0.703,
123
- "step": 19
124
- },
125
- {
126
- "epoch": 0.16,
127
- "learning_rate": 1.918957811620231e-05,
128
- "loss": 0.7194,
129
- "step": 20
130
- },
131
- {
132
- "epoch": 0.16,
133
- "learning_rate": 1.908672791141625e-05,
134
- "loss": 0.7485,
135
- "step": 21
136
- },
137
- {
138
- "epoch": 0.17,
139
- "learning_rate": 1.897804539570742e-05,
140
- "loss": 0.7832,
141
- "step": 22
142
- },
143
- {
144
- "epoch": 0.18,
145
- "learning_rate": 1.8863600326884085e-05,
146
- "loss": 0.7631,
147
- "step": 23
148
- },
149
- {
150
- "epoch": 0.19,
151
- "learning_rate": 1.8743466161445823e-05,
152
- "loss": 0.6299,
153
- "step": 24
154
- },
155
- {
156
- "epoch": 0.2,
157
- "learning_rate": 1.8617720007435497e-05,
158
- "loss": 0.7198,
159
- "step": 25
160
- },
161
- {
162
- "epoch": 0.2,
163
- "learning_rate": 1.848644257494751e-05,
164
- "loss": 0.7476,
165
- "step": 26
166
- },
167
- {
168
- "epoch": 0.21,
169
- "learning_rate": 1.8349718124324075e-05,
170
- "loss": 0.7193,
171
- "step": 27
172
- },
173
- {
174
- "epoch": 0.22,
175
- "learning_rate": 1.8207634412072765e-05,
176
- "loss": 0.6922,
177
- "step": 28
178
- },
179
- {
180
- "epoch": 0.23,
181
- "learning_rate": 1.8060282634540053e-05,
182
- "loss": 0.6785,
183
- "step": 29
184
- },
185
- {
186
- "epoch": 0.23,
187
- "learning_rate": 1.7907757369376984e-05,
188
- "loss": 0.7018,
189
- "step": 30
190
- },
191
- {
192
- "epoch": 0.24,
193
- "learning_rate": 1.775015651483459e-05,
194
- "loss": 0.7281,
195
- "step": 31
196
- },
197
- {
198
- "epoch": 0.25,
199
- "learning_rate": 1.758758122692791e-05,
200
- "loss": 0.7861,
201
- "step": 32
202
- },
203
- {
204
- "epoch": 0.26,
205
- "learning_rate": 1.742013585450911e-05,
206
- "loss": 0.7506,
207
- "step": 33
208
- },
209
- {
210
- "epoch": 0.27,
211
- "learning_rate": 1.72479278722912e-05,
212
- "loss": 0.7214,
213
- "step": 34
214
- },
215
- {
216
- "epoch": 0.27,
217
- "learning_rate": 1.7071067811865477e-05,
218
- "loss": 0.6632,
219
- "step": 35
220
- },
221
- {
222
- "epoch": 0.28,
223
- "learning_rate": 1.688966919075687e-05,
224
- "loss": 0.7026,
225
- "step": 36
226
- },
227
- {
228
- "epoch": 0.29,
229
- "learning_rate": 1.6703848439562787e-05,
230
- "loss": 0.6651,
231
- "step": 37
232
- },
233
- {
234
- "epoch": 0.3,
235
- "learning_rate": 1.6513724827222225e-05,
236
- "loss": 0.6965,
237
- "step": 38
238
- },
239
- {
240
- "epoch": 0.3,
241
- "learning_rate": 1.631942038446304e-05,
242
- "loss": 0.7635,
243
- "step": 39
244
- },
245
- {
246
- "epoch": 0.31,
247
- "learning_rate": 1.612105982547663e-05,
248
- "loss": 0.7031,
249
- "step": 40
250
- },
251
- {
252
- "epoch": 0.32,
253
- "learning_rate": 1.5918770467870174e-05,
254
- "loss": 0.7306,
255
- "step": 41
256
- },
257
- {
258
- "epoch": 0.33,
259
- "learning_rate": 1.5712682150947926e-05,
260
- "loss": 0.7492,
261
- "step": 42
262
- },
263
- {
264
- "epoch": 0.34,
265
- "learning_rate": 1.5502927152373913e-05,
266
- "loss": 0.6562,
267
- "step": 43
268
- },
269
- {
270
- "epoch": 0.34,
271
- "learning_rate": 1.5289640103269626e-05,
272
- "loss": 0.7852,
273
- "step": 44
274
- },
275
- {
276
- "epoch": 0.35,
277
- "learning_rate": 1.5072957901801075e-05,
278
- "loss": 0.6852,
279
- "step": 45
280
- },
281
- {
282
- "epoch": 0.36,
283
- "learning_rate": 1.4853019625310813e-05,
284
- "loss": 1.1711,
285
- "step": 46
286
- },
287
- {
288
- "epoch": 0.37,
289
- "learning_rate": 1.4629966441051208e-05,
290
- "loss": 0.6877,
291
- "step": 47
292
- },
293
- {
294
- "epoch": 0.38,
295
- "learning_rate": 1.4403941515576344e-05,
296
- "loss": 0.7231,
297
- "step": 48
298
- },
299
- {
300
- "epoch": 0.38,
301
- "learning_rate": 1.4175089922850633e-05,
302
- "loss": 0.6413,
303
- "step": 49
304
- },
305
- {
306
- "epoch": 0.39,
307
- "learning_rate": 1.3943558551133186e-05,
308
- "loss": 0.6416,
309
- "step": 50
310
- },
311
- {
312
- "epoch": 0.4,
313
- "learning_rate": 1.370949600869768e-05,
314
- "loss": 0.689,
315
- "step": 51
316
- },
317
- {
318
- "epoch": 0.41,
319
- "learning_rate": 1.3473052528448203e-05,
320
- "loss": 0.6143,
321
- "step": 52
322
- },
323
- {
324
- "epoch": 0.41,
325
- "learning_rate": 1.3234379871492381e-05,
326
- "loss": 0.7214,
327
- "step": 53
328
- },
329
- {
330
- "epoch": 0.42,
331
- "learning_rate": 1.2993631229733584e-05,
332
- "loss": 0.6634,
333
- "step": 54
334
- },
335
- {
336
- "epoch": 0.43,
337
- "learning_rate": 1.2750961127544782e-05,
338
- "loss": 0.7104,
339
- "step": 55
340
- },
341
- {
342
- "epoch": 0.44,
343
- "learning_rate": 1.2506525322587207e-05,
344
- "loss": 0.6859,
345
- "step": 56
346
- },
347
- {
348
- "epoch": 0.45,
349
- "learning_rate": 1.226048070583735e-05,
350
- "loss": 0.7042,
351
- "step": 57
352
- },
353
- {
354
- "epoch": 0.45,
355
- "learning_rate": 1.2012985200886602e-05,
356
- "loss": 0.6434,
357
- "step": 58
358
- },
359
- {
360
- "epoch": 0.46,
361
- "learning_rate": 1.1764197662578087e-05,
362
- "loss": 0.6865,
363
- "step": 59
364
- },
365
- {
366
- "epoch": 0.47,
367
- "learning_rate": 1.1514277775045768e-05,
368
- "loss": 0.6509,
369
- "step": 60
370
- },
371
- {
372
- "epoch": 0.48,
373
- "learning_rate": 1.1263385949221294e-05,
374
- "loss": 0.6981,
375
- "step": 61
376
- },
377
- {
378
- "epoch": 0.48,
379
- "learning_rate": 1.1011683219874324e-05,
380
- "loss": 0.6415,
381
- "step": 62
382
- },
383
- {
384
- "epoch": 0.49,
385
- "learning_rate": 1.0759331142252463e-05,
386
- "loss": 0.7014,
387
- "step": 63
388
- },
389
- {
390
- "epoch": 0.5,
391
- "learning_rate": 1.0506491688387128e-05,
392
- "loss": 0.6632,
393
- "step": 64
394
- },
395
- {
396
- "epoch": 0.51,
397
- "learning_rate": 1.025332714313188e-05,
398
- "loss": 0.6853,
399
- "step": 65
400
- },
401
- {
402
- "epoch": 0.52,
403
- "learning_rate": 1e-05,
404
- "loss": 0.6738,
405
- "step": 66
406
- },
407
- {
408
- "epoch": 0.52,
409
- "learning_rate": 9.746672856868124e-06,
410
- "loss": 0.7046,
411
- "step": 67
412
- },
413
- {
414
- "epoch": 0.53,
415
- "learning_rate": 9.493508311612874e-06,
416
- "loss": 0.7185,
417
- "step": 68
418
- },
419
- {
420
- "epoch": 0.54,
421
- "learning_rate": 9.24066885774754e-06,
422
- "loss": 0.662,
423
- "step": 69
424
- },
425
- {
426
- "epoch": 0.55,
427
- "learning_rate": 8.98831678012568e-06,
428
- "loss": 0.6882,
429
- "step": 70
430
- },
431
- {
432
- "epoch": 0.55,
433
- "learning_rate": 8.73661405077871e-06,
434
- "loss": 0.7271,
435
- "step": 71
436
- },
437
- {
438
- "epoch": 0.56,
439
- "learning_rate": 8.485722224954237e-06,
440
- "loss": 0.6583,
441
- "step": 72
442
- },
443
- {
444
- "epoch": 0.57,
445
- "learning_rate": 8.23580233742192e-06,
446
- "loss": 0.6379,
447
- "step": 73
448
- },
449
- {
450
- "epoch": 0.58,
451
- "learning_rate": 7.987014799113398e-06,
452
- "loss": 0.6748,
453
- "step": 74
454
- },
455
- {
456
- "epoch": 0.59,
457
- "learning_rate": 7.739519294162652e-06,
458
- "loss": 0.663,
459
- "step": 75
460
- },
461
- {
462
- "epoch": 0.59,
463
- "learning_rate": 7.493474677412795e-06,
464
- "loss": 0.7036,
465
- "step": 76
466
- },
467
- {
468
- "epoch": 0.6,
469
- "learning_rate": 7.24903887245522e-06,
470
- "loss": 0.6946,
471
- "step": 77
472
- },
473
- {
474
- "epoch": 0.61,
475
- "learning_rate": 7.006368770266421e-06,
476
- "loss": 0.6874,
477
- "step": 78
478
- },
479
- {
480
- "epoch": 0.62,
481
- "learning_rate": 6.7656201285076195e-06,
482
- "loss": 0.6204,
483
- "step": 79
484
- },
485
- {
486
- "epoch": 0.62,
487
- "learning_rate": 6.526947471551799e-06,
488
- "loss": 0.7973,
489
- "step": 80
490
- },
491
- {
492
- "epoch": 0.63,
493
- "learning_rate": 6.290503991302324e-06,
494
- "loss": 0.7534,
495
- "step": 81
496
- },
497
- {
498
- "epoch": 0.64,
499
- "learning_rate": 6.056441448866817e-06,
500
- "loss": 0.7022,
501
- "step": 82
502
- },
503
- {
504
- "epoch": 0.65,
505
- "learning_rate": 5.824910077149372e-06,
506
- "loss": 0.7117,
507
- "step": 83
508
- },
509
- {
510
- "epoch": 0.66,
511
- "learning_rate": 5.5960584844236565e-06,
512
- "loss": 0.6683,
513
- "step": 84
514
- },
515
- {
516
- "epoch": 0.66,
517
- "learning_rate": 5.370033558948793e-06,
518
- "loss": 0.6849,
519
- "step": 85
520
- },
521
- {
522
- "epoch": 0.67,
523
- "learning_rate": 5.146980374689192e-06,
524
- "loss": 0.6372,
525
- "step": 86
526
- },
527
- {
528
- "epoch": 0.68,
529
- "learning_rate": 4.9270420981989295e-06,
530
- "loss": 0.6842,
531
- "step": 87
532
- },
533
- {
534
- "epoch": 0.69,
535
- "learning_rate": 4.710359896730379e-06,
536
- "loss": 0.7774,
537
- "step": 88
538
- },
539
- {
540
- "epoch": 0.7,
541
- "learning_rate": 4.497072847626087e-06,
542
- "loss": 0.6211,
543
- "step": 89
544
- },
545
- {
546
- "epoch": 0.7,
547
- "learning_rate": 4.287317849052075e-06,
548
- "loss": 0.7054,
549
- "step": 90
550
- },
551
- {
552
- "epoch": 0.71,
553
- "learning_rate": 4.081229532129826e-06,
554
- "loss": 0.6393,
555
- "step": 91
556
- },
557
- {
558
- "epoch": 0.72,
559
- "learning_rate": 3.878940174523371e-06,
560
- "loss": 0.6815,
561
- "step": 92
562
- },
563
- {
564
- "epoch": 0.73,
565
- "learning_rate": 3.680579615536961e-06,
566
- "loss": 0.6435,
567
- "step": 93
568
- },
569
- {
570
- "epoch": 0.73,
571
- "learning_rate": 3.48627517277778e-06,
572
- "loss": 0.7027,
573
- "step": 94
574
- },
575
- {
576
- "epoch": 0.74,
577
- "learning_rate": 3.296151560437214e-06,
578
- "loss": 0.6794,
579
- "step": 95
580
- },
581
- {
582
- "epoch": 0.75,
583
- "learning_rate": 3.110330809243134e-06,
584
- "loss": 0.6884,
585
- "step": 96
586
- },
587
- {
588
- "epoch": 0.76,
589
- "learning_rate": 2.9289321881345257e-06,
590
- "loss": 0.6515,
591
- "step": 97
592
- },
593
- {
594
- "epoch": 0.77,
595
- "learning_rate": 2.7520721277088023e-06,
596
- "loss": 0.6724,
597
- "step": 98
598
- },
599
- {
600
- "epoch": 0.77,
601
- "learning_rate": 2.5798641454908945e-06,
602
- "loss": 0.6586,
603
- "step": 99
604
- },
605
- {
606
- "epoch": 0.78,
607
- "learning_rate": 2.4124187730720916e-06,
608
- "loss": 0.6303,
609
- "step": 100
610
- },
611
- {
612
- "epoch": 0.79,
613
- "learning_rate": 2.2498434851654125e-06,
614
- "loss": 0.6983,
615
- "step": 101
616
- },
617
- {
618
- "epoch": 0.8,
619
- "learning_rate": 2.092242630623016e-06,
620
- "loss": 0.7019,
621
- "step": 102
622
- },
623
- {
624
- "epoch": 0.8,
625
- "learning_rate": 1.939717365459952e-06,
626
- "loss": 0.6606,
627
- "step": 103
628
- },
629
- {
630
- "epoch": 0.81,
631
- "learning_rate": 1.7923655879272395e-06,
632
- "loss": 0.6972,
633
- "step": 104
634
- },
635
- {
636
- "epoch": 0.82,
637
- "learning_rate": 1.6502818756759275e-06,
638
- "loss": 0.6886,
639
- "step": 105
640
- },
641
- {
642
- "epoch": 0.83,
643
- "learning_rate": 1.5135574250524898e-06,
644
- "loss": 0.6781,
645
- "step": 106
646
- },
647
- {
648
- "epoch": 0.84,
649
- "learning_rate": 1.3822799925645036e-06,
650
- "loss": 0.6804,
651
- "step": 107
652
- },
653
- {
654
- "epoch": 0.84,
655
- "learning_rate": 1.2565338385541792e-06,
656
- "loss": 0.573,
657
- "step": 108
658
- },
659
- {
660
- "epoch": 0.85,
661
- "learning_rate": 1.1363996731159188e-06,
662
- "loss": 0.6189,
663
- "step": 109
664
- },
665
- {
666
- "epoch": 0.86,
667
- "learning_rate": 1.0219546042925842e-06,
668
- "loss": 0.6076,
669
- "step": 110
670
- },
671
- {
672
- "epoch": 0.87,
673
- "learning_rate": 9.132720885837509e-07,
674
- "loss": 0.6663,
675
- "step": 111
676
- },
677
- {
678
- "epoch": 0.88,
679
- "learning_rate": 8.10421883797694e-07,
680
- "loss": 0.7017,
681
- "step": 112
682
- },
683
- {
684
- "epoch": 0.88,
685
- "learning_rate": 7.13470004277379e-07,
686
- "loss": 0.6649,
687
- "step": 113
688
- },
689
- {
690
- "epoch": 0.89,
691
- "learning_rate": 6.22478678529197e-07,
692
- "loss": 0.6347,
693
- "step": 114
694
- },
695
- {
696
- "epoch": 0.9,
697
- "learning_rate": 5.375063092816313e-07,
698
- "loss": 0.7224,
699
- "step": 115
700
- },
701
- {
702
- "epoch": 0.91,
703
- "learning_rate": 4.5860743599951186e-07,
704
- "loss": 0.6747,
705
- "step": 116
706
- },
707
- {
708
- "epoch": 0.91,
709
- "learning_rate": 3.8583269987787607e-07,
710
- "loss": 0.6342,
711
- "step": 117
712
- },
713
- {
714
- "epoch": 0.92,
715
- "learning_rate": 3.1922881133795827e-07,
716
- "loss": 0.6575,
717
- "step": 118
718
- },
719
- {
720
- "epoch": 0.93,
721
- "learning_rate": 2.588385200461307e-07,
722
- "loss": 0.5832,
723
- "step": 119
724
- },
725
- {
726
- "epoch": 0.94,
727
- "learning_rate": 2.0470058747505516e-07,
728
- "loss": 0.6407,
729
- "step": 120
730
- },
731
- {
732
- "epoch": 0.95,
733
- "learning_rate": 1.5684976202465786e-07,
734
- "loss": 0.6012,
735
- "step": 121
736
- },
737
- {
738
- "epoch": 0.95,
739
- "learning_rate": 1.1531675671888621e-07,
740
- "loss": 0.6925,
741
- "step": 122
742
- },
743
- {
744
- "epoch": 0.96,
745
- "learning_rate": 8.012822949256981e-08,
746
- "loss": 0.715,
747
- "step": 123
748
- },
749
- {
750
- "epoch": 0.97,
751
- "learning_rate": 5.1306766081048456e-08,
752
- "loss": 0.6602,
753
- "step": 124
754
- },
755
- {
756
- "epoch": 0.98,
757
- "learning_rate": 2.8870865523525916e-08,
758
- "loss": 0.5662,
759
- "step": 125
760
- },
761
- {
762
- "epoch": 0.98,
763
- "learning_rate": 1.2834928289472415e-08,
764
- "loss": 0.6619,
765
- "step": 126
766
- },
767
- {
768
- "epoch": 0.99,
769
- "learning_rate": 3.209247035694807e-09,
770
- "loss": 0.6624,
771
- "step": 127
772
- },
773
- {
774
- "epoch": 1.0,
775
- "learning_rate": 0.0,
776
- "loss": 0.7308,
777
- "step": 128
778
- },
779
- {
780
- "epoch": 1.0,
781
- "step": 128,
782
- "total_flos": 7094238343168.0,
783
- "train_loss": 0.7068097596056759,
784
- "train_runtime": 326.9883,
785
- "train_samples_per_second": 12.502,
786
- "train_steps_per_second": 0.391
787
- }
788
- ],
789
- "logging_steps": 1.0,
790
- "max_steps": 128,
791
- "num_input_tokens_seen": 0,
792
- "num_train_epochs": 1,
793
- "save_steps": 50000,
794
- "total_flos": 7094238343168.0,
795
- "train_batch_size": 4,
796
- "trial_name": null,
797
- "trial_params": null
798
- }
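
The learning-rate column in the deleted log above traces a cosine decay that hits exactly 1e-05 at step 66 and 0.0 at the final step 128. Below is a minimal sketch of that schedule, assuming the standard linear-warmup + cosine-decay rule with a peak learning rate of 2e-05 and 4 warmup steps; neither value appears in this diff — both are inferred from the logged numbers — and the logged steps 37, 66, 100, and 128 serve as spot checks.

```python
import math

# Assumed hyperparameters (inferred, not recorded in this diff): peak LR and warmup steps.
PEAK_LR = 2e-05
WARMUP_STEPS = 4
MAX_STEPS = 128  # matches "max_steps" in the deleted trainer_state.json


def cosine_lr(step: int) -> float:
    """Linear warmup followed by cosine decay to zero (transformers-style schedule)."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))


# Spot-check against the deleted log:
# step 37 -> ~1.6704e-05, step 66 -> 1.0e-05, step 100 -> ~2.412e-06, step 128 -> 0.0
for step in (37, 66, 100, 128):
    print(step, f"{cosine_lr(step):.6e}")
```

The summary block is also internally consistent: 12.502 samples/s over a 326.99 s run is roughly 4,088 samples, or about 32 samples per optimizer step across 128 steps. With "train_batch_size": 4 per device, that would correspond to an effective batch of 32 (for example, 8 devices at 4 samples each), though the device count itself is not recorded here.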
 
llava-med4_CXR/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5e19b647446e247d6b922d826ec497ef724999b53a25480cc7b4ef7a016bfde2
3
- size 7249
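
training_args.bin was stored through Git LFS, so the repository held only this three-line pointer: the spec version URL, the sha256 of the actual blob, and its size in bytes (7249). The sketch below shows how a locally downloaded copy could be verified against that pointer; it assumes the blob has already been fetched, and the local path in the commented-out call is hypothetical.

```python
import hashlib
from pathlib import Path

# Values taken from the deleted LFS pointer for training_args.bin.
EXPECTED_SHA256 = "5e19b647446e247d6b922d826ec497ef724999b53a25480cc7b4ef7a016bfde2"
EXPECTED_SIZE = 7249


def matches_pointer(path: str) -> bool:
    """Check a downloaded blob against the size and sha256 recorded in the LFS pointer."""
    data = Path(path).read_bytes()
    if len(data) != EXPECTED_SIZE:  # cheap early exit before hashing
        return False
    return hashlib.sha256(data).hexdigest() == EXPECTED_SHA256


# Hypothetical local path; replace with wherever the blob was actually fetched to.
# print(matches_pointer("llava-med4_CXR/training_args.bin"))
```

The size comparison is only a fast pre-check; the sha256 match is the authoritative test that the fetched file is the blob this pointer referred to.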