---
license: llama2
base_model:
- unsloth/llama-2-13b
- layoric/llama-2-13b-code-alpaca
- vanillaOVO/WizardMath-13B-V1.0
library_name: transformers
tags:
- merge
---
# AIM Paper Checkpoints Uploaded For Replication
This repository contains one of the checkpoints used in the paper ["Activation-Informed Merging of Large Language Models"](https://huggingface.co/papers/2502.02421). The specifics of this model are as follows:
- **Merging Method:** task_arithmetic
- **Models Used In Merging**
  - ***Base Model:*** unsloth/llama-2-13b
  - ***Code:*** layoric/llama-2-13b-code-alpaca
  - ***Math:*** vanillaOVO/WizardMath-13B-V1.0
- **AIM:** True

Benchmark results and paper details can be found in the official [GitHub repository](https://github.com/ahnobari/ActivationInformedMerging.git).
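For reference, task arithmetic builds the merge as the base weights plus a scaled sum of task vectors (each expert's weights minus the base). Below is a minimal sketch using plain `transformers` state dicts; the scaling coefficient `lam` is an illustrative placeholder, and the AIM-specific activation step is not shown, so refer to the GitHub repository for the exact procedure.

```python
import torch
from transformers import AutoModelForCausalLM

# Minimal task-arithmetic sketch: merged = base + lam * sum_t (expert_t - base).
# lam is an assumed placeholder; the AIM activation-informed step is omitted.
def state_dict_of(name):
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16)
    return model.state_dict()

base = state_dict_of("unsloth/llama-2-13b")
experts = [
    state_dict_of("layoric/llama-2-13b-code-alpaca"),  # Code
    state_dict_of("vanillaOVO/WizardMath-13B-V1.0"),   # Math
]

lam = 1.0  # scaling coefficient (assumed, not the paper's tuned value)
merged = {
    key: w + lam * sum(e[key] - w for e in experts)
    for key, w in base.items()
}
```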
# File information
The repository contains the following files:
Filename: `tokenizer_config.json`
Content:
```json
{
"add_bos_token": true,
"add_eos_token": false,
"add_prefix_space": null,
"added_tokens_decoder": {
"0": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
}
},
"bos_token": "<s>",
"clean_up_tokenization_spaces": false,
"eos_token": "</s>",
"extra_special_tokens": {},
"legacy": false,
"model_max_length": 1000000000000000019884624838656,
"pad_token": "<unk>",
"padding_side": "left",
"sp_model_kwargs": {},
"tokenizer_class": "LlamaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false
}
```
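In practice these settings mean encoding prepends `<s>` but appends no `</s>`, and batched inputs are left-padded with `<unk>` (id 0), which keeps the final token positions aligned for generation. A quick check, using a placeholder path for this repository:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-checkpoint")  # placeholder id

ids = tok("Hello world")["input_ids"]
assert ids[0] == tok.bos_token_id    # add_bos_token=True prepends <s> (id 1)
assert ids[-1] != tok.eos_token_id   # add_eos_token=False: no trailing </s>

# padding_side="left" with pad_token=<unk>: shorter prompts get id 0 on the
# left, so the last position of every row lines up for generate().
batch = tok(["Hi", "A longer prompt"], padding=True, return_tensors="pt")
print(batch["input_ids"])
```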
Filename: `special_tokens_map.json`
Content:
```json
{
"bos_token": {
"content": "<s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
```
Filename: `config.json`
Content:
```json
{
"_name_or_path": "./FinalSet/AWR_04_task_arithmetic_Math-Code",
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 5120,
"initializer_range": 0.02,
"intermediate_size": 13824,
"max_position_embeddings": 4096,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 40,
"num_hidden_layers": 40,
"num_key_value_heads": 40,
"pad_token_id": 0,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 10000.0,
"tie_word_embeddings": false,
"torch_dtype": "float16",
"transformers_version": "4.48.0",
"unsloth_version": "2024.9",
"use_cache": true,
"vocab_size": 32000
}
```
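As a sanity check, these dimensions reproduce the checkpoint size reported in `model.safetensors.index.json` below: a 40-layer LLaMA-2 13B with untied embeddings comes to about 13.0B parameters, which at 2 bytes each (float16) matches the index's `total_size` exactly.

```python
# Parameter count derived from the config values above.
hidden, inter, layers, vocab = 5120, 13824, 40, 32000

attn  = 4 * hidden * hidden   # q/k/v/o projections (attention_bias=False)
mlp   = 3 * hidden * inter    # gate/up/down projections (mlp_bias=False)
norms = 2 * hidden            # input + post-attention RMSNorm weights
per_layer = attn + mlp + norms

embeds = 2 * vocab * hidden   # embed_tokens + lm_head (tie_word_embeddings=False)
total  = layers * per_layer + embeds + hidden  # plus the final model.norm

print(total)      # 13,015,864,320 parameters
print(total * 2)  # 26,031,728,640 bytes == "total_size" in the index below
```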
Filename: `tokenizer.json`
Content: omitted here; the file is larger than 50 KB.
Filename: `generation_config.json`
Content:
```json
{
"_from_model_config": true,
"bos_token_id": 1,
"eos_token_id": 2,
"pad_token_id": 0,
"transformers_version": "4.48.0"
}
```
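These ids (BOS 1, EOS 2, pad 0) are the defaults `generate()` falls back on when not overridden. A minimal load-and-generate sketch, again with a placeholder repository id:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "path/to/this-checkpoint"  # placeholder for this repository's id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.float16, device_map="auto"
)

inputs = tok("Q: What is 12 * 7?\nA:", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)  # stops at eos_token_id=2
print(tok.decode(out[0], skip_special_tokens=True))
```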
Filename: `tokenizer.model`
Content: omitted here; the file is larger than 50 KB.
Filename: `model.safetensors.index.json`
Content:
```json
{
"metadata": {
"total_size": 26031728640
},
"weight_map": {
"lm_head.weight": "model-00006-of-00006.safetensors",
"model.embed_tokens.weight": "model-00001-of-00006.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
"model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
"model.layers.20.input_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
"model.layers.21.input_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
"model.layers.25.input_layernorm.weight": "model-00004-of
```
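The `weight_map` above assigns each tensor to one of six shards, so a single tensor can be read without materializing the full 26 GB model. A minimal sketch using the `safetensors` library (the local path is assumed):

```python
import json
from safetensors import safe_open

repo = "./AIM-checkpoint"  # assumed local clone of this repository

with open(f"{repo}/model.safetensors.index.json") as f:
    index = json.load(f)

name  = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]   # -> "model-00001-of-00006.safetensors"
with safe_open(f"{repo}/{shard}", framework="pt") as st:
    w = st.get_tensor(name)         # float16 tensor, shape [5120, 5120]
print(w.shape, w.dtype)
```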