diff --git a/.gitattributes b/.gitattributes index 03abab212099d4341011097f810b8bf338f02a00..d12a79fbb84ccc48f9b0f55cc1de0016de8b8cb3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -89,3 +89,11 @@ Qwen3-32B/ll_bsearch_kl0.005_eap0.985_sha_bw4.29_4-8bit_seed1234/tokenizer.json Qwen3-32B/ll_bsearch_kl0.005_eap0.99_sha_bw5.74_4-8bit_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text Qwen3-32B/ll_bsearch_kl0.005_eap0.99_sha_bw5.74_4-8bit_seed42/tokenizer.json filter=lfs diff=lfs merge=lfs -text Qwen3-32B/ll_bsearch_kl0.005_eap0.985_sha_bw4.26_4-5-6-7-8bit_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json filter=lfs diff=lfs merge=lfs -text +Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json filter=lfs diff=lfs merge=lfs -text diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..df0d353583539d7603192fb456743fd52816e29f --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949a717376d70bd7c14cb9f0a04ef3e48fa6c9571a0959dbad1d7816052530b1 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..877a1a2d42af3b501c94badd8967f22c77cf8538 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f79c9f35494ee28ac2f307fda7ef1f0c699855fcd9fd88db82f5e2e1d532273e +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8d8ab9b1d182e51266c673ce6bf0c5cf4b2d9368 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:553898c0a66462cf9da4b62e3c4b60322a464a5ecd0f4ddaf5c3b653ba632710 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..df0d353583539d7603192fb456743fd52816e29f --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949a717376d70bd7c14cb9f0a04ef3e48fa6c9571a0959dbad1d7816052530b1 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..877a1a2d42af3b501c94badd8967f22c77cf8538 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f79c9f35494ee28ac2f307fda7ef1f0c699855fcd9fd88db82f5e2e1d532273e +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..887190f26439ecff7a6f256b7a8518b505505399 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b9d2c8849bb6be4634cc740b7d5d3cf942b8eccd9b7bb23b7113b21c25a3d1a +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors 
b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..5b033573c96253191741d900d6be179900f7429e --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt @@ -0,0 +1,412 @@ +# Model: meta-llama/Llama-3.1-8B-Instruct +# Layer directory: /nfs/scistore19/alistgrp/mhelcig/local/data/search/4_5_6_7_8bit_asym_g128/Llama-3.1-8B-Instruct/6bit/ +# Sensitivity method: shapley +# Estimation method: permutation_separate +# Available bitwidths: [4, 8] +# Bitwidth map: {4: 4.156, 5: 5.156, 6: 6.156, 7: 7.156, 8: 8.156} +# +# Layer groups: 128 groups (fused layers share bitwidth) +# block_0:mlp.gate_proj,mlp.up_proj.block_0:mlp.gate_proj,mlp.up_proj: group 0, 2 layers +# block_0:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 1, 3 layers +# block_10:mlp.gate_proj,mlp.up_proj.block_10:mlp.gate_proj,mlp.up_proj: group 20, 2 layers +# block_10:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 21, 3 layers +# block_11:mlp.gate_proj,mlp.up_proj.block_11:mlp.gate_proj,mlp.up_proj: group 22, 2 layers +# block_11:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 23, 3 layers +# block_12:mlp.gate_proj,mlp.up_proj.block_12:mlp.gate_proj,mlp.up_proj: group 24, 2 layers +# block_12:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 25, 3 layers +# block_13:mlp.gate_proj,mlp.up_proj.block_13:mlp.gate_proj,mlp.up_proj: group 26, 2 layers +# block_13:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 27, 3 layers +# block_14:mlp.gate_proj,mlp.up_proj.block_14:mlp.gate_proj,mlp.up_proj: group 28, 2 layers +# block_14:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 29, 3 layers +# block_15:mlp.gate_proj,mlp.up_proj.block_15:mlp.gate_proj,mlp.up_proj: group 30, 2 layers +# block_15:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 31, 3 layers +# block_16:mlp.gate_proj,mlp.up_proj.block_16:mlp.gate_proj,mlp.up_proj: group 32, 2 layers +# block_16:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 33, 3 layers +# block_17:mlp.gate_proj,mlp.up_proj.block_17:mlp.gate_proj,mlp.up_proj: group 34, 2 layers +# block_17:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 35, 3 layers +# block_18:mlp.gate_proj,mlp.up_proj.block_18:mlp.gate_proj,mlp.up_proj: group 36, 2 layers +# block_18:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 37, 3 layers +# block_19:mlp.gate_proj,mlp.up_proj.block_19:mlp.gate_proj,mlp.up_proj: group 38, 2 layers +# block_19:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 39, 3 layers +# block_1:mlp.gate_proj,mlp.up_proj.block_1:mlp.gate_proj,mlp.up_proj: group 2, 2 layers +# block_1:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 3, 3 layers +# 
block_20:mlp.gate_proj,mlp.up_proj.block_20:mlp.gate_proj,mlp.up_proj: group 40, 2 layers +# block_20:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 41, 3 layers +# block_21:mlp.gate_proj,mlp.up_proj.block_21:mlp.gate_proj,mlp.up_proj: group 42, 2 layers +# block_21:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 43, 3 layers +# block_22:mlp.gate_proj,mlp.up_proj.block_22:mlp.gate_proj,mlp.up_proj: group 44, 2 layers +# block_22:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 45, 3 layers +# block_23:mlp.gate_proj,mlp.up_proj.block_23:mlp.gate_proj,mlp.up_proj: group 46, 2 layers +# block_23:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 47, 3 layers +# block_24:mlp.gate_proj,mlp.up_proj.block_24:mlp.gate_proj,mlp.up_proj: group 48, 2 layers +# block_24:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 49, 3 layers +# block_25:mlp.gate_proj,mlp.up_proj.block_25:mlp.gate_proj,mlp.up_proj: group 50, 2 layers +# block_25:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 51, 3 layers +# block_26:mlp.gate_proj,mlp.up_proj.block_26:mlp.gate_proj,mlp.up_proj: group 52, 2 layers +# block_26:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 53, 3 layers +# block_27:mlp.gate_proj,mlp.up_proj.block_27:mlp.gate_proj,mlp.up_proj: group 54, 2 layers +# block_27:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 55, 3 layers +# block_28:mlp.gate_proj,mlp.up_proj.block_28:mlp.gate_proj,mlp.up_proj: group 56, 2 layers +# block_28:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 57, 3 layers +# block_29:mlp.gate_proj,mlp.up_proj.block_29:mlp.gate_proj,mlp.up_proj: group 58, 2 layers +# block_29:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 59, 3 layers +# block_2:mlp.gate_proj,mlp.up_proj.block_2:mlp.gate_proj,mlp.up_proj: group 4, 2 layers +# block_2:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 5, 3 layers +# block_30:mlp.gate_proj,mlp.up_proj.block_30:mlp.gate_proj,mlp.up_proj: group 60, 2 layers +# block_30:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 61, 3 layers +# block_31:mlp.gate_proj,mlp.up_proj.block_31:mlp.gate_proj,mlp.up_proj: group 62, 2 layers +# block_31:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 63, 3 layers +# block_3:mlp.gate_proj,mlp.up_proj.block_3:mlp.gate_proj,mlp.up_proj: group 6, 2 layers +# block_3:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 7, 3 layers +# block_4:mlp.gate_proj,mlp.up_proj.block_4:mlp.gate_proj,mlp.up_proj: group 8, 2 layers +# block_4:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 9, 3 layers +# block_5:mlp.gate_proj,mlp.up_proj.block_5:mlp.gate_proj,mlp.up_proj: group 10, 2 layers +# block_5:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 11, 3 layers +# block_6:mlp.gate_proj,mlp.up_proj.block_6:mlp.gate_proj,mlp.up_proj: group 12, 2 layers +# block_6:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 13, 3 layers +# block_7:mlp.gate_proj,mlp.up_proj.block_7:mlp.gate_proj,mlp.up_proj: group 14, 2 layers +# block_7:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 15, 3 layers +# block_8:mlp.gate_proj,mlp.up_proj.block_8:mlp.gate_proj,mlp.up_proj: group 16, 2 layers +# block_8:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 17, 3 layers +# block_9:mlp.gate_proj,mlp.up_proj.block_9:mlp.gate_proj,mlp.up_proj: group 18, 2 layers +# block_9:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 19, 3 layers +# Fused in model.layers.0: +# - self_attn.o_proj (group 64, 1 
layers) +# - mlp.down_proj (group 65, 1 layers) +# Fused in model.layers.1: +# - self_attn.o_proj (group 66, 1 layers) +# - mlp.down_proj (group 67, 1 layers) +# Fused in model.layers.10: +# - self_attn.o_proj (group 84, 1 layers) +# - mlp.down_proj (group 85, 1 layers) +# Fused in model.layers.11: +# - self_attn.o_proj (group 86, 1 layers) +# - mlp.down_proj (group 87, 1 layers) +# Fused in model.layers.12: +# - self_attn.o_proj (group 88, 1 layers) +# - mlp.down_proj (group 89, 1 layers) +# Fused in model.layers.13: +# - self_attn.o_proj (group 90, 1 layers) +# - mlp.down_proj (group 91, 1 layers) +# Fused in model.layers.14: +# - self_attn.o_proj (group 92, 1 layers) +# - mlp.down_proj (group 93, 1 layers) +# Fused in model.layers.15: +# - self_attn.o_proj (group 94, 1 layers) +# - mlp.down_proj (group 95, 1 layers) +# Fused in model.layers.16: +# - self_attn.o_proj (group 96, 1 layers) +# - mlp.down_proj (group 97, 1 layers) +# Fused in model.layers.17: +# - self_attn.o_proj (group 98, 1 layers) +# - mlp.down_proj (group 99, 1 layers) +# Fused in model.layers.18: +# - self_attn.o_proj (group 100, 1 layers) +# - mlp.down_proj (group 101, 1 layers) +# Fused in model.layers.19: +# - self_attn.o_proj (group 102, 1 layers) +# - mlp.down_proj (group 103, 1 layers) +# Fused in model.layers.2: +# - self_attn.o_proj (group 68, 1 layers) +# - mlp.down_proj (group 69, 1 layers) +# Fused in model.layers.20: +# - self_attn.o_proj (group 104, 1 layers) +# - mlp.down_proj (group 105, 1 layers) +# Fused in model.layers.21: +# - self_attn.o_proj (group 106, 1 layers) +# - mlp.down_proj (group 107, 1 layers) +# Fused in model.layers.22: +# - self_attn.o_proj (group 108, 1 layers) +# - mlp.down_proj (group 109, 1 layers) +# Fused in model.layers.23: +# - self_attn.o_proj (group 110, 1 layers) +# - mlp.down_proj (group 111, 1 layers) +# Fused in model.layers.24: +# - self_attn.o_proj (group 112, 1 layers) +# - mlp.down_proj (group 113, 1 layers) +# Fused in model.layers.25: +# - self_attn.o_proj (group 114, 1 layers) +# - mlp.down_proj (group 115, 1 layers) +# Fused in model.layers.26: +# - self_attn.o_proj (group 116, 1 layers) +# - mlp.down_proj (group 117, 1 layers) +# Fused in model.layers.27: +# - self_attn.o_proj (group 118, 1 layers) +# - mlp.down_proj (group 119, 1 layers) +# Fused in model.layers.28: +# - self_attn.o_proj (group 120, 1 layers) +# - mlp.down_proj (group 121, 1 layers) +# Fused in model.layers.29: +# - self_attn.o_proj (group 122, 1 layers) +# - mlp.down_proj (group 123, 1 layers) +# Fused in model.layers.3: +# - self_attn.o_proj (group 70, 1 layers) +# - mlp.down_proj (group 71, 1 layers) +# Fused in model.layers.30: +# - self_attn.o_proj (group 124, 1 layers) +# - mlp.down_proj (group 125, 1 layers) +# Fused in model.layers.31: +# - self_attn.o_proj (group 126, 1 layers) +# - mlp.down_proj (group 127, 1 layers) +# Fused in model.layers.4: +# - self_attn.o_proj (group 72, 1 layers) +# - mlp.down_proj (group 73, 1 layers) +# Fused in model.layers.5: +# - self_attn.o_proj (group 74, 1 layers) +# - mlp.down_proj (group 75, 1 layers) +# Fused in model.layers.6: +# - self_attn.o_proj (group 76, 1 layers) +# - mlp.down_proj (group 77, 1 layers) +# Fused in model.layers.7: +# - self_attn.o_proj (group 78, 1 layers) +# - mlp.down_proj (group 79, 1 layers) +# Fused in model.layers.8: +# - self_attn.o_proj (group 80, 1 layers) +# - mlp.down_proj (group 81, 1 layers) +# Fused in model.layers.9: +# - self_attn.o_proj (group 82, 1 layers) +# - mlp.down_proj (group 83, 1 layers) +# +# Mode: 
binary_search_constraint (measured) +# Constraint max_kl: 0.005 +# Constraint min_eap: 0.985 +# Weights: nll=0.0, kl=0.0, eap=1.0 +# +# Average bitwidth: 7.7089 +# Total params: 6979321856 +# Total bits: 53802786882 +# Final KL: 0.002595 +# Final EAP: 0.985184 +# Final ETL: 0.014816 +# Satisfies constraints: True +# Solver calls: 9 +# Evaluations: 9 +# +# Bitwidth distribution: +# 8-bit: 210 layers (93.8%) +# 4-bit: 14 layers (6.2%) +# +model.layers.0.mlp.gate_proj: 4 +model.layers.0.mlp.up_proj: 4 +model.layers.0.self_attn.k_proj: 8 +model.layers.0.self_attn.q_proj: 8 +model.layers.0.self_attn.v_proj: 8 +model.layers.1.mlp.gate_proj: 8 +model.layers.1.mlp.up_proj: 8 +model.layers.1.self_attn.k_proj: 8 +model.layers.1.self_attn.q_proj: 8 +model.layers.1.self_attn.v_proj: 8 +model.layers.2.mlp.gate_proj: 8 +model.layers.2.mlp.up_proj: 8 +model.layers.2.self_attn.k_proj: 8 +model.layers.2.self_attn.q_proj: 8 +model.layers.2.self_attn.v_proj: 8 +model.layers.3.mlp.gate_proj: 8 +model.layers.3.mlp.up_proj: 8 +model.layers.3.self_attn.k_proj: 8 +model.layers.3.self_attn.q_proj: 8 +model.layers.3.self_attn.v_proj: 8 +model.layers.4.mlp.gate_proj: 8 +model.layers.4.mlp.up_proj: 8 +model.layers.4.self_attn.k_proj: 8 +model.layers.4.self_attn.q_proj: 8 +model.layers.4.self_attn.v_proj: 8 +model.layers.5.mlp.gate_proj: 8 +model.layers.5.mlp.up_proj: 8 +model.layers.5.self_attn.k_proj: 8 +model.layers.5.self_attn.q_proj: 8 +model.layers.5.self_attn.v_proj: 8 +model.layers.6.mlp.gate_proj: 8 +model.layers.6.mlp.up_proj: 8 +model.layers.6.self_attn.k_proj: 8 +model.layers.6.self_attn.q_proj: 8 +model.layers.6.self_attn.v_proj: 8 +model.layers.7.mlp.gate_proj: 8 +model.layers.7.mlp.up_proj: 8 +model.layers.7.self_attn.k_proj: 8 +model.layers.7.self_attn.q_proj: 8 +model.layers.7.self_attn.v_proj: 8 +model.layers.8.mlp.gate_proj: 8 +model.layers.8.mlp.up_proj: 8 +model.layers.8.self_attn.k_proj: 8 +model.layers.8.self_attn.q_proj: 8 +model.layers.8.self_attn.v_proj: 8 +model.layers.9.mlp.gate_proj: 8 +model.layers.9.mlp.up_proj: 8 +model.layers.9.self_attn.k_proj: 8 +model.layers.9.self_attn.q_proj: 8 +model.layers.9.self_attn.v_proj: 8 +model.layers.10.mlp.gate_proj: 8 +model.layers.10.mlp.up_proj: 8 +model.layers.10.self_attn.k_proj: 8 +model.layers.10.self_attn.q_proj: 8 +model.layers.10.self_attn.v_proj: 8 +model.layers.11.mlp.gate_proj: 8 +model.layers.11.mlp.up_proj: 8 +model.layers.11.self_attn.k_proj: 8 +model.layers.11.self_attn.q_proj: 8 +model.layers.11.self_attn.v_proj: 8 +model.layers.12.mlp.gate_proj: 8 +model.layers.12.mlp.up_proj: 8 +model.layers.12.self_attn.k_proj: 8 +model.layers.12.self_attn.q_proj: 8 +model.layers.12.self_attn.v_proj: 8 +model.layers.13.mlp.gate_proj: 8 +model.layers.13.mlp.up_proj: 8 +model.layers.13.self_attn.k_proj: 8 +model.layers.13.self_attn.q_proj: 8 +model.layers.13.self_attn.v_proj: 8 +model.layers.14.mlp.gate_proj: 8 +model.layers.14.mlp.up_proj: 8 +model.layers.14.self_attn.k_proj: 8 +model.layers.14.self_attn.q_proj: 8 +model.layers.14.self_attn.v_proj: 8 +model.layers.15.mlp.gate_proj: 8 +model.layers.15.mlp.up_proj: 8 +model.layers.15.self_attn.k_proj: 8 +model.layers.15.self_attn.q_proj: 8 +model.layers.15.self_attn.v_proj: 8 +model.layers.16.mlp.gate_proj: 8 +model.layers.16.mlp.up_proj: 8 +model.layers.16.self_attn.k_proj: 8 +model.layers.16.self_attn.q_proj: 8 +model.layers.16.self_attn.v_proj: 8 +model.layers.17.mlp.gate_proj: 8 +model.layers.17.mlp.up_proj: 8 +model.layers.17.self_attn.k_proj: 8 +model.layers.17.self_attn.q_proj: 8 
+model.layers.17.self_attn.v_proj: 8 +model.layers.18.mlp.gate_proj: 8 +model.layers.18.mlp.up_proj: 8 +model.layers.18.self_attn.k_proj: 8 +model.layers.18.self_attn.q_proj: 8 +model.layers.18.self_attn.v_proj: 8 +model.layers.19.mlp.gate_proj: 8 +model.layers.19.mlp.up_proj: 8 +model.layers.19.self_attn.k_proj: 8 +model.layers.19.self_attn.q_proj: 8 +model.layers.19.self_attn.v_proj: 8 +model.layers.20.mlp.gate_proj: 8 +model.layers.20.mlp.up_proj: 8 +model.layers.20.self_attn.k_proj: 8 +model.layers.20.self_attn.q_proj: 8 +model.layers.20.self_attn.v_proj: 8 +model.layers.21.mlp.gate_proj: 8 +model.layers.21.mlp.up_proj: 8 +model.layers.21.self_attn.k_proj: 8 +model.layers.21.self_attn.q_proj: 8 +model.layers.21.self_attn.v_proj: 8 +model.layers.22.mlp.gate_proj: 8 +model.layers.22.mlp.up_proj: 8 +model.layers.22.self_attn.k_proj: 8 +model.layers.22.self_attn.q_proj: 8 +model.layers.22.self_attn.v_proj: 8 +model.layers.23.mlp.gate_proj: 8 +model.layers.23.mlp.up_proj: 8 +model.layers.23.self_attn.k_proj: 8 +model.layers.23.self_attn.q_proj: 8 +model.layers.23.self_attn.v_proj: 8 +model.layers.24.mlp.gate_proj: 8 +model.layers.24.mlp.up_proj: 8 +model.layers.24.self_attn.k_proj: 8 +model.layers.24.self_attn.q_proj: 8 +model.layers.24.self_attn.v_proj: 8 +model.layers.25.mlp.gate_proj: 8 +model.layers.25.mlp.up_proj: 8 +model.layers.25.self_attn.k_proj: 8 +model.layers.25.self_attn.q_proj: 8 +model.layers.25.self_attn.v_proj: 8 +model.layers.26.mlp.gate_proj: 8 +model.layers.26.mlp.up_proj: 8 +model.layers.26.self_attn.k_proj: 8 +model.layers.26.self_attn.q_proj: 8 +model.layers.26.self_attn.v_proj: 8 +model.layers.27.mlp.gate_proj: 8 +model.layers.27.mlp.up_proj: 8 +model.layers.27.self_attn.k_proj: 8 +model.layers.27.self_attn.q_proj: 8 +model.layers.27.self_attn.v_proj: 8 +model.layers.28.mlp.gate_proj: 8 +model.layers.28.mlp.up_proj: 8 +model.layers.28.self_attn.k_proj: 8 +model.layers.28.self_attn.q_proj: 8 +model.layers.28.self_attn.v_proj: 8 +model.layers.29.mlp.gate_proj: 8 +model.layers.29.mlp.up_proj: 8 +model.layers.29.self_attn.k_proj: 8 +model.layers.29.self_attn.q_proj: 8 +model.layers.29.self_attn.v_proj: 8 +model.layers.30.mlp.gate_proj: 8 +model.layers.30.mlp.up_proj: 8 +model.layers.30.self_attn.k_proj: 8 +model.layers.30.self_attn.q_proj: 8 +model.layers.30.self_attn.v_proj: 8 +model.layers.31.mlp.gate_proj: 8 +model.layers.31.mlp.up_proj: 8 +model.layers.31.self_attn.k_proj: 8 +model.layers.31.self_attn.q_proj: 8 +model.layers.31.self_attn.v_proj: 8 +model.layers.0.self_attn.o_proj: 8 +model.layers.0.mlp.down_proj: 8 +model.layers.1.self_attn.o_proj: 8 +model.layers.1.mlp.down_proj: 8 +model.layers.2.self_attn.o_proj: 8 +model.layers.2.mlp.down_proj: 4 +model.layers.3.self_attn.o_proj: 8 +model.layers.3.mlp.down_proj: 8 +model.layers.4.self_attn.o_proj: 8 +model.layers.4.mlp.down_proj: 8 +model.layers.5.self_attn.o_proj: 8 +model.layers.5.mlp.down_proj: 8 +model.layers.6.self_attn.o_proj: 8 +model.layers.6.mlp.down_proj: 8 +model.layers.7.self_attn.o_proj: 8 +model.layers.7.mlp.down_proj: 4 +model.layers.8.self_attn.o_proj: 8 +model.layers.8.mlp.down_proj: 4 +model.layers.9.self_attn.o_proj: 8 +model.layers.9.mlp.down_proj: 4 +model.layers.10.self_attn.o_proj: 8 +model.layers.10.mlp.down_proj: 4 +model.layers.11.self_attn.o_proj: 8 +model.layers.11.mlp.down_proj: 8 +model.layers.12.self_attn.o_proj: 8 +model.layers.12.mlp.down_proj: 4 +model.layers.13.self_attn.o_proj: 8 +model.layers.13.mlp.down_proj: 8 +model.layers.14.self_attn.o_proj: 8 
+model.layers.14.mlp.down_proj: 8 +model.layers.15.self_attn.o_proj: 8 +model.layers.15.mlp.down_proj: 8 +model.layers.16.self_attn.o_proj: 8 +model.layers.16.mlp.down_proj: 8 +model.layers.17.self_attn.o_proj: 8 +model.layers.17.mlp.down_proj: 8 +model.layers.18.self_attn.o_proj: 8 +model.layers.18.mlp.down_proj: 8 +model.layers.19.self_attn.o_proj: 8 +model.layers.19.mlp.down_proj: 8 +model.layers.20.self_attn.o_proj: 8 +model.layers.20.mlp.down_proj: 8 +model.layers.21.self_attn.o_proj: 8 +model.layers.21.mlp.down_proj: 8 +model.layers.22.self_attn.o_proj: 8 +model.layers.22.mlp.down_proj: 8 +model.layers.23.self_attn.o_proj: 8 +model.layers.23.mlp.down_proj: 8 +model.layers.24.self_attn.o_proj: 8 +model.layers.24.mlp.down_proj: 8 +model.layers.25.self_attn.o_proj: 4 +model.layers.25.mlp.down_proj: 4 +model.layers.26.self_attn.o_proj: 8 +model.layers.26.mlp.down_proj: 4 +model.layers.27.self_attn.o_proj: 8 +model.layers.27.mlp.down_proj: 4 +model.layers.28.self_attn.o_proj: 8 +model.layers.28.mlp.down_proj: 4 +model.layers.29.self_attn.o_proj: 8 +model.layers.29.mlp.down_proj: 4 +model.layers.30.self_attn.o_proj: 8 +model.layers.30.mlp.down_proj: 8 +model.layers.31.self_attn.o_proj: 8 +model.layers.31.mlp.down_proj: 8 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/README.md b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e0285486859657f0b8255e67ec1ce47cbbf1d9a --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/README.md @@ -0,0 +1,18 @@ +# Quantized Model Checkpoint + +**Base model:** meta-llama/Llama-3.1-8B-Instruct + +**Average bitwidth:** 8.0118 + +**Sensitivity method:** shapley + +**Constraints:** +- max_kl: 0.005 +- min_eap: 0.99 + +**Metrics:** +- final_kl: 0.001238 +- final_eap: 0.990277 +- final_etl: 0.009723 + +See `quantization_config.txt` for full configuration details. 
diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/chat_template.jinja b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/chat_template.jinja new file mode 100644 index 0000000000000000000000000000000000000000..33089ace1be88f22a10fe861ad49718d5d886090 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/chat_template.jinja @@ -0,0 +1,109 @@ +{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not date_string is defined %} + {%- set date_string = "26 Jul 2024" %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- This block extracts the system message, so we can slot it into the right place. #} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} + +{#- System message + builtin tools #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if builtin_tools is defined or tools is not none %} + {{- "Environment: ipython\n" }} +{%- endif %} +{%- if builtin_tools is defined %} + {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n" }} +{{- "Today Date: " + date_string + "\n\n" }} +{%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt.\n\n" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' 
}} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {%- if builtin_tools is defined and tool_call.name in builtin_tools %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- "<|python_tag|>" + tool_call.name + ".call(" }} + {%- for arg_name, arg_val in tool_call.arguments | items %} + {{- arg_name + '="' + arg_val + '"' }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- else %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- endif %} + {%- if builtin_tools is defined %} + {#- This means we're in ipython mode #} + {{- "<|eom_id|>" }} + {%- else %} + {{- "<|eot_id|>" }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/generation_config.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..993459bf55ed73c1390809c2e2a3d7c1c0e0d844 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/generation_config.json @@ -0,0 +1,12 @@ +{ + "bos_token_id": 128000, + "do_sample": true, + "eos_token_id": [ + 128001, + 128008, + 128009 + ], + "temperature": 0.6, + "top_p": 0.9, + "transformers_version": "4.57.3" +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..db71f26566b004203e776ba1248e8052eaebeb47 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d066765903924aec0e915a00987ef9ffe0ee366e52368f44884ab71f3e1298 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e415e91b845d25ab9b9e0e32b2ed876bb2d7f3d2 --- /dev/null +++ 
b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:779c706f56bce5452da8b5fda679c8bd3125b870678e5544b5debd531844d939 +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..644745d32960e62f170dbe2ef5b830e4d21adafc --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37681ec6ff920f2de78c107de2c44abbf969646f21ada20bde02938b153054e9 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model.safetensors.index.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..5c64f1e87be95160fabc494eebfa0f7e68064af2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model.safetensors.index.json @@ -0,0 +1,299 @@ +{ + "metadata": { + "total_parameters": 8030261248, + "total_size": 16060522496 + }, + "weight_map": { + "lm_head.weight": "model-00004-of-00004.safetensors", + "model.embed_tokens.weight": "model-00001-of-00004.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.o_proj.weight": 
"model-00001-of-00004.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.k_proj.weight": 
"model-00002-of-00004.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.post_attention_layernorm.weight": 
"model-00002-of-00004.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.up_proj.weight": 
"model-00003-of-00004.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.gate_proj.weight": 
"model-00003-of-00004.safetensors", + "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.down_proj.weight": 
"model-00001-of-00004.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", + 
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.norm.weight": "model-00004-of-00004.safetensors" + } +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/quantization_config.txt b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/quantization_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..b5eb704482c193ea7b67adebc4e20291751b7c69 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/quantization_config.txt @@ -0,0 +1,412 @@ +# Model: meta-llama/Llama-3.1-8B-Instruct +# Layer directory: /nfs/scistore19/alistgrp/mhelcig/local/data/search/4_5_6_7_8bit_asym_g128/Llama-3.1-8B-Instruct/6bit/ +# Sensitivity method: shapley +# Estimation method: permutation_separate +# Available bitwidths: [4, 8] +# Bitwidth map: {4: 4.156, 5: 5.156, 6: 6.156, 7: 7.156, 8: 8.156} +# +# Layer groups: 128 groups (fused layers share bitwidth) +# block_0:mlp.gate_proj,mlp.up_proj.block_0:mlp.gate_proj,mlp.up_proj: group 0, 2 layers +# block_0:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 1, 3 layers +# block_10:mlp.gate_proj,mlp.up_proj.block_10:mlp.gate_proj,mlp.up_proj: group 20, 2 layers +# block_10:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 21, 3 layers +# block_11:mlp.gate_proj,mlp.up_proj.block_11:mlp.gate_proj,mlp.up_proj: group 22, 2 layers +# block_11:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 23, 3 layers +# block_12:mlp.gate_proj,mlp.up_proj.block_12:mlp.gate_proj,mlp.up_proj: group 24, 2 layers +# block_12:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 25, 3 layers +# block_13:mlp.gate_proj,mlp.up_proj.block_13:mlp.gate_proj,mlp.up_proj: group 26, 2 layers +# block_13:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 27, 3 layers +# block_14:mlp.gate_proj,mlp.up_proj.block_14:mlp.gate_proj,mlp.up_proj: group 28, 2 layers +# block_14:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 29, 3 layers +# block_15:mlp.gate_proj,mlp.up_proj.block_15:mlp.gate_proj,mlp.up_proj: group 30, 2 layers +# block_15:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 31, 3 layers +# block_16:mlp.gate_proj,mlp.up_proj.block_16:mlp.gate_proj,mlp.up_proj: group 32, 2 layers +# block_16:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 33, 3 layers +# block_17:mlp.gate_proj,mlp.up_proj.block_17:mlp.gate_proj,mlp.up_proj: group 34, 2 layers +# block_17:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 35, 3 layers +# block_18:mlp.gate_proj,mlp.up_proj.block_18:mlp.gate_proj,mlp.up_proj: group 36, 2 layers +# block_18:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 37, 3 layers +# block_19:mlp.gate_proj,mlp.up_proj.block_19:mlp.gate_proj,mlp.up_proj: group 38, 2 layers +# block_19:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 39, 3 layers +# 
block_1:mlp.gate_proj,mlp.up_proj.block_1:mlp.gate_proj,mlp.up_proj: group 2, 2 layers +# block_1:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 3, 3 layers +# block_20:mlp.gate_proj,mlp.up_proj.block_20:mlp.gate_proj,mlp.up_proj: group 40, 2 layers +# block_20:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 41, 3 layers +# block_21:mlp.gate_proj,mlp.up_proj.block_21:mlp.gate_proj,mlp.up_proj: group 42, 2 layers +# block_21:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 43, 3 layers +# block_22:mlp.gate_proj,mlp.up_proj.block_22:mlp.gate_proj,mlp.up_proj: group 44, 2 layers +# block_22:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 45, 3 layers +# block_23:mlp.gate_proj,mlp.up_proj.block_23:mlp.gate_proj,mlp.up_proj: group 46, 2 layers +# block_23:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 47, 3 layers +# block_24:mlp.gate_proj,mlp.up_proj.block_24:mlp.gate_proj,mlp.up_proj: group 48, 2 layers +# block_24:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 49, 3 layers +# block_25:mlp.gate_proj,mlp.up_proj.block_25:mlp.gate_proj,mlp.up_proj: group 50, 2 layers +# block_25:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 51, 3 layers +# block_26:mlp.gate_proj,mlp.up_proj.block_26:mlp.gate_proj,mlp.up_proj: group 52, 2 layers +# block_26:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 53, 3 layers +# block_27:mlp.gate_proj,mlp.up_proj.block_27:mlp.gate_proj,mlp.up_proj: group 54, 2 layers +# block_27:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 55, 3 layers +# block_28:mlp.gate_proj,mlp.up_proj.block_28:mlp.gate_proj,mlp.up_proj: group 56, 2 layers +# block_28:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 57, 3 layers +# block_29:mlp.gate_proj,mlp.up_proj.block_29:mlp.gate_proj,mlp.up_proj: group 58, 2 layers +# block_29:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 59, 3 layers +# block_2:mlp.gate_proj,mlp.up_proj.block_2:mlp.gate_proj,mlp.up_proj: group 4, 2 layers +# block_2:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 5, 3 layers +# block_30:mlp.gate_proj,mlp.up_proj.block_30:mlp.gate_proj,mlp.up_proj: group 60, 2 layers +# block_30:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 61, 3 layers +# block_31:mlp.gate_proj,mlp.up_proj.block_31:mlp.gate_proj,mlp.up_proj: group 62, 2 layers +# block_31:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 63, 3 layers +# block_3:mlp.gate_proj,mlp.up_proj.block_3:mlp.gate_proj,mlp.up_proj: group 6, 2 layers +# block_3:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 7, 3 layers +# block_4:mlp.gate_proj,mlp.up_proj.block_4:mlp.gate_proj,mlp.up_proj: group 8, 2 layers +# block_4:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 9, 3 layers +# block_5:mlp.gate_proj,mlp.up_proj.block_5:mlp.gate_proj,mlp.up_proj: group 10, 2 layers +# block_5:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 11, 3 layers +# block_6:mlp.gate_proj,mlp.up_proj.block_6:mlp.gate_proj,mlp.up_proj: group 12, 2 layers +# block_6:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 13, 3 layers +# block_7:mlp.gate_proj,mlp.up_proj.block_7:mlp.gate_proj,mlp.up_proj: group 14, 2 layers +# block_7:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 15, 3 layers +# block_8:mlp.gate_proj,mlp.up_proj.block_8:mlp.gate_proj,mlp.up_proj: group 16, 2 layers +# block_8:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 17, 3 layers +# 
block_9:mlp.gate_proj,mlp.up_proj.block_9:mlp.gate_proj,mlp.up_proj: group 18, 2 layers +# block_9:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 19, 3 layers +# Fused in model.layers.0: +# - self_attn.o_proj (group 64, 1 layers) +# - mlp.down_proj (group 65, 1 layers) +# Fused in model.layers.1: +# - self_attn.o_proj (group 66, 1 layers) +# - mlp.down_proj (group 67, 1 layers) +# Fused in model.layers.10: +# - self_attn.o_proj (group 84, 1 layers) +# - mlp.down_proj (group 85, 1 layers) +# Fused in model.layers.11: +# - self_attn.o_proj (group 86, 1 layers) +# - mlp.down_proj (group 87, 1 layers) +# Fused in model.layers.12: +# - self_attn.o_proj (group 88, 1 layers) +# - mlp.down_proj (group 89, 1 layers) +# Fused in model.layers.13: +# - self_attn.o_proj (group 90, 1 layers) +# - mlp.down_proj (group 91, 1 layers) +# Fused in model.layers.14: +# - self_attn.o_proj (group 92, 1 layers) +# - mlp.down_proj (group 93, 1 layers) +# Fused in model.layers.15: +# - self_attn.o_proj (group 94, 1 layers) +# - mlp.down_proj (group 95, 1 layers) +# Fused in model.layers.16: +# - self_attn.o_proj (group 96, 1 layers) +# - mlp.down_proj (group 97, 1 layers) +# Fused in model.layers.17: +# - self_attn.o_proj (group 98, 1 layers) +# - mlp.down_proj (group 99, 1 layers) +# Fused in model.layers.18: +# - self_attn.o_proj (group 100, 1 layers) +# - mlp.down_proj (group 101, 1 layers) +# Fused in model.layers.19: +# - self_attn.o_proj (group 102, 1 layers) +# - mlp.down_proj (group 103, 1 layers) +# Fused in model.layers.2: +# - self_attn.o_proj (group 68, 1 layers) +# - mlp.down_proj (group 69, 1 layers) +# Fused in model.layers.20: +# - self_attn.o_proj (group 104, 1 layers) +# - mlp.down_proj (group 105, 1 layers) +# Fused in model.layers.21: +# - self_attn.o_proj (group 106, 1 layers) +# - mlp.down_proj (group 107, 1 layers) +# Fused in model.layers.22: +# - self_attn.o_proj (group 108, 1 layers) +# - mlp.down_proj (group 109, 1 layers) +# Fused in model.layers.23: +# - self_attn.o_proj (group 110, 1 layers) +# - mlp.down_proj (group 111, 1 layers) +# Fused in model.layers.24: +# - self_attn.o_proj (group 112, 1 layers) +# - mlp.down_proj (group 113, 1 layers) +# Fused in model.layers.25: +# - self_attn.o_proj (group 114, 1 layers) +# - mlp.down_proj (group 115, 1 layers) +# Fused in model.layers.26: +# - self_attn.o_proj (group 116, 1 layers) +# - mlp.down_proj (group 117, 1 layers) +# Fused in model.layers.27: +# - self_attn.o_proj (group 118, 1 layers) +# - mlp.down_proj (group 119, 1 layers) +# Fused in model.layers.28: +# - self_attn.o_proj (group 120, 1 layers) +# - mlp.down_proj (group 121, 1 layers) +# Fused in model.layers.29: +# - self_attn.o_proj (group 122, 1 layers) +# - mlp.down_proj (group 123, 1 layers) +# Fused in model.layers.3: +# - self_attn.o_proj (group 70, 1 layers) +# - mlp.down_proj (group 71, 1 layers) +# Fused in model.layers.30: +# - self_attn.o_proj (group 124, 1 layers) +# - mlp.down_proj (group 125, 1 layers) +# Fused in model.layers.31: +# - self_attn.o_proj (group 126, 1 layers) +# - mlp.down_proj (group 127, 1 layers) +# Fused in model.layers.4: +# - self_attn.o_proj (group 72, 1 layers) +# - mlp.down_proj (group 73, 1 layers) +# Fused in model.layers.5: +# - self_attn.o_proj (group 74, 1 layers) +# - mlp.down_proj (group 75, 1 layers) +# Fused in model.layers.6: +# - self_attn.o_proj (group 76, 1 layers) +# - mlp.down_proj (group 77, 1 layers) +# Fused in model.layers.7: +# - self_attn.o_proj (group 78, 1 layers) +# - mlp.down_proj (group 79, 1 layers) +# 
Fused in model.layers.8: +# - self_attn.o_proj (group 80, 1 layers) +# - mlp.down_proj (group 81, 1 layers) +# Fused in model.layers.9: +# - self_attn.o_proj (group 82, 1 layers) +# - mlp.down_proj (group 83, 1 layers) +# +# Mode: binary_search_constraint (measured) +# Constraint max_kl: 0.005 +# Constraint min_eap: 0.99 +# Weights: nll=0.0, kl=0.0, eap=1.0 +# +# Average bitwidth: 8.0118 +# Total params: 6979321856 +# Total bits: 55916716098 +# Final KL: 0.001238 +# Final EAP: 0.990277 +# Final ETL: 0.009723 +# Satisfies constraints: True +# Solver calls: 9 +# Evaluations: 9 +# +# Bitwidth distribution: +# 8-bit: 219 layers (97.8%) +# 4-bit: 5 layers (2.2%) +# +model.layers.0.mlp.gate_proj: 8 +model.layers.0.mlp.up_proj: 8 +model.layers.0.self_attn.k_proj: 8 +model.layers.0.self_attn.q_proj: 8 +model.layers.0.self_attn.v_proj: 8 +model.layers.1.mlp.gate_proj: 8 +model.layers.1.mlp.up_proj: 8 +model.layers.1.self_attn.k_proj: 8 +model.layers.1.self_attn.q_proj: 8 +model.layers.1.self_attn.v_proj: 8 +model.layers.2.mlp.gate_proj: 8 +model.layers.2.mlp.up_proj: 8 +model.layers.2.self_attn.k_proj: 8 +model.layers.2.self_attn.q_proj: 8 +model.layers.2.self_attn.v_proj: 8 +model.layers.3.mlp.gate_proj: 8 +model.layers.3.mlp.up_proj: 8 +model.layers.3.self_attn.k_proj: 8 +model.layers.3.self_attn.q_proj: 8 +model.layers.3.self_attn.v_proj: 8 +model.layers.4.mlp.gate_proj: 8 +model.layers.4.mlp.up_proj: 8 +model.layers.4.self_attn.k_proj: 8 +model.layers.4.self_attn.q_proj: 8 +model.layers.4.self_attn.v_proj: 8 +model.layers.5.mlp.gate_proj: 8 +model.layers.5.mlp.up_proj: 8 +model.layers.5.self_attn.k_proj: 8 +model.layers.5.self_attn.q_proj: 8 +model.layers.5.self_attn.v_proj: 8 +model.layers.6.mlp.gate_proj: 8 +model.layers.6.mlp.up_proj: 8 +model.layers.6.self_attn.k_proj: 8 +model.layers.6.self_attn.q_proj: 8 +model.layers.6.self_attn.v_proj: 8 +model.layers.7.mlp.gate_proj: 8 +model.layers.7.mlp.up_proj: 8 +model.layers.7.self_attn.k_proj: 8 +model.layers.7.self_attn.q_proj: 8 +model.layers.7.self_attn.v_proj: 8 +model.layers.8.mlp.gate_proj: 8 +model.layers.8.mlp.up_proj: 8 +model.layers.8.self_attn.k_proj: 8 +model.layers.8.self_attn.q_proj: 8 +model.layers.8.self_attn.v_proj: 8 +model.layers.9.mlp.gate_proj: 8 +model.layers.9.mlp.up_proj: 8 +model.layers.9.self_attn.k_proj: 8 +model.layers.9.self_attn.q_proj: 8 +model.layers.9.self_attn.v_proj: 8 +model.layers.10.mlp.gate_proj: 8 +model.layers.10.mlp.up_proj: 8 +model.layers.10.self_attn.k_proj: 8 +model.layers.10.self_attn.q_proj: 8 +model.layers.10.self_attn.v_proj: 8 +model.layers.11.mlp.gate_proj: 8 +model.layers.11.mlp.up_proj: 8 +model.layers.11.self_attn.k_proj: 8 +model.layers.11.self_attn.q_proj: 8 +model.layers.11.self_attn.v_proj: 8 +model.layers.12.mlp.gate_proj: 8 +model.layers.12.mlp.up_proj: 8 +model.layers.12.self_attn.k_proj: 8 +model.layers.12.self_attn.q_proj: 8 +model.layers.12.self_attn.v_proj: 8 +model.layers.13.mlp.gate_proj: 8 +model.layers.13.mlp.up_proj: 8 +model.layers.13.self_attn.k_proj: 8 +model.layers.13.self_attn.q_proj: 8 +model.layers.13.self_attn.v_proj: 8 +model.layers.14.mlp.gate_proj: 8 +model.layers.14.mlp.up_proj: 8 +model.layers.14.self_attn.k_proj: 8 +model.layers.14.self_attn.q_proj: 8 +model.layers.14.self_attn.v_proj: 8 +model.layers.15.mlp.gate_proj: 8 +model.layers.15.mlp.up_proj: 8 +model.layers.15.self_attn.k_proj: 8 +model.layers.15.self_attn.q_proj: 8 +model.layers.15.self_attn.v_proj: 8 +model.layers.16.mlp.gate_proj: 8 +model.layers.16.mlp.up_proj: 8 +model.layers.16.self_attn.k_proj: 8 
+model.layers.16.self_attn.q_proj: 8 +model.layers.16.self_attn.v_proj: 8 +model.layers.17.mlp.gate_proj: 8 +model.layers.17.mlp.up_proj: 8 +model.layers.17.self_attn.k_proj: 8 +model.layers.17.self_attn.q_proj: 8 +model.layers.17.self_attn.v_proj: 8 +model.layers.18.mlp.gate_proj: 8 +model.layers.18.mlp.up_proj: 8 +model.layers.18.self_attn.k_proj: 8 +model.layers.18.self_attn.q_proj: 8 +model.layers.18.self_attn.v_proj: 8 +model.layers.19.mlp.gate_proj: 8 +model.layers.19.mlp.up_proj: 8 +model.layers.19.self_attn.k_proj: 8 +model.layers.19.self_attn.q_proj: 8 +model.layers.19.self_attn.v_proj: 8 +model.layers.20.mlp.gate_proj: 8 +model.layers.20.mlp.up_proj: 8 +model.layers.20.self_attn.k_proj: 8 +model.layers.20.self_attn.q_proj: 8 +model.layers.20.self_attn.v_proj: 8 +model.layers.21.mlp.gate_proj: 8 +model.layers.21.mlp.up_proj: 8 +model.layers.21.self_attn.k_proj: 8 +model.layers.21.self_attn.q_proj: 8 +model.layers.21.self_attn.v_proj: 8 +model.layers.22.mlp.gate_proj: 8 +model.layers.22.mlp.up_proj: 8 +model.layers.22.self_attn.k_proj: 8 +model.layers.22.self_attn.q_proj: 8 +model.layers.22.self_attn.v_proj: 8 +model.layers.23.mlp.gate_proj: 8 +model.layers.23.mlp.up_proj: 8 +model.layers.23.self_attn.k_proj: 8 +model.layers.23.self_attn.q_proj: 8 +model.layers.23.self_attn.v_proj: 8 +model.layers.24.mlp.gate_proj: 8 +model.layers.24.mlp.up_proj: 8 +model.layers.24.self_attn.k_proj: 8 +model.layers.24.self_attn.q_proj: 8 +model.layers.24.self_attn.v_proj: 8 +model.layers.25.mlp.gate_proj: 8 +model.layers.25.mlp.up_proj: 8 +model.layers.25.self_attn.k_proj: 8 +model.layers.25.self_attn.q_proj: 8 +model.layers.25.self_attn.v_proj: 8 +model.layers.26.mlp.gate_proj: 8 +model.layers.26.mlp.up_proj: 8 +model.layers.26.self_attn.k_proj: 8 +model.layers.26.self_attn.q_proj: 8 +model.layers.26.self_attn.v_proj: 8 +model.layers.27.mlp.gate_proj: 8 +model.layers.27.mlp.up_proj: 8 +model.layers.27.self_attn.k_proj: 8 +model.layers.27.self_attn.q_proj: 8 +model.layers.27.self_attn.v_proj: 8 +model.layers.28.mlp.gate_proj: 8 +model.layers.28.mlp.up_proj: 8 +model.layers.28.self_attn.k_proj: 8 +model.layers.28.self_attn.q_proj: 8 +model.layers.28.self_attn.v_proj: 8 +model.layers.29.mlp.gate_proj: 8 +model.layers.29.mlp.up_proj: 8 +model.layers.29.self_attn.k_proj: 8 +model.layers.29.self_attn.q_proj: 8 +model.layers.29.self_attn.v_proj: 8 +model.layers.30.mlp.gate_proj: 8 +model.layers.30.mlp.up_proj: 8 +model.layers.30.self_attn.k_proj: 8 +model.layers.30.self_attn.q_proj: 8 +model.layers.30.self_attn.v_proj: 8 +model.layers.31.mlp.gate_proj: 8 +model.layers.31.mlp.up_proj: 8 +model.layers.31.self_attn.k_proj: 8 +model.layers.31.self_attn.q_proj: 8 +model.layers.31.self_attn.v_proj: 8 +model.layers.0.self_attn.o_proj: 8 +model.layers.0.mlp.down_proj: 8 +model.layers.1.self_attn.o_proj: 8 +model.layers.1.mlp.down_proj: 8 +model.layers.2.self_attn.o_proj: 8 +model.layers.2.mlp.down_proj: 8 +model.layers.3.self_attn.o_proj: 8 +model.layers.3.mlp.down_proj: 8 +model.layers.4.self_attn.o_proj: 8 +model.layers.4.mlp.down_proj: 8 +model.layers.5.self_attn.o_proj: 8 +model.layers.5.mlp.down_proj: 8 +model.layers.6.self_attn.o_proj: 8 +model.layers.6.mlp.down_proj: 8 +model.layers.7.self_attn.o_proj: 8 +model.layers.7.mlp.down_proj: 8 +model.layers.8.self_attn.o_proj: 8 +model.layers.8.mlp.down_proj: 8 +model.layers.9.self_attn.o_proj: 8 +model.layers.9.mlp.down_proj: 8 +model.layers.10.self_attn.o_proj: 8 +model.layers.10.mlp.down_proj: 8 +model.layers.11.self_attn.o_proj: 8 
+model.layers.11.mlp.down_proj: 8 +model.layers.12.self_attn.o_proj: 8 +model.layers.12.mlp.down_proj: 8 +model.layers.13.self_attn.o_proj: 8 +model.layers.13.mlp.down_proj: 8 +model.layers.14.self_attn.o_proj: 8 +model.layers.14.mlp.down_proj: 8 +model.layers.15.self_attn.o_proj: 8 +model.layers.15.mlp.down_proj: 8 +model.layers.16.self_attn.o_proj: 8 +model.layers.16.mlp.down_proj: 8 +model.layers.17.self_attn.o_proj: 8 +model.layers.17.mlp.down_proj: 8 +model.layers.18.self_attn.o_proj: 8 +model.layers.18.mlp.down_proj: 8 +model.layers.19.self_attn.o_proj: 8 +model.layers.19.mlp.down_proj: 8 +model.layers.20.self_attn.o_proj: 8 +model.layers.20.mlp.down_proj: 8 +model.layers.21.self_attn.o_proj: 8 +model.layers.21.mlp.down_proj: 8 +model.layers.22.self_attn.o_proj: 8 +model.layers.22.mlp.down_proj: 8 +model.layers.23.self_attn.o_proj: 8 +model.layers.23.mlp.down_proj: 8 +model.layers.24.self_attn.o_proj: 8 +model.layers.24.mlp.down_proj: 8 +model.layers.25.self_attn.o_proj: 4 +model.layers.25.mlp.down_proj: 8 +model.layers.26.self_attn.o_proj: 8 +model.layers.26.mlp.down_proj: 4 +model.layers.27.self_attn.o_proj: 8 +model.layers.27.mlp.down_proj: 4 +model.layers.28.self_attn.o_proj: 8 +model.layers.28.mlp.down_proj: 4 +model.layers.29.self_attn.o_proj: 8 +model.layers.29.mlp.down_proj: 4 +model.layers.30.self_attn.o_proj: 8 +model.layers.30.mlp.down_proj: 8 +model.layers.31.self_attn.o_proj: 8 +model.layers.31.mlp.down_proj: 8 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/special_tokens_map.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..b43be96621d147110fb8a18b5776ec6e38516127 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/special_tokens_map.json @@ -0,0 +1,17 @@ +{ + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..db71f26566b004203e776ba1248e8052eaebeb47 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d066765903924aec0e915a00987ef9ffe0ee366e52368f44884ab71f3e1298 +size 4976698592 diff --git 
a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e415e91b845d25ab9b9e0e32b2ed876bb2d7f3d2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:779c706f56bce5452da8b5fda679c8bd3125b870678e5544b5debd531844d939 +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..ee249ed635229ff154fcba2fa95385bd6fc56834 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7426f0cdef097b0b84a4403954fae06642649a7c4a06bdcf7cc39f963e75e40 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.005_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..df0d353583539d7603192fb456743fd52816e29f --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949a717376d70bd7c14cb9f0a04ef3e48fa6c9571a0959dbad1d7816052530b1 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..877a1a2d42af3b501c94badd8967f22c77cf8538 --- /dev/null +++ 
b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f79c9f35494ee28ac2f307fda7ef1f0c699855fcd9fd88db82f5e2e1d532273e +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..8d8ab9b1d182e51266c673ce6bf0c5cf4b2d9368 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:553898c0a66462cf9da4b62e3c4b60322a464a5ecd0f4ddaf5c3b653ba632710 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.68_4-8bit_grouped_seed1234/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..df0d353583539d7603192fb456743fd52816e29f --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:949a717376d70bd7c14cb9f0a04ef3e48fa6c9571a0959dbad1d7816052530b1 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..877a1a2d42af3b501c94badd8967f22c77cf8538 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f79c9f35494ee28ac2f307fda7ef1f0c699855fcd9fd88db82f5e2e1d532273e +size 4999802616 diff --git 
a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..887190f26439ecff7a6f256b7a8518b505505399 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b9d2c8849bb6be4634cc740b7d5d3cf942b8eccd9b7bb23b7113b21c25a3d1a +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..67a1363c10f34af7e525b7de96dae1ecb9321395 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/quantization_config.txt @@ -0,0 +1,412 @@ +# Model: meta-llama/Llama-3.1-8B-Instruct +# Layer directory: /nfs/scistore19/alistgrp/mhelcig/local/data/search/4_5_6_7_8bit_asym_g128/Llama-3.1-8B-Instruct/6bit/ +# Sensitivity method: shapley +# Estimation method: permutation_separate +# Available bitwidths: [4, 8] +# Bitwidth map: {4: 4.156, 5: 5.156, 6: 6.156, 7: 7.156, 8: 8.156} +# +# Layer groups: 128 groups (fused layers share bitwidth) +# block_0:mlp.gate_proj,mlp.up_proj.block_0:mlp.gate_proj,mlp.up_proj: group 0, 2 layers +# block_0:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 1, 3 layers +# block_10:mlp.gate_proj,mlp.up_proj.block_10:mlp.gate_proj,mlp.up_proj: group 20, 2 layers +# block_10:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 21, 3 layers +# block_11:mlp.gate_proj,mlp.up_proj.block_11:mlp.gate_proj,mlp.up_proj: group 22, 2 layers +# block_11:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 23, 3 layers +# block_12:mlp.gate_proj,mlp.up_proj.block_12:mlp.gate_proj,mlp.up_proj: group 24, 2 layers +# block_12:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 25, 3 layers +# block_13:mlp.gate_proj,mlp.up_proj.block_13:mlp.gate_proj,mlp.up_proj: group 26, 2 layers +# block_13:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 27, 3 layers +# block_14:mlp.gate_proj,mlp.up_proj.block_14:mlp.gate_proj,mlp.up_proj: group 28, 2 layers +# block_14:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 29, 3 layers +# block_15:mlp.gate_proj,mlp.up_proj.block_15:mlp.gate_proj,mlp.up_proj: group 30, 2 layers +# block_15:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 31, 3 layers +# block_16:mlp.gate_proj,mlp.up_proj.block_16:mlp.gate_proj,mlp.up_proj: group 32, 2 layers +# block_16:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 33, 3 
layers +# block_17:mlp.gate_proj,mlp.up_proj.block_17:mlp.gate_proj,mlp.up_proj: group 34, 2 layers +# block_17:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 35, 3 layers +# block_18:mlp.gate_proj,mlp.up_proj.block_18:mlp.gate_proj,mlp.up_proj: group 36, 2 layers +# block_18:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 37, 3 layers +# block_19:mlp.gate_proj,mlp.up_proj.block_19:mlp.gate_proj,mlp.up_proj: group 38, 2 layers +# block_19:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 39, 3 layers +# block_1:mlp.gate_proj,mlp.up_proj.block_1:mlp.gate_proj,mlp.up_proj: group 2, 2 layers +# block_1:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 3, 3 layers +# block_20:mlp.gate_proj,mlp.up_proj.block_20:mlp.gate_proj,mlp.up_proj: group 40, 2 layers +# block_20:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 41, 3 layers +# block_21:mlp.gate_proj,mlp.up_proj.block_21:mlp.gate_proj,mlp.up_proj: group 42, 2 layers +# block_21:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 43, 3 layers +# block_22:mlp.gate_proj,mlp.up_proj.block_22:mlp.gate_proj,mlp.up_proj: group 44, 2 layers +# block_22:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 45, 3 layers +# block_23:mlp.gate_proj,mlp.up_proj.block_23:mlp.gate_proj,mlp.up_proj: group 46, 2 layers +# block_23:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 47, 3 layers +# block_24:mlp.gate_proj,mlp.up_proj.block_24:mlp.gate_proj,mlp.up_proj: group 48, 2 layers +# block_24:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 49, 3 layers +# block_25:mlp.gate_proj,mlp.up_proj.block_25:mlp.gate_proj,mlp.up_proj: group 50, 2 layers +# block_25:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 51, 3 layers +# block_26:mlp.gate_proj,mlp.up_proj.block_26:mlp.gate_proj,mlp.up_proj: group 52, 2 layers +# block_26:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 53, 3 layers +# block_27:mlp.gate_proj,mlp.up_proj.block_27:mlp.gate_proj,mlp.up_proj: group 54, 2 layers +# block_27:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 55, 3 layers +# block_28:mlp.gate_proj,mlp.up_proj.block_28:mlp.gate_proj,mlp.up_proj: group 56, 2 layers +# block_28:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 57, 3 layers +# block_29:mlp.gate_proj,mlp.up_proj.block_29:mlp.gate_proj,mlp.up_proj: group 58, 2 layers +# block_29:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 59, 3 layers +# block_2:mlp.gate_proj,mlp.up_proj.block_2:mlp.gate_proj,mlp.up_proj: group 4, 2 layers +# block_2:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 5, 3 layers +# block_30:mlp.gate_proj,mlp.up_proj.block_30:mlp.gate_proj,mlp.up_proj: group 60, 2 layers +# block_30:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 61, 3 layers +# block_31:mlp.gate_proj,mlp.up_proj.block_31:mlp.gate_proj,mlp.up_proj: group 62, 2 layers +# block_31:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 63, 3 layers +# block_3:mlp.gate_proj,mlp.up_proj.block_3:mlp.gate_proj,mlp.up_proj: group 6, 2 layers +# block_3:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 7, 3 layers +# block_4:mlp.gate_proj,mlp.up_proj.block_4:mlp.gate_proj,mlp.up_proj: group 8, 2 layers +# block_4:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 9, 3 layers +# block_5:mlp.gate_proj,mlp.up_proj.block_5:mlp.gate_proj,mlp.up_proj: group 10, 2 layers +# block_5:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 11, 3 layers +# 
block_6:mlp.gate_proj,mlp.up_proj.block_6:mlp.gate_proj,mlp.up_proj: group 12, 2 layers +# block_6:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 13, 3 layers +# block_7:mlp.gate_proj,mlp.up_proj.block_7:mlp.gate_proj,mlp.up_proj: group 14, 2 layers +# block_7:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 15, 3 layers +# block_8:mlp.gate_proj,mlp.up_proj.block_8:mlp.gate_proj,mlp.up_proj: group 16, 2 layers +# block_8:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 17, 3 layers +# block_9:mlp.gate_proj,mlp.up_proj.block_9:mlp.gate_proj,mlp.up_proj: group 18, 2 layers +# block_9:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 19, 3 layers +# Fused in model.layers.0: +# - self_attn.o_proj (group 64, 1 layers) +# - mlp.down_proj (group 65, 1 layers) +# Fused in model.layers.1: +# - self_attn.o_proj (group 66, 1 layers) +# - mlp.down_proj (group 67, 1 layers) +# Fused in model.layers.10: +# - self_attn.o_proj (group 84, 1 layers) +# - mlp.down_proj (group 85, 1 layers) +# Fused in model.layers.11: +# - self_attn.o_proj (group 86, 1 layers) +# - mlp.down_proj (group 87, 1 layers) +# Fused in model.layers.12: +# - self_attn.o_proj (group 88, 1 layers) +# - mlp.down_proj (group 89, 1 layers) +# Fused in model.layers.13: +# - self_attn.o_proj (group 90, 1 layers) +# - mlp.down_proj (group 91, 1 layers) +# Fused in model.layers.14: +# - self_attn.o_proj (group 92, 1 layers) +# - mlp.down_proj (group 93, 1 layers) +# Fused in model.layers.15: +# - self_attn.o_proj (group 94, 1 layers) +# - mlp.down_proj (group 95, 1 layers) +# Fused in model.layers.16: +# - self_attn.o_proj (group 96, 1 layers) +# - mlp.down_proj (group 97, 1 layers) +# Fused in model.layers.17: +# - self_attn.o_proj (group 98, 1 layers) +# - mlp.down_proj (group 99, 1 layers) +# Fused in model.layers.18: +# - self_attn.o_proj (group 100, 1 layers) +# - mlp.down_proj (group 101, 1 layers) +# Fused in model.layers.19: +# - self_attn.o_proj (group 102, 1 layers) +# - mlp.down_proj (group 103, 1 layers) +# Fused in model.layers.2: +# - self_attn.o_proj (group 68, 1 layers) +# - mlp.down_proj (group 69, 1 layers) +# Fused in model.layers.20: +# - self_attn.o_proj (group 104, 1 layers) +# - mlp.down_proj (group 105, 1 layers) +# Fused in model.layers.21: +# - self_attn.o_proj (group 106, 1 layers) +# - mlp.down_proj (group 107, 1 layers) +# Fused in model.layers.22: +# - self_attn.o_proj (group 108, 1 layers) +# - mlp.down_proj (group 109, 1 layers) +# Fused in model.layers.23: +# - self_attn.o_proj (group 110, 1 layers) +# - mlp.down_proj (group 111, 1 layers) +# Fused in model.layers.24: +# - self_attn.o_proj (group 112, 1 layers) +# - mlp.down_proj (group 113, 1 layers) +# Fused in model.layers.25: +# - self_attn.o_proj (group 114, 1 layers) +# - mlp.down_proj (group 115, 1 layers) +# Fused in model.layers.26: +# - self_attn.o_proj (group 116, 1 layers) +# - mlp.down_proj (group 117, 1 layers) +# Fused in model.layers.27: +# - self_attn.o_proj (group 118, 1 layers) +# - mlp.down_proj (group 119, 1 layers) +# Fused in model.layers.28: +# - self_attn.o_proj (group 120, 1 layers) +# - mlp.down_proj (group 121, 1 layers) +# Fused in model.layers.29: +# - self_attn.o_proj (group 122, 1 layers) +# - mlp.down_proj (group 123, 1 layers) +# Fused in model.layers.3: +# - self_attn.o_proj (group 70, 1 layers) +# - mlp.down_proj (group 71, 1 layers) +# Fused in model.layers.30: +# - self_attn.o_proj (group 124, 1 layers) +# - mlp.down_proj (group 125, 1 layers) +# Fused in model.layers.31: +# - 
self_attn.o_proj (group 126, 1 layers) +# - mlp.down_proj (group 127, 1 layers) +# Fused in model.layers.4: +# - self_attn.o_proj (group 72, 1 layers) +# - mlp.down_proj (group 73, 1 layers) +# Fused in model.layers.5: +# - self_attn.o_proj (group 74, 1 layers) +# - mlp.down_proj (group 75, 1 layers) +# Fused in model.layers.6: +# - self_attn.o_proj (group 76, 1 layers) +# - mlp.down_proj (group 77, 1 layers) +# Fused in model.layers.7: +# - self_attn.o_proj (group 78, 1 layers) +# - mlp.down_proj (group 79, 1 layers) +# Fused in model.layers.8: +# - self_attn.o_proj (group 80, 1 layers) +# - mlp.down_proj (group 81, 1 layers) +# Fused in model.layers.9: +# - self_attn.o_proj (group 82, 1 layers) +# - mlp.down_proj (group 83, 1 layers) +# +# Mode: binary_search_constraint (measured) +# Constraint max_kl: 0.01 +# Constraint min_eap: 0.985 +# Weights: nll=0.0, kl=0.0, eap=1.0 +# +# Average bitwidth: 7.7089 +# Total params: 6979321856 +# Total bits: 53802786882 +# Final KL: 0.002595 +# Final EAP: 0.985184 +# Final ETL: 0.014816 +# Satisfies constraints: True +# Solver calls: 9 +# Evaluations: 9 +# +# Bitwidth distribution: +# 8-bit: 210 layers (93.8%) +# 4-bit: 14 layers (6.2%) +# +model.layers.0.mlp.gate_proj: 4 +model.layers.0.mlp.up_proj: 4 +model.layers.0.self_attn.k_proj: 8 +model.layers.0.self_attn.q_proj: 8 +model.layers.0.self_attn.v_proj: 8 +model.layers.1.mlp.gate_proj: 8 +model.layers.1.mlp.up_proj: 8 +model.layers.1.self_attn.k_proj: 8 +model.layers.1.self_attn.q_proj: 8 +model.layers.1.self_attn.v_proj: 8 +model.layers.2.mlp.gate_proj: 8 +model.layers.2.mlp.up_proj: 8 +model.layers.2.self_attn.k_proj: 8 +model.layers.2.self_attn.q_proj: 8 +model.layers.2.self_attn.v_proj: 8 +model.layers.3.mlp.gate_proj: 8 +model.layers.3.mlp.up_proj: 8 +model.layers.3.self_attn.k_proj: 8 +model.layers.3.self_attn.q_proj: 8 +model.layers.3.self_attn.v_proj: 8 +model.layers.4.mlp.gate_proj: 8 +model.layers.4.mlp.up_proj: 8 +model.layers.4.self_attn.k_proj: 8 +model.layers.4.self_attn.q_proj: 8 +model.layers.4.self_attn.v_proj: 8 +model.layers.5.mlp.gate_proj: 8 +model.layers.5.mlp.up_proj: 8 +model.layers.5.self_attn.k_proj: 8 +model.layers.5.self_attn.q_proj: 8 +model.layers.5.self_attn.v_proj: 8 +model.layers.6.mlp.gate_proj: 8 +model.layers.6.mlp.up_proj: 8 +model.layers.6.self_attn.k_proj: 8 +model.layers.6.self_attn.q_proj: 8 +model.layers.6.self_attn.v_proj: 8 +model.layers.7.mlp.gate_proj: 8 +model.layers.7.mlp.up_proj: 8 +model.layers.7.self_attn.k_proj: 8 +model.layers.7.self_attn.q_proj: 8 +model.layers.7.self_attn.v_proj: 8 +model.layers.8.mlp.gate_proj: 8 +model.layers.8.mlp.up_proj: 8 +model.layers.8.self_attn.k_proj: 8 +model.layers.8.self_attn.q_proj: 8 +model.layers.8.self_attn.v_proj: 8 +model.layers.9.mlp.gate_proj: 8 +model.layers.9.mlp.up_proj: 8 +model.layers.9.self_attn.k_proj: 8 +model.layers.9.self_attn.q_proj: 8 +model.layers.9.self_attn.v_proj: 8 +model.layers.10.mlp.gate_proj: 8 +model.layers.10.mlp.up_proj: 8 +model.layers.10.self_attn.k_proj: 8 +model.layers.10.self_attn.q_proj: 8 +model.layers.10.self_attn.v_proj: 8 +model.layers.11.mlp.gate_proj: 8 +model.layers.11.mlp.up_proj: 8 +model.layers.11.self_attn.k_proj: 8 +model.layers.11.self_attn.q_proj: 8 +model.layers.11.self_attn.v_proj: 8 +model.layers.12.mlp.gate_proj: 8 +model.layers.12.mlp.up_proj: 8 +model.layers.12.self_attn.k_proj: 8 +model.layers.12.self_attn.q_proj: 8 +model.layers.12.self_attn.v_proj: 8 +model.layers.13.mlp.gate_proj: 8 +model.layers.13.mlp.up_proj: 8 +model.layers.13.self_attn.k_proj: 8 
+model.layers.13.self_attn.q_proj: 8 +model.layers.13.self_attn.v_proj: 8 +model.layers.14.mlp.gate_proj: 8 +model.layers.14.mlp.up_proj: 8 +model.layers.14.self_attn.k_proj: 8 +model.layers.14.self_attn.q_proj: 8 +model.layers.14.self_attn.v_proj: 8 +model.layers.15.mlp.gate_proj: 8 +model.layers.15.mlp.up_proj: 8 +model.layers.15.self_attn.k_proj: 8 +model.layers.15.self_attn.q_proj: 8 +model.layers.15.self_attn.v_proj: 8 +model.layers.16.mlp.gate_proj: 8 +model.layers.16.mlp.up_proj: 8 +model.layers.16.self_attn.k_proj: 8 +model.layers.16.self_attn.q_proj: 8 +model.layers.16.self_attn.v_proj: 8 +model.layers.17.mlp.gate_proj: 8 +model.layers.17.mlp.up_proj: 8 +model.layers.17.self_attn.k_proj: 8 +model.layers.17.self_attn.q_proj: 8 +model.layers.17.self_attn.v_proj: 8 +model.layers.18.mlp.gate_proj: 8 +model.layers.18.mlp.up_proj: 8 +model.layers.18.self_attn.k_proj: 8 +model.layers.18.self_attn.q_proj: 8 +model.layers.18.self_attn.v_proj: 8 +model.layers.19.mlp.gate_proj: 8 +model.layers.19.mlp.up_proj: 8 +model.layers.19.self_attn.k_proj: 8 +model.layers.19.self_attn.q_proj: 8 +model.layers.19.self_attn.v_proj: 8 +model.layers.20.mlp.gate_proj: 8 +model.layers.20.mlp.up_proj: 8 +model.layers.20.self_attn.k_proj: 8 +model.layers.20.self_attn.q_proj: 8 +model.layers.20.self_attn.v_proj: 8 +model.layers.21.mlp.gate_proj: 8 +model.layers.21.mlp.up_proj: 8 +model.layers.21.self_attn.k_proj: 8 +model.layers.21.self_attn.q_proj: 8 +model.layers.21.self_attn.v_proj: 8 +model.layers.22.mlp.gate_proj: 8 +model.layers.22.mlp.up_proj: 8 +model.layers.22.self_attn.k_proj: 8 +model.layers.22.self_attn.q_proj: 8 +model.layers.22.self_attn.v_proj: 8 +model.layers.23.mlp.gate_proj: 8 +model.layers.23.mlp.up_proj: 8 +model.layers.23.self_attn.k_proj: 8 +model.layers.23.self_attn.q_proj: 8 +model.layers.23.self_attn.v_proj: 8 +model.layers.24.mlp.gate_proj: 8 +model.layers.24.mlp.up_proj: 8 +model.layers.24.self_attn.k_proj: 8 +model.layers.24.self_attn.q_proj: 8 +model.layers.24.self_attn.v_proj: 8 +model.layers.25.mlp.gate_proj: 8 +model.layers.25.mlp.up_proj: 8 +model.layers.25.self_attn.k_proj: 8 +model.layers.25.self_attn.q_proj: 8 +model.layers.25.self_attn.v_proj: 8 +model.layers.26.mlp.gate_proj: 8 +model.layers.26.mlp.up_proj: 8 +model.layers.26.self_attn.k_proj: 8 +model.layers.26.self_attn.q_proj: 8 +model.layers.26.self_attn.v_proj: 8 +model.layers.27.mlp.gate_proj: 8 +model.layers.27.mlp.up_proj: 8 +model.layers.27.self_attn.k_proj: 8 +model.layers.27.self_attn.q_proj: 8 +model.layers.27.self_attn.v_proj: 8 +model.layers.28.mlp.gate_proj: 8 +model.layers.28.mlp.up_proj: 8 +model.layers.28.self_attn.k_proj: 8 +model.layers.28.self_attn.q_proj: 8 +model.layers.28.self_attn.v_proj: 8 +model.layers.29.mlp.gate_proj: 8 +model.layers.29.mlp.up_proj: 8 +model.layers.29.self_attn.k_proj: 8 +model.layers.29.self_attn.q_proj: 8 +model.layers.29.self_attn.v_proj: 8 +model.layers.30.mlp.gate_proj: 8 +model.layers.30.mlp.up_proj: 8 +model.layers.30.self_attn.k_proj: 8 +model.layers.30.self_attn.q_proj: 8 +model.layers.30.self_attn.v_proj: 8 +model.layers.31.mlp.gate_proj: 8 +model.layers.31.mlp.up_proj: 8 +model.layers.31.self_attn.k_proj: 8 +model.layers.31.self_attn.q_proj: 8 +model.layers.31.self_attn.v_proj: 8 +model.layers.0.self_attn.o_proj: 8 +model.layers.0.mlp.down_proj: 8 +model.layers.1.self_attn.o_proj: 8 +model.layers.1.mlp.down_proj: 8 +model.layers.2.self_attn.o_proj: 8 +model.layers.2.mlp.down_proj: 4 +model.layers.3.self_attn.o_proj: 8 +model.layers.3.mlp.down_proj: 8 
+model.layers.4.self_attn.o_proj: 8 +model.layers.4.mlp.down_proj: 8 +model.layers.5.self_attn.o_proj: 8 +model.layers.5.mlp.down_proj: 8 +model.layers.6.self_attn.o_proj: 8 +model.layers.6.mlp.down_proj: 8 +model.layers.7.self_attn.o_proj: 8 +model.layers.7.mlp.down_proj: 4 +model.layers.8.self_attn.o_proj: 8 +model.layers.8.mlp.down_proj: 4 +model.layers.9.self_attn.o_proj: 8 +model.layers.9.mlp.down_proj: 4 +model.layers.10.self_attn.o_proj: 8 +model.layers.10.mlp.down_proj: 4 +model.layers.11.self_attn.o_proj: 8 +model.layers.11.mlp.down_proj: 8 +model.layers.12.self_attn.o_proj: 8 +model.layers.12.mlp.down_proj: 4 +model.layers.13.self_attn.o_proj: 8 +model.layers.13.mlp.down_proj: 8 +model.layers.14.self_attn.o_proj: 8 +model.layers.14.mlp.down_proj: 8 +model.layers.15.self_attn.o_proj: 8 +model.layers.15.mlp.down_proj: 8 +model.layers.16.self_attn.o_proj: 8 +model.layers.16.mlp.down_proj: 8 +model.layers.17.self_attn.o_proj: 8 +model.layers.17.mlp.down_proj: 8 +model.layers.18.self_attn.o_proj: 8 +model.layers.18.mlp.down_proj: 8 +model.layers.19.self_attn.o_proj: 8 +model.layers.19.mlp.down_proj: 8 +model.layers.20.self_attn.o_proj: 8 +model.layers.20.mlp.down_proj: 8 +model.layers.21.self_attn.o_proj: 8 +model.layers.21.mlp.down_proj: 8 +model.layers.22.self_attn.o_proj: 8 +model.layers.22.mlp.down_proj: 8 +model.layers.23.self_attn.o_proj: 8 +model.layers.23.mlp.down_proj: 8 +model.layers.24.self_attn.o_proj: 8 +model.layers.24.mlp.down_proj: 8 +model.layers.25.self_attn.o_proj: 4 +model.layers.25.mlp.down_proj: 4 +model.layers.26.self_attn.o_proj: 8 +model.layers.26.mlp.down_proj: 4 +model.layers.27.self_attn.o_proj: 8 +model.layers.27.mlp.down_proj: 4 +model.layers.28.self_attn.o_proj: 8 +model.layers.28.mlp.down_proj: 4 +model.layers.29.self_attn.o_proj: 8 +model.layers.29.mlp.down_proj: 4 +model.layers.30.self_attn.o_proj: 8 +model.layers.30.mlp.down_proj: 8 +model.layers.31.self_attn.o_proj: 8 +model.layers.31.mlp.down_proj: 8 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.985_sha_bw7.71_4-8bit_grouped_seed42/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..db71f26566b004203e776ba1248e8052eaebeb47 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d066765903924aec0e915a00987ef9ffe0ee366e52368f44884ab71f3e1298 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..e415e91b845d25ab9b9e0e32b2ed876bb2d7f3d2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:779c706f56bce5452da8b5fda679c8bd3125b870678e5544b5debd531844d939 +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..644745d32960e62f170dbe2ef5b830e4d21adafc --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37681ec6ff920f2de78c107de2c44abbf969646f21ada20bde02938b153054e9 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.01_4-8bit_grouped_seed1234/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/README.md b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/README.md new file mode 100644 index 0000000000000000000000000000000000000000..aaf369a0485a51e8e42ae557890d8819ba69b9a3 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/README.md @@ -0,0 +1,18 @@ +# Quantized Model Checkpoint + +**Base model:** meta-llama/Llama-3.1-8B-Instruct + +**Average bitwidth:** 8.0454 + +**Sensitivity method:** shapley + +**Constraints:** +- max_kl: 0.01 +- min_eap: 0.99 + +**Metrics:** +- final_kl: 0.001326 +- final_eap: 0.990180 +- final_etl: 0.009820 + +See `quantization_config.txt` for full configuration details. 
diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/config.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2bf71dab1ee4525127aba58e9446aa0a1dd046e6 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/config.json @@ -0,0 +1,39 @@ +{ + "architectures": [ + "LlamaForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "dtype": "float16", + "eos_token_id": [ + 128001, + 128008, + 128009 + ], + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "max_position_embeddings": 131072, + "mlp_bias": false, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": { + "factor": 8.0, + "high_freq_factor": 4.0, + "low_freq_factor": 1.0, + "original_max_position_embeddings": 8192, + "rope_type": "llama3" + }, + "rope_theta": 500000.0, + "tie_word_embeddings": false, + "transformers_version": "4.57.3", + "use_cache": false, + "vocab_size": 128256 +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/generation_config.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..993459bf55ed73c1390809c2e2a3d7c1c0e0d844 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/generation_config.json @@ -0,0 +1,12 @@ +{ + "bos_token_id": 128000, + "do_sample": true, + "eos_token_id": [ + 128001, + 128008, + 128009 + ], + "temperature": 0.6, + "top_p": 0.9, + "transformers_version": "4.57.3" +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..db71f26566b004203e776ba1248e8052eaebeb47 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00001-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d066765903924aec0e915a00987ef9ffe0ee366e52368f44884ab71f3e1298 +size 4976698592 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..e415e91b845d25ab9b9e0e32b2ed876bb2d7f3d2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00002-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:779c706f56bce5452da8b5fda679c8bd3125b870678e5544b5debd531844d939 +size 4999802616 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors new file mode 100644 index 
0000000000000000000000000000000000000000..ee249ed635229ff154fcba2fa95385bd6fc56834 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00003-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7426f0cdef097b0b84a4403954fae06642649a7c4a06bdcf7cc39f963e75e40 +size 4915916080 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b09e9cdcb05f5cf77b3ef3a8ffa431eb33ad02b9 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model-00004-of-00004.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b +size 1168138808 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model.safetensors.index.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..5c64f1e87be95160fabc494eebfa0f7e68064af2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/model.safetensors.index.json @@ -0,0 +1,299 @@ +{ + "metadata": { + "total_parameters": 8030261248, + "total_size": 16060522496 + }, + "weight_map": { + "lm_head.weight": "model-00004-of-00004.safetensors", + "model.embed_tokens.weight": "model-00001-of-00004.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.post_attention_layernorm.weight": 
"model-00002-of-00004.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.mlp.up_proj.weight": 
"model-00002-of-00004.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.mlp.gate_proj.weight": 
"model-00001-of-00004.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.down_proj.weight": 
"model-00003-of-00004.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.input_layernorm.weight": 
"model-00003-of-00004.safetensors", + "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors", + "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.4.self_attn.v_proj.weight": 
"model-00001-of-00004.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors", + "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors", + 
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors", + "model.norm.weight": "model-00004-of-00004.safetensors" + } +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/quantization_config.txt b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/quantization_config.txt new file mode 100644 index 0000000000000000000000000000000000000000..67a1a3921088d7e7adad995999119a2b37467330 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/quantization_config.txt @@ -0,0 +1,412 @@ +# Model: meta-llama/Llama-3.1-8B-Instruct +# Layer directory: /nfs/scistore19/alistgrp/mhelcig/local/data/search/4_5_6_7_8bit_asym_g128/Llama-3.1-8B-Instruct/6bit/ +# Sensitivity method: shapley +# Estimation method: permutation_separate +# Available bitwidths: [4, 8] +# Bitwidth map: {4: 4.156, 5: 5.156, 6: 6.156, 7: 7.156, 8: 8.156} +# +# Layer groups: 128 groups (fused layers share bitwidth) +# block_0:mlp.gate_proj,mlp.up_proj.block_0:mlp.gate_proj,mlp.up_proj: group 0, 2 layers +# block_0:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 1, 3 layers +# block_10:mlp.gate_proj,mlp.up_proj.block_10:mlp.gate_proj,mlp.up_proj: group 20, 2 layers +# block_10:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 21, 3 layers +# block_11:mlp.gate_proj,mlp.up_proj.block_11:mlp.gate_proj,mlp.up_proj: group 22, 2 layers +# block_11:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 23, 3 layers +# block_12:mlp.gate_proj,mlp.up_proj.block_12:mlp.gate_proj,mlp.up_proj: group 24, 2 layers +# block_12:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 25, 3 layers +# block_13:mlp.gate_proj,mlp.up_proj.block_13:mlp.gate_proj,mlp.up_proj: group 26, 2 layers +# block_13:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 27, 3 layers +# block_14:mlp.gate_proj,mlp.up_proj.block_14:mlp.gate_proj,mlp.up_proj: group 28, 2 layers +# block_14:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 29, 3 layers +# block_15:mlp.gate_proj,mlp.up_proj.block_15:mlp.gate_proj,mlp.up_proj: group 30, 2 layers +# block_15:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 31, 3 layers +# block_16:mlp.gate_proj,mlp.up_proj.block_16:mlp.gate_proj,mlp.up_proj: group 32, 2 layers +# block_16:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 33, 3 layers +# block_17:mlp.gate_proj,mlp.up_proj.block_17:mlp.gate_proj,mlp.up_proj: group 34, 2 layers +# block_17:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 35, 3 layers +# block_18:mlp.gate_proj,mlp.up_proj.block_18:mlp.gate_proj,mlp.up_proj: group 36, 2 layers +# block_18:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 37, 3 layers +# block_19:mlp.gate_proj,mlp.up_proj.block_19:mlp.gate_proj,mlp.up_proj: group 38, 2 layers +# block_19:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 39, 3 layers +# block_1:mlp.gate_proj,mlp.up_proj.block_1:mlp.gate_proj,mlp.up_proj: group 2, 2 layers +# block_1:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 3, 3 layers +# block_20:mlp.gate_proj,mlp.up_proj.block_20:mlp.gate_proj,mlp.up_proj: group 40, 2 layers +# block_20:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 41, 3 layers +# block_21:mlp.gate_proj,mlp.up_proj.block_21:mlp.gate_proj,mlp.up_proj: group 42, 2 layers +# block_21:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 43, 3 layers +# block_22:mlp.gate_proj,mlp.up_proj.block_22:mlp.gate_proj,mlp.up_proj: group 
44, 2 layers +# block_22:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 45, 3 layers +# block_23:mlp.gate_proj,mlp.up_proj: group 46, 2 layers +# block_23:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 47, 3 layers +# block_24:mlp.gate_proj,mlp.up_proj: group 48, 2 layers +# block_24:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 49, 3 layers +# block_25:mlp.gate_proj,mlp.up_proj: group 50, 2 layers +# block_25:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 51, 3 layers +# block_26:mlp.gate_proj,mlp.up_proj: group 52, 2 layers +# block_26:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 53, 3 layers +# block_27:mlp.gate_proj,mlp.up_proj: group 54, 2 layers +# block_27:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 55, 3 layers +# block_28:mlp.gate_proj,mlp.up_proj: group 56, 2 layers +# block_28:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 57, 3 layers +# block_29:mlp.gate_proj,mlp.up_proj: group 58, 2 layers +# block_29:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 59, 3 layers +# block_2:mlp.gate_proj,mlp.up_proj: group 4, 2 layers +# block_2:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 5, 3 layers +# block_30:mlp.gate_proj,mlp.up_proj: group 60, 2 layers +# block_30:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 61, 3 layers +# block_31:mlp.gate_proj,mlp.up_proj: group 62, 2 layers +# block_31:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 63, 3 layers +# block_3:mlp.gate_proj,mlp.up_proj: group 6, 2 layers +# block_3:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 7, 3 layers +# block_4:mlp.gate_proj,mlp.up_proj: group 8, 2 layers +# block_4:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 9, 3 layers +# block_5:mlp.gate_proj,mlp.up_proj: group 10, 2 layers +# block_5:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 11, 3 layers +# block_6:mlp.gate_proj,mlp.up_proj: group 12, 2 layers +# block_6:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 13, 3 layers +# block_7:mlp.gate_proj,mlp.up_proj: group 14, 2 layers +# block_7:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 15, 3 layers +# block_8:mlp.gate_proj,mlp.up_proj: group 16, 2 layers +# block_8:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 17, 3 layers +# block_9:mlp.gate_proj,mlp.up_proj: group 18, 2 layers +# block_9:self_attn.k_proj,self_attn.q_proj,self_attn.v_proj: group 19, 3 layers +# Fused in model.layers.0: +# - self_attn.o_proj (group 64, 1 layers) +# - mlp.down_proj (group 65, 1 layers) +# Fused in model.layers.1: +# - self_attn.o_proj (group 66, 1 layers) +# - mlp.down_proj (group 67, 1 layers) +# Fused in model.layers.10: +# - self_attn.o_proj (group 84, 1 layers) +# - mlp.down_proj (group 85, 1 layers) +# Fused in model.layers.11: +# - self_attn.o_proj (group 86, 1 layers) +# - mlp.down_proj (group 87, 1 layers) +# Fused in model.layers.12: +# - self_attn.o_proj 
(group 88, 1 layers) +# - mlp.down_proj (group 89, 1 layers) +# Fused in model.layers.13: +# - self_attn.o_proj (group 90, 1 layers) +# - mlp.down_proj (group 91, 1 layers) +# Fused in model.layers.14: +# - self_attn.o_proj (group 92, 1 layers) +# - mlp.down_proj (group 93, 1 layers) +# Fused in model.layers.15: +# - self_attn.o_proj (group 94, 1 layers) +# - mlp.down_proj (group 95, 1 layers) +# Fused in model.layers.16: +# - self_attn.o_proj (group 96, 1 layers) +# - mlp.down_proj (group 97, 1 layers) +# Fused in model.layers.17: +# - self_attn.o_proj (group 98, 1 layers) +# - mlp.down_proj (group 99, 1 layers) +# Fused in model.layers.18: +# - self_attn.o_proj (group 100, 1 layers) +# - mlp.down_proj (group 101, 1 layers) +# Fused in model.layers.19: +# - self_attn.o_proj (group 102, 1 layers) +# - mlp.down_proj (group 103, 1 layers) +# Fused in model.layers.2: +# - self_attn.o_proj (group 68, 1 layers) +# - mlp.down_proj (group 69, 1 layers) +# Fused in model.layers.20: +# - self_attn.o_proj (group 104, 1 layers) +# - mlp.down_proj (group 105, 1 layers) +# Fused in model.layers.21: +# - self_attn.o_proj (group 106, 1 layers) +# - mlp.down_proj (group 107, 1 layers) +# Fused in model.layers.22: +# - self_attn.o_proj (group 108, 1 layers) +# - mlp.down_proj (group 109, 1 layers) +# Fused in model.layers.23: +# - self_attn.o_proj (group 110, 1 layers) +# - mlp.down_proj (group 111, 1 layers) +# Fused in model.layers.24: +# - self_attn.o_proj (group 112, 1 layers) +# - mlp.down_proj (group 113, 1 layers) +# Fused in model.layers.25: +# - self_attn.o_proj (group 114, 1 layers) +# - mlp.down_proj (group 115, 1 layers) +# Fused in model.layers.26: +# - self_attn.o_proj (group 116, 1 layers) +# - mlp.down_proj (group 117, 1 layers) +# Fused in model.layers.27: +# - self_attn.o_proj (group 118, 1 layers) +# - mlp.down_proj (group 119, 1 layers) +# Fused in model.layers.28: +# - self_attn.o_proj (group 120, 1 layers) +# - mlp.down_proj (group 121, 1 layers) +# Fused in model.layers.29: +# - self_attn.o_proj (group 122, 1 layers) +# - mlp.down_proj (group 123, 1 layers) +# Fused in model.layers.3: +# - self_attn.o_proj (group 70, 1 layers) +# - mlp.down_proj (group 71, 1 layers) +# Fused in model.layers.30: +# - self_attn.o_proj (group 124, 1 layers) +# - mlp.down_proj (group 125, 1 layers) +# Fused in model.layers.31: +# - self_attn.o_proj (group 126, 1 layers) +# - mlp.down_proj (group 127, 1 layers) +# Fused in model.layers.4: +# - self_attn.o_proj (group 72, 1 layers) +# - mlp.down_proj (group 73, 1 layers) +# Fused in model.layers.5: +# - self_attn.o_proj (group 74, 1 layers) +# - mlp.down_proj (group 75, 1 layers) +# Fused in model.layers.6: +# - self_attn.o_proj (group 76, 1 layers) +# - mlp.down_proj (group 77, 1 layers) +# Fused in model.layers.7: +# - self_attn.o_proj (group 78, 1 layers) +# - mlp.down_proj (group 79, 1 layers) +# Fused in model.layers.8: +# - self_attn.o_proj (group 80, 1 layers) +# - mlp.down_proj (group 81, 1 layers) +# Fused in model.layers.9: +# - self_attn.o_proj (group 82, 1 layers) +# - mlp.down_proj (group 83, 1 layers) +# +# Mode: binary_search_constraint (measured) +# Constraint max_kl: 0.01 +# Constraint min_eap: 0.99 +# Weights: nll=0.0, kl=0.0, eap=1.0 +# +# Average bitwidth: 8.0454 +# Total params: 6979321856 +# Total bits: 56151597122 +# Final KL: 0.001326 +# Final EAP: 0.990180 +# Final ETL: 0.009820 +# Satisfies constraints: True +# Solver calls: 9 +# Evaluations: 9 +# +# Bitwidth distribution: +# 8-bit: 220 layers (98.2%) +# 4-bit: 4 layers (1.8%) +# 
+model.layers.0.mlp.gate_proj: 8 +model.layers.0.mlp.up_proj: 8 +model.layers.0.self_attn.k_proj: 8 +model.layers.0.self_attn.q_proj: 8 +model.layers.0.self_attn.v_proj: 8 +model.layers.1.mlp.gate_proj: 8 +model.layers.1.mlp.up_proj: 8 +model.layers.1.self_attn.k_proj: 8 +model.layers.1.self_attn.q_proj: 8 +model.layers.1.self_attn.v_proj: 8 +model.layers.2.mlp.gate_proj: 8 +model.layers.2.mlp.up_proj: 8 +model.layers.2.self_attn.k_proj: 8 +model.layers.2.self_attn.q_proj: 8 +model.layers.2.self_attn.v_proj: 8 +model.layers.3.mlp.gate_proj: 8 +model.layers.3.mlp.up_proj: 8 +model.layers.3.self_attn.k_proj: 8 +model.layers.3.self_attn.q_proj: 8 +model.layers.3.self_attn.v_proj: 8 +model.layers.4.mlp.gate_proj: 8 +model.layers.4.mlp.up_proj: 8 +model.layers.4.self_attn.k_proj: 8 +model.layers.4.self_attn.q_proj: 8 +model.layers.4.self_attn.v_proj: 8 +model.layers.5.mlp.gate_proj: 8 +model.layers.5.mlp.up_proj: 8 +model.layers.5.self_attn.k_proj: 8 +model.layers.5.self_attn.q_proj: 8 +model.layers.5.self_attn.v_proj: 8 +model.layers.6.mlp.gate_proj: 8 +model.layers.6.mlp.up_proj: 8 +model.layers.6.self_attn.k_proj: 8 +model.layers.6.self_attn.q_proj: 8 +model.layers.6.self_attn.v_proj: 8 +model.layers.7.mlp.gate_proj: 8 +model.layers.7.mlp.up_proj: 8 +model.layers.7.self_attn.k_proj: 8 +model.layers.7.self_attn.q_proj: 8 +model.layers.7.self_attn.v_proj: 8 +model.layers.8.mlp.gate_proj: 8 +model.layers.8.mlp.up_proj: 8 +model.layers.8.self_attn.k_proj: 8 +model.layers.8.self_attn.q_proj: 8 +model.layers.8.self_attn.v_proj: 8 +model.layers.9.mlp.gate_proj: 8 +model.layers.9.mlp.up_proj: 8 +model.layers.9.self_attn.k_proj: 8 +model.layers.9.self_attn.q_proj: 8 +model.layers.9.self_attn.v_proj: 8 +model.layers.10.mlp.gate_proj: 8 +model.layers.10.mlp.up_proj: 8 +model.layers.10.self_attn.k_proj: 8 +model.layers.10.self_attn.q_proj: 8 +model.layers.10.self_attn.v_proj: 8 +model.layers.11.mlp.gate_proj: 8 +model.layers.11.mlp.up_proj: 8 +model.layers.11.self_attn.k_proj: 8 +model.layers.11.self_attn.q_proj: 8 +model.layers.11.self_attn.v_proj: 8 +model.layers.12.mlp.gate_proj: 8 +model.layers.12.mlp.up_proj: 8 +model.layers.12.self_attn.k_proj: 8 +model.layers.12.self_attn.q_proj: 8 +model.layers.12.self_attn.v_proj: 8 +model.layers.13.mlp.gate_proj: 8 +model.layers.13.mlp.up_proj: 8 +model.layers.13.self_attn.k_proj: 8 +model.layers.13.self_attn.q_proj: 8 +model.layers.13.self_attn.v_proj: 8 +model.layers.14.mlp.gate_proj: 8 +model.layers.14.mlp.up_proj: 8 +model.layers.14.self_attn.k_proj: 8 +model.layers.14.self_attn.q_proj: 8 +model.layers.14.self_attn.v_proj: 8 +model.layers.15.mlp.gate_proj: 8 +model.layers.15.mlp.up_proj: 8 +model.layers.15.self_attn.k_proj: 8 +model.layers.15.self_attn.q_proj: 8 +model.layers.15.self_attn.v_proj: 8 +model.layers.16.mlp.gate_proj: 8 +model.layers.16.mlp.up_proj: 8 +model.layers.16.self_attn.k_proj: 8 +model.layers.16.self_attn.q_proj: 8 +model.layers.16.self_attn.v_proj: 8 +model.layers.17.mlp.gate_proj: 8 +model.layers.17.mlp.up_proj: 8 +model.layers.17.self_attn.k_proj: 8 +model.layers.17.self_attn.q_proj: 8 +model.layers.17.self_attn.v_proj: 8 +model.layers.18.mlp.gate_proj: 8 +model.layers.18.mlp.up_proj: 8 +model.layers.18.self_attn.k_proj: 8 +model.layers.18.self_attn.q_proj: 8 +model.layers.18.self_attn.v_proj: 8 +model.layers.19.mlp.gate_proj: 8 +model.layers.19.mlp.up_proj: 8 +model.layers.19.self_attn.k_proj: 8 +model.layers.19.self_attn.q_proj: 8 +model.layers.19.self_attn.v_proj: 8 +model.layers.20.mlp.gate_proj: 8 +model.layers.20.mlp.up_proj: 
8 +model.layers.20.self_attn.k_proj: 8 +model.layers.20.self_attn.q_proj: 8 +model.layers.20.self_attn.v_proj: 8 +model.layers.21.mlp.gate_proj: 8 +model.layers.21.mlp.up_proj: 8 +model.layers.21.self_attn.k_proj: 8 +model.layers.21.self_attn.q_proj: 8 +model.layers.21.self_attn.v_proj: 8 +model.layers.22.mlp.gate_proj: 8 +model.layers.22.mlp.up_proj: 8 +model.layers.22.self_attn.k_proj: 8 +model.layers.22.self_attn.q_proj: 8 +model.layers.22.self_attn.v_proj: 8 +model.layers.23.mlp.gate_proj: 8 +model.layers.23.mlp.up_proj: 8 +model.layers.23.self_attn.k_proj: 8 +model.layers.23.self_attn.q_proj: 8 +model.layers.23.self_attn.v_proj: 8 +model.layers.24.mlp.gate_proj: 8 +model.layers.24.mlp.up_proj: 8 +model.layers.24.self_attn.k_proj: 8 +model.layers.24.self_attn.q_proj: 8 +model.layers.24.self_attn.v_proj: 8 +model.layers.25.mlp.gate_proj: 8 +model.layers.25.mlp.up_proj: 8 +model.layers.25.self_attn.k_proj: 8 +model.layers.25.self_attn.q_proj: 8 +model.layers.25.self_attn.v_proj: 8 +model.layers.26.mlp.gate_proj: 8 +model.layers.26.mlp.up_proj: 8 +model.layers.26.self_attn.k_proj: 8 +model.layers.26.self_attn.q_proj: 8 +model.layers.26.self_attn.v_proj: 8 +model.layers.27.mlp.gate_proj: 8 +model.layers.27.mlp.up_proj: 8 +model.layers.27.self_attn.k_proj: 8 +model.layers.27.self_attn.q_proj: 8 +model.layers.27.self_attn.v_proj: 8 +model.layers.28.mlp.gate_proj: 8 +model.layers.28.mlp.up_proj: 8 +model.layers.28.self_attn.k_proj: 8 +model.layers.28.self_attn.q_proj: 8 +model.layers.28.self_attn.v_proj: 8 +model.layers.29.mlp.gate_proj: 8 +model.layers.29.mlp.up_proj: 8 +model.layers.29.self_attn.k_proj: 8 +model.layers.29.self_attn.q_proj: 8 +model.layers.29.self_attn.v_proj: 8 +model.layers.30.mlp.gate_proj: 8 +model.layers.30.mlp.up_proj: 8 +model.layers.30.self_attn.k_proj: 8 +model.layers.30.self_attn.q_proj: 8 +model.layers.30.self_attn.v_proj: 8 +model.layers.31.mlp.gate_proj: 8 +model.layers.31.mlp.up_proj: 8 +model.layers.31.self_attn.k_proj: 8 +model.layers.31.self_attn.q_proj: 8 +model.layers.31.self_attn.v_proj: 8 +model.layers.0.self_attn.o_proj: 8 +model.layers.0.mlp.down_proj: 8 +model.layers.1.self_attn.o_proj: 8 +model.layers.1.mlp.down_proj: 8 +model.layers.2.self_attn.o_proj: 8 +model.layers.2.mlp.down_proj: 8 +model.layers.3.self_attn.o_proj: 8 +model.layers.3.mlp.down_proj: 8 +model.layers.4.self_attn.o_proj: 8 +model.layers.4.mlp.down_proj: 8 +model.layers.5.self_attn.o_proj: 8 +model.layers.5.mlp.down_proj: 8 +model.layers.6.self_attn.o_proj: 8 +model.layers.6.mlp.down_proj: 8 +model.layers.7.self_attn.o_proj: 8 +model.layers.7.mlp.down_proj: 8 +model.layers.8.self_attn.o_proj: 8 +model.layers.8.mlp.down_proj: 8 +model.layers.9.self_attn.o_proj: 8 +model.layers.9.mlp.down_proj: 8 +model.layers.10.self_attn.o_proj: 8 +model.layers.10.mlp.down_proj: 8 +model.layers.11.self_attn.o_proj: 8 +model.layers.11.mlp.down_proj: 8 +model.layers.12.self_attn.o_proj: 8 +model.layers.12.mlp.down_proj: 8 +model.layers.13.self_attn.o_proj: 8 +model.layers.13.mlp.down_proj: 8 +model.layers.14.self_attn.o_proj: 8 +model.layers.14.mlp.down_proj: 8 +model.layers.15.self_attn.o_proj: 8 +model.layers.15.mlp.down_proj: 8 +model.layers.16.self_attn.o_proj: 8 +model.layers.16.mlp.down_proj: 8 +model.layers.17.self_attn.o_proj: 8 +model.layers.17.mlp.down_proj: 8 +model.layers.18.self_attn.o_proj: 8 +model.layers.18.mlp.down_proj: 8 +model.layers.19.self_attn.o_proj: 8 +model.layers.19.mlp.down_proj: 8 +model.layers.20.self_attn.o_proj: 8 +model.layers.20.mlp.down_proj: 8 
+model.layers.21.self_attn.o_proj: 8 +model.layers.21.mlp.down_proj: 8 +model.layers.22.self_attn.o_proj: 8 +model.layers.22.mlp.down_proj: 8 +model.layers.23.self_attn.o_proj: 8 +model.layers.23.mlp.down_proj: 8 +model.layers.24.self_attn.o_proj: 8 +model.layers.24.mlp.down_proj: 8 +model.layers.25.self_attn.o_proj: 4 +model.layers.25.mlp.down_proj: 8 +model.layers.26.self_attn.o_proj: 8 +model.layers.26.mlp.down_proj: 4 +model.layers.27.self_attn.o_proj: 8 +model.layers.27.mlp.down_proj: 4 +model.layers.28.self_attn.o_proj: 8 +model.layers.28.mlp.down_proj: 4 +model.layers.29.self_attn.o_proj: 8 +model.layers.29.mlp.down_proj: 8 +model.layers.30.self_attn.o_proj: 8 +model.layers.30.mlp.down_proj: 8 +model.layers.31.self_attn.o_proj: 8 +model.layers.31.mlp.down_proj: 8 diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/special_tokens_map.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..b43be96621d147110fb8a18b5776ec6e38516127 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/special_tokens_map.json @@ -0,0 +1,17 @@ +{ + "bos_token": { + "content": "<|begin_of_text|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "<|eot_id|>", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": "<|eot_id|>" +} diff --git a/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json new file mode 100644 index 0000000000000000000000000000000000000000..1c1d8d5c9024994f1d3b00f9662b8dd89ca13cf2 --- /dev/null +++ b/Llama-3.1-8B-Instruct/ll_bsearch_kl0.01_eap0.99_sha_bw8.05_4-8bit_grouped_seed42/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b +size 17209920
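
The *.safetensors and tokenizer.json entries above are committed as Git LFS pointers rather than as the binary payloads themselves. A pointer consists of three `key value` lines (version, oid, size); the sketch below, which is not part of the repository, parses the model-00004-of-00004.safetensors pointer exactly as it appears in the diff.

```python
# A minimal sketch (not part of the repository) that parses one of the
# Git LFS pointer files added in this diff. The pointer text is copied
# verbatim from model-00004-of-00004.safetensors above.
POINTER = """\
version https://git-lfs.github.com/spec/v1
oid sha256:44408391c116c33adf6e43ab53f84d75bee5e2956b293c34dc60509fb0fd825b
size 1168138808
"""

# Each pointer line is "key value"; split on the first space only.
fields = dict(line.split(" ", 1) for line in POINTER.strip().splitlines())
algorithm, digest = fields["oid"].split(":", 1)

print(algorithm)            # sha256
print(digest[:16])          # 44408391c116c33a
print(int(fields["size"]))  # 1168138808 bytes (~1.1 GiB shard)
```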
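The metadata in model.safetensors.index.json is internally consistent: total_size is exactly two bytes per parameter, matching "dtype": "float16" in config.json, and the four shard files are only slightly larger in aggregate because each .safetensors file carries its own JSON header. A quick check, with the numbers copied from the diff and only the comparison added:

```python
# Consistency check for model.safetensors.index.json. All numbers are
# copied from the diff above; only the arithmetic is added here.
total_parameters = 8_030_261_248
total_size = 16_060_522_496  # bytes, from the index metadata

# Exactly 2 bytes per parameter, i.e. float16, matching config.json.
assert total_size == 2 * total_parameters

# The shard files are ~33.6 kB larger in aggregate than total_size,
# because each .safetensors file also stores a JSON header of tensor
# names, dtypes, and offsets in addition to the raw tensor bytes.
shard_sizes = [4_976_698_592, 4_999_802_616, 4_915_916_080, 1_168_138_808]
print(sum(shard_sizes) - total_size)  # 33600
```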
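In quantization_config.txt, the bitwidth map offsets every nominal bitwidth by 0.156 effective bits. With the group size of 128 implied by the layer-directory name (4_5_6_7_8bit_asym_g128), that corresponds to 20 bits of per-group metadata; one fp16 scale plus a 4-bit zero point is a plausible decomposition for asymmetric quantization, though the exact split is an assumption here:

```python
# Checking the "Bitwidth map" offsets against asymmetric group
# quantization with group size 128 (from the directory name
# "4_5_6_7_8bit_asym_g128"). The 16 + 4 bit split of the per-group
# metadata (fp16 scale, 4-bit zero point) is an assumption.
GROUP_SIZE = 128
METADATA_BITS = 16 + 4  # assumed: fp16 scale + 4-bit zero point per group

overhead = METADATA_BITS / GROUP_SIZE  # 0.15625 effective bits per weight
for b in (4, 5, 6, 7, 8):
    print(f"{b}: {b + overhead:.3f}")
# Prints 4: 4.156 ... 8: 8.156, matching the map in the header.
```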
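The header's summary statistics can be reproduced from the per-layer assignment at the bottom of the file: four layers sit at 4 bits (layer 25's self_attn.o_proj and mlp.down_proj in layers 26, 27, and 28) and the remaining 220 at 8 bits. Weighting by parameter count with the effective bitwidths from the map recovers "Total params", "Total bits", and "Average bitwidth". The layer shapes below are taken from config.json in this commit; the script itself is illustrative, not part of the repository.

```python
# Reproducing "Total params", "Total bits", and "Average bitwidth" from
# quantization_config.txt, using shapes from config.json in this commit.
H, I, KV = 4096, 14336, 8 * 128  # hidden, intermediate, kv width (8 kv heads * head_dim 128)

params = {
    "q_proj": H * H, "o_proj": H * H,
    "k_proj": H * KV, "v_proj": H * KV,
    "gate_proj": H * I, "up_proj": H * I, "down_proj": H * I,
}
total = 32 * sum(params.values())
assert total == 6_979_321_856  # "Total params" in the header

# The four 4-bit layers listed at the end of the file.
four_bit = params["o_proj"] + 3 * params["down_proj"]  # 192_937_984

eff4, eff8 = 4.156, 8.156  # effective bits from the bitwidth map
bits = four_bit * eff4 + (total - four_bit) * eff8
print(round(bits))            # ~56_151_597_122, the header's "Total bits"
print(f"{bits / total:.4f}")  # 8.0454, the header's "Average bitwidth"
```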