rotary_emb #3
by echarlaix (HF Staff) - opened
- config.json +6 -6
- generation_config.json +1 -1
- pytorch_model.bin → model.safetensors +2 -2
config.json CHANGED

@@ -5,13 +5,14 @@
     "Zamba2ForCausalLM"
   ],
   "attention_dropout": 0.0,
-  "attention_head_dim":
+  "attention_head_dim": 2,
   "attention_hidden_size": 32,
   "bos_token_id": 1,
   "chunk_size": 256,
   "conv_kernel": 3,
   "d_model": 16,
   "d_state": 32,
+  "dtype": "float32",
   "eos_token_id": 2,
   "expand": 2,
   "hidden_act": "gelu",
@@ -36,9 +37,9 @@
   "max_position_embeddings": 4096,
   "model_type": "zamba2",
   "n_mamba_heads": 8,
-  "num_attention_heads":
+  "num_attention_heads": 16,
   "num_hidden_layers": 4,
-  "num_key_value_heads":
+  "num_key_value_heads": 16,
   "num_logits_to_keep": 1,
   "num_mem_blocks": 1,
   "num_query_groups": 32,
@@ -49,13 +50,12 @@
   "time_step_limit": null,
   "time_step_max": 0.1,
   "time_step_min": 0.001,
-  "
-  "transformers_version": "4.55.4",
+  "transformers_version": "4.57.6",
   "use_cache": true,
   "use_conv_bias": true,
   "use_long_context": false,
   "use_mem_eff_path": false,
-  "use_mem_rope":
+  "use_mem_rope": true,
   "use_shared_attention_adapter": false,
   "vocab_size": 50280
 }
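Note: the new values are mutually consistent with the fields already in the config (attention_head_dim × num_attention_heads = 2 × 16 = 32 = attention_hidden_size, and num_key_value_heads equals num_attention_heads, i.e. plain multi-head attention with no grouped-query sharing), and the `dtype` key is how recent transformers releases serialize the model dtype in place of the older `torch_dtype`. A minimal sketch of a consistency check after loading, assuming the standard transformers API (the repo path below is a placeholder, not this repo's actual id):

```python
# Minimal sketch: reload the updated config and verify the attention fields.
# "path/to/this/repo" is a placeholder for this model repo's id or local path.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo")

# 2 * 16 == 32, matching the attention_hidden_size already in the config.
assert config.attention_head_dim * config.num_attention_heads == config.attention_hidden_size
# 16 KV heads for 16 query heads: plain multi-head attention, no GQA.
assert config.num_key_value_heads == config.num_attention_heads
```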
generation_config.json CHANGED

@@ -3,5 +3,5 @@
   "bos_token_id": 1,
   "eos_token_id": 2,
   "pad_token_id": 0,
-  "transformers_version": "4.
+  "transformers_version": "4.57.6"
 }
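Note: only the serialized transformers_version changes in this file; the special-token ids stay as they were. A quick sanity check, again with a placeholder repo path:

```python
# Sketch: confirm the regenerated generation config kept the token ids intact.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("path/to/this/repo")  # placeholder path
assert (gen.bos_token_id, gen.eos_token_id, gen.pad_token_id) == (1, 2, 0)
```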
pytorch_model.bin → model.safetensors RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:245acf626383bc1f49e9e1630edb9d7ad3d2c3be2affda6db9978dc14ec38c8b
+size 3398168
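Note: the diff above is only the Git LFS pointer file; it records the sha256 and byte size (≈3.4 MB) of the re-uploaded blob, not the weights themselves. A minimal sketch of the .bin → .safetensors conversion this rename implies, assuming the checkpoint holds a plain state dict with no shared tensors:

```python
# Sketch: convert a pickle-based PyTorch checkpoint to safetensors.
import torch
from safetensors.torch import save_file

# weights_only=True avoids executing arbitrary pickled code on load.
state_dict = torch.load("pytorch_model.bin", map_location="cpu", weights_only=True)

# safetensors stores raw tensor bytes plus a small JSON header, so the
# resulting file size (3398168 bytes here) tracks the total parameter bytes.
save_file(state_dict, "model.safetensors", metadata={"format": "pt"})
```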