edobobo committed
Commit 1d2b832 · verified · 1 Parent(s): 0de8cfe

Upload model

Files changed (2)
  1. config.json +96 -0
  2. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,96 @@
+{
+  "architectures": [
+    "Gemma4TextModel"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attention_k_eq_v": false,
+  "bos_token_id": 2,
+  "dtype": "bfloat16",
+  "enable_moe_block": false,
+  "eos_token_id": 1,
+  "expert_intermediate_size": null,
+  "final_logit_softcapping": 30.0,
+  "global_head_dim": 512,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 2560,
+  "hidden_size_per_layer_input": 256,
+  "initializer_range": 0.02,
+  "intermediate_size": 10240,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 131072,
+  "model_type": "gemma4_text",
+  "moe_intermediate_size": null,
+  "num_attention_heads": 8,
+  "num_experts": null,
+  "num_global_key_value_heads": null,
+  "num_hidden_layers": 42,
+  "num_key_value_heads": 2,
+  "num_kv_shared_layers": 18,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-06,
+  "rope_parameters": {
+    "full_attention": {
+      "partial_rotary_factor": 0.25,
+      "rope_theta": 1000000.0,
+      "rope_type": "proportional"
+    },
+    "sliding_attention": {
+      "rope_theta": 10000.0,
+      "rope_type": "default"
+    }
+  },
+  "sliding_window": 512,
+  "tie_word_embeddings": true,
+  "top_k_experts": null,
+  "transformers_version": "5.5.0",
+  "use_bidirectional_attention": null,
+  "use_cache": true,
+  "use_double_wide_mlp": false,
+  "vocab_size": 262144,
+  "vocab_size_per_layer_input": 262144
+}
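
The "layer_types" array follows a regular pattern: five sliding-window attention layers followed by one full-attention layer, repeated seven times to cover the 42 hidden layers, so with "sliding_window": 512 only every sixth layer attends over the full 131072-token context. A minimal sketch (plain Python, using only values taken verbatim from this config) that reconstructs and checks the pattern:

```python
# Reconstruct the "layer_types" pattern from config.json:
# blocks of 5 sliding-window layers + 1 full-attention layer,
# repeated to cover all 42 hidden layers.

NUM_HIDDEN_LAYERS = 42  # "num_hidden_layers" in config.json
BLOCK = ["sliding_attention"] * 5 + ["full_attention"]

layer_types = (BLOCK * (NUM_HIDDEN_LAYERS // len(BLOCK)))[:NUM_HIDDEN_LAYERS]

assert len(layer_types) == NUM_HIDDEN_LAYERS
assert layer_types.count("full_attention") == 7  # one per 6-layer block
print(layer_types[:6])
# ['sliding_attention', 'sliding_attention', 'sliding_attention',
#  'sliding_attention', 'sliding_attention', 'full_attention']
```

Note that the two attention types also get separate RoPE settings in "rope_parameters": full-attention layers use rope_theta 1000000.0 with a 0.25 partial rotary factor, while sliding-window layers use the default rope_type with rope_theta 10000.0.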
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d6b5f2994e4f5294516c8da2c1ec06b54199faec1923627c7f1df937e18f903
+size 15036218268
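
model.safetensors is tracked with Git LFS, so the diff adds only this three-line pointer (spec version, sha256 oid, and byte size), not the roughly 15 GB payload itself. A minimal sketch, assuming the weights have already been downloaded to a local model.safetensors (the path is illustrative; the oid and size are copied verbatim from the pointer above), to verify the file against the pointer:

```python
# Verify a downloaded file against the Git LFS pointer in this diff.
import hashlib
import os

EXPECTED_OID = "4d6b5f2994e4f5294516c8da2c1ec06b54199faec1923627c7f1df937e18f903"
EXPECTED_SIZE = 15036218268
PATH = "model.safetensors"  # assumed local path to the downloaded weights

# Cheap check first: the byte size must match the pointer's "size" field.
assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"

# Then hash the file in 1 MiB chunks and compare against the "oid" field.
sha = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS pointer verified")
```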