caug37 committed
Commit 515441f · verified · 1 Parent(s): bc593bf

Upload Gemma3ForCausalLM

Files changed (3)
  1. config.json +67 -0
  2. generation_config.json +13 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "_sliding_window_pattern": 6,
+   "architectures": [
+     "Gemma3ForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "attn_logit_softcapping": null,
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "dtype": "bfloat16",
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "final_logit_softcapping": null,
+   "head_dim": 256,
+   "hidden_activation": "gelu_pytorch_tanh",
+   "hidden_size": 1152,
+   "initializer_range": 0.02,
+   "intermediate_size": 6912,
+   "layer_types": [
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "sliding_attention",
+     "full_attention",
+     "sliding_attention",
+     "sliding_attention"
+   ],
+   "max_position_embeddings": 32768,
+   "model_type": "gemma3_text",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 26,
+   "num_key_value_heads": 1,
+   "pad_token_id": 0,
+   "query_pre_attn_scalar": 256,
+   "rms_norm_eps": 1e-06,
+   "rope_local_base_freq": 10000,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "sliding_window": 512,
+   "sliding_window_pattern": 6,
+   "transformers_version": "4.57.1",
+   "use_bidirectional_attention": false,
+   "use_cache": true,
+   "vocab_size": 262144
+ }
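For orientation, the 26-entry "layer_types" list above follows the "sliding_window_pattern": 6 rule: every sixth layer uses full attention and the remaining layers use sliding-window attention with a 512-token window. A minimal sketch that reconstructs the pattern, using only values taken from config.json:

```python
# Reconstruct "layer_types" from "sliding_window_pattern": 6 and
# "num_hidden_layers": 26 -- every 6th layer is full attention,
# all others are sliding-window attention (window size 512).
num_hidden_layers = 26
sliding_window_pattern = 6

layer_types = [
    "full_attention" if (i + 1) % sliding_window_pattern == 0 else "sliding_attention"
    for i in range(num_hidden_layers)
]

# Full attention falls on layers 6, 12, 18 and 24 (1-indexed),
# matching the list in config.json above.
print(layer_types)
```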
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 2,
+   "cache_implementation": "hybrid",
+   "do_sample": true,
+   "eos_token_id": [
+     1,
+     106
+   ],
+   "pad_token_id": 0,
+   "top_k": 64,
+   "top_p": 0.95,
+   "transformers_version": "4.57.1"
+ }
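These defaults (do_sample=true, top_k=64, top_p=0.95, eos ids 1 and 106) are read automatically by transformers' generate(). A minimal sketch of running the checkpoint, assuming tokenizer files are also present in the repository; the repo id and prompt are placeholders, not taken from this commit:

```python
# Sketch: generation using the sampling defaults from generation_config.json.
# The repo id and prompt below are placeholder assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "caug37/<repo-name>"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Write a one-line haiku about attention.", return_tensors="pt")
with torch.no_grad():
    # do_sample / top_k / top_p / eos_token_id are taken from generation_config.json
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```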
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b16a3b427b023b5071276b4d0499589d8877a07881f7d1370b5a9079bfb2d0b
+ size 1999811208
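This is a Git LFS pointer rather than the weights themselves: the actual model.safetensors is about 2.0 GB, which at 2 bytes per bfloat16 value is consistent with roughly one billion parameters. A minimal sketch for verifying a downloaded copy against the pointer's size and sha256 (the local path is a placeholder assumption):

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

expected_sha256 = "9b16a3b427b023b5071276b4d0499589d8877a07881f7d1370b5a9079bfb2d0b"
expected_size = 1999811208  # bytes, from the pointer file

path = "model.safetensors"  # placeholder local path

h = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

print("size matches:", size == expected_size)
print("sha256 matches:", h.hexdigest() == expected_sha256)
```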