PabloCano1 committed
Commit 59cd7a5 · verified · 1 Parent(s): 66521d6

Upload Gemma3ForCausalLM

Files changed (3):
  1. config.json +46 -46
  2. generation_config.json +6 -3
  3. model.safetensors +2 -2
config.json CHANGED
@@ -1,54 +1,54 @@
 {
-  "activation_function": "gelu_new",
+  "_sliding_window_pattern": 6,
   "architectures": [
-    "GPTNeoForCausalLM"
+    "Gemma3ForCausalLM"
   ],
-  "attention_dropout": 0,
-  "attention_layers": [
-    "global",
-    "local",
-    "global",
-    "local",
-    "global",
-    "local",
-    "global",
-    "local",
-    "global",
-    "local",
-    "global",
-    "local"
-  ],
-  "attention_types": [
-    [
-      [
-        "global",
-        "local"
-      ],
-      6
-    ]
-  ],
-  "bos_token_id": 50256,
-  "classifier_dropout": 0.1,
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
   "dtype": "bfloat16",
-  "embed_dropout": 0,
-  "eos_token_id": 50256,
-  "gradient_checkpointing": false,
-  "hidden_size": 768,
+  "eos_token_id": 1,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 640,
   "initializer_range": 0.02,
-  "intermediate_size": null,
-  "layer_norm_epsilon": 1e-05,
-  "max_position_embeddings": 2048,
-  "model_type": "gpt_neo",
-  "num_heads": 12,
-  "num_layers": 12,
-  "resid_dropout": 0,
-  "summary_activation": null,
-  "summary_first_dropout": 0.1,
-  "summary_proj_to_labels": true,
-  "summary_type": "cls_index",
-  "summary_use_proj": true,
+  "intermediate_size": 2048,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 18,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000.0,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": 512,
   "transformers_version": "4.56.2",
+  "use_bidirectional_attention": false,
   "use_cache": false,
-  "vocab_size": 50257,
-  "window_size": 256
+  "vocab_size": 262144
 }
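The diff replaces a GPT-Neo-style config with a Gemma 3 text config: 18 layers of hidden size 640, 4 query heads sharing 1 key/value head, a 262,144-token vocabulary, and a five-sliding-plus-one-full attention layout ("_sliding_window_pattern": 6, window 512). A minimal sketch of loading the new config and sanity-checking that layout follows; the repo id is a placeholder, not taken from this commit:

# Minimal sketch (not part of the commit). The repo id is hypothetical;
# substitute the repository this commit belongs to.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("PabloCano1/<model-repo>")  # hypothetical id
assert config.model_type == "gemma3_text"

# "_sliding_window_pattern": 6 means every 6th layer uses full attention;
# the rest use sliding-window attention over the last 512 tokens.
expected = ["full_attention" if (i + 1) % 6 == 0 else "sliding_attention"
            for i in range(config.num_hidden_layers)]
assert config.layer_types == expected
assert config.sliding_window == 512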
generation_config.json CHANGED
@@ -1,8 +1,11 @@
 {
-  "_from_model_config": true,
-  "bos_token_id": 50256,
+  "bos_token_id": 2,
+  "do_sample": true,
   "eos_token_id": [
-    50256
+    1
   ],
+  "pad_token_id": 0,
+  "top_k": 64,
+  "top_p": 0.95,
   "transformers_version": "4.56.2"
 }
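The new defaults switch generation from greedy decoding to sampling. transformers' generate() reads these values from generation_config.json automatically, so the two calls in the sketch below are equivalent; the repo id is again a placeholder:

# Minimal sketch (not part of the commit); the repo id is hypothetical.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "PabloCano1/<model-repo>"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Hello", return_tensors="pt")

# Picks up the checked-in defaults: do_sample=True, top_k=64, top_p=0.95.
out = model.generate(**inputs, max_new_tokens=32)

# Spelling the same defaults out explicitly:
out = model.generate(**inputs, max_new_tokens=32,
                     do_sample=True, top_k=64, top_p=0.95)
print(tokenizer.decode(out[0], skip_special_tokens=True))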
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de3a50a2ee12941f315604923d32a8e55080b0f38d38f46c1476a877cf3ef48b
-size 250414224
+oid sha256:22d2e15f3b80ce9a765f8bf632bd8278816a103d74a953f2ac2431c764dd48f7
+size 536223056
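The weight file roughly doubles: 536,223,056 bytes at bfloat16 (2 bytes per weight) is about 268M parameters, versus ~125M for the previous 250,414,224-byte file, consistent with swapping in a Gemma 3 270M-class model. A sketch for verifying a downloaded copy against the LFS pointer above (the local path is an assumption):

# Minimal sketch (not part of the commit): check a local download
# against the sha256 oid and size recorded in the LFS pointer.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # local download path (assumption)
expected_oid = "22d2e15f3b80ce9a765f8bf632bd8278816a103d74a953f2ac2431c764dd48f7"
expected_size = 536223056

assert path.stat().st_size == expected_size, "size mismatch"

h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid, "checksum mismatch"
print("model.safetensors matches the LFS pointer")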