qgallouedec (HF Staff) committed
Commit 47430c5 · verified · 1 Parent(s): 40a4661

Upload Gemma2ForCausalLM

Files changed (3):
  1. config.json +33 -4
  2. generation_config.json +6 -2
  3. model.safetensors +2 -2
config.json CHANGED
@@ -6,14 +6,44 @@
   "attention_dropout": 0.0,
   "attn_logit_softcapping": 50.0,
   "bos_token_id": 2,
-  "eos_token_id": 1,
+  "cache_implementation": "hybrid",
+  "dtype": "float32",
+  "eos_token_id": [
+    1,
+    107
+  ],
   "final_logit_softcapping": 30.0,
   "head_dim": 256,
+  "hidden_act": "gelu_pytorch_tanh",
   "hidden_activation": "gelu_pytorch_tanh",
   "hidden_size": 8,
   "initializer_range": 0.02,
   "intermediate_size": 32,
   "layer_types": [
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "full_attention",
     "sliding_attention",
     "full_attention"
   ],
@@ -27,8 +57,7 @@
   "rms_norm_eps": 1e-06,
   "rope_theta": 10000.0,
   "sliding_window": 4096,
-  "torch_dtype": "float32",
-  "transformers_version": "4.55.0.dev0",
+  "transformers_version": "4.57.0.dev0",
   "use_cache": true,
-  "vocab_size": 256249
+  "vocab_size": 256000
 }
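The functional changes here are the list-valued eos_token_id (in the standard Gemma tokenizer, id 1 is typically <eos> and 107 is <end_of_turn>), the explicit per-layer layer_types schedule (now 26 entries, alternating sliding and full attention), and the torch_dtype → dtype key rename used by newer transformers versions. A minimal sketch of how to inspect these values; the repo id below is a placeholder, since the diff does not name the repository:

    from transformers import AutoConfig

    # Placeholder repo id -- substitute the actual model repository.
    config = AutoConfig.from_pretrained("your-org/tiny-gemma2")

    print(config.eos_token_id)      # [1, 107]: generation may stop on either id
    print(len(config.layer_types))  # 26 entries, one per hidden layer
    print(config.layer_types[:2])   # ['sliding_attention', 'full_attention']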
generation_config.json CHANGED
@@ -1,7 +1,11 @@
 {
   "_from_model_config": true,
   "bos_token_id": 2,
-  "eos_token_id": 1,
+  "cache_implementation": "hybrid",
+  "eos_token_id": [
+    1,
+    107
+  ],
   "pad_token_id": 0,
-  "transformers_version": "4.55.0.dev0"
+  "transformers_version": "4.57.0.dev0"
 }
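generation_config.json mirrors the config: with a list-valued eos_token_id, generate() stops at whichever listed id is produced first, and "cache_implementation": "hybrid" selects the hybrid sliding/static KV cache that Gemma2 uses. A hedged usage sketch, again with a placeholder repo id:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained("your-org/tiny-gemma2")
    tokenizer = AutoTokenizer.from_pretrained("your-org/tiny-gemma2")

    inputs = tokenizer("Hello", return_tensors="pt")
    # eos_token_id and cache_implementation default to the values in
    # generation_config.json; they are passed explicitly here for clarity.
    out = model.generate(**inputs, max_new_tokens=8,
                         eos_token_id=[1, 107], cache_implementation="hybrid")
    print(tokenizer.decode(out[0]))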
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c553714d961ce15585363bf35e318a9a32ad55182f2b70a6ddf55cd4866a656
-size 8405552
+oid sha256:31c16135c340347c6e76077a3655ccf85a95b286cfb37a396b9ce0c2723c8c9f
+size 8397584
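The 7,968-byte size drop is exactly what the vocab change predicts: 249 fewer rows (256249 − 256000) in an 8-dimensional float32 embedding is 249 × 8 × 4 = 7,968 bytes, which suggests the input and output embeddings are tied, so only one matrix shrinks. A quick arithmetic check:

    old_size, new_size = 8405552, 8397584
    rows_removed = 256249 - 256000          # vocab_size before minus after
    bytes_removed = rows_removed * 8 * 4    # hidden_size=8, float32 = 4 bytes
    assert old_size - new_size == bytes_removed == 7968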