2796gauravc committed on
Commit 1ce014c · verified · 1 Parent(s): 9343273

Change to Gemma v1 architecture for Transformers.js compatibility

Files changed (1): config.json (+2 -6)
config.json CHANGED
@@ -1,18 +1,16 @@
 {
   "_sliding_window_pattern": 6,
   "architectures": [
-    "Gemma2ForCausalLM"
+    "GemmaForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "attn_logit_softcapping": null,
   "bos_token_id": 2,
   "dtype": "bfloat16",
   "eos_token_id": [
     1,
     50
   ],
-  "final_logit_softcapping": null,
   "head_dim": 256,
   "hidden_activation": "gelu_pytorch_tanh",
   "hidden_size": 640,
@@ -39,17 +37,15 @@
     "full_attention"
   ],
   "max_position_embeddings": 32768,
-  "model_type": "gemma2",
+  "model_type": "gemma",
   "num_attention_heads": 4,
   "num_hidden_layers": 18,
   "num_key_value_heads": 1,
   "pad_token_id": 0,
-  "query_pre_attn_scalar": 256,
   "rms_norm_eps": 1e-06,
   "rope_local_base_freq": 10000.0,
   "rope_scaling": null,
   "rope_theta": 1000000.0,
-  "sliding_window": 512,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.55.4",
   "use_bidirectional_attention": false,
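
Note on the change: switching "model_type" to "gemma" and the architecture to "GemmaForCausalLM" makes Transformers.js resolve this checkpoint through its Gemma (v1) modeling code, which is why the Gemma 2-specific fields ("attn_logit_softcapping", "final_logit_softcapping", "query_pre_attn_scalar", "sliding_window") are dropped from the config. A minimal sketch of loading the edited checkpoint in Transformers.js is shown below; the repository id is a placeholder, not the actual repo name.

// Minimal sketch, assuming the Transformers.js v3 package and a placeholder repo id.
import { pipeline } from "@huggingface/transformers";

// "2796gauravc/<model-repo>" is hypothetical; substitute the real repository id.
const generator = await pipeline("text-generation", "2796gauravc/<model-repo>");

// Generate a short completion to confirm the model loads under the Gemma v1 architecture.
const output = await generator("Write a haiku about autumn.", { max_new_tokens: 64 });
console.log(output[0].generated_text);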