giayphuyen committed
Commit 3ef6271 · verified · 1 Parent(s): 48114ad

Upload GemmaForCausalLM

Files changed (3)
  1. config.json +19 -54
  2. generation_config.json +2 -8
  3. model.safetensors +2 -2
config.json CHANGED
@@ -1,15 +1,22 @@
 {
   "architectures": [
-    "Gemma3ForConditionalGeneration"
+    "GemmaForCausalLM"
   ],
-  "boi_token_index": 255999,
+  "attention_bias": false,
+  "attention_dropout": 0.0,
   "bos_token_id": 2,
-  "eoi_token_index": 256000,
-  "eos_token_id": 106,
-  "image_token_index": 262144,
+  "eos_token_id": 1,
+  "head_dim": 256,
+  "hidden_act": "gelu",
+  "hidden_activation": null,
+  "hidden_size": 2048,
   "initializer_range": 0.02,
-  "mm_tokens_per_image": 256,
-  "model_type": "gemma3",
+  "intermediate_size": 16384,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 18,
+  "num_key_value_heads": 1,
   "pad_token_id": 0,
   "pretraining_tp": 1,
   "quantization_config": {
@@ -27,53 +34,11 @@
     "load_in_8bit": false,
     "quant_method": "bitsandbytes"
   },
-  "text_config": {
-    "attention_bias": false,
-    "attention_dropout": 0.0,
-    "attn_logit_softcapping": null,
-    "cache_implementation": "hybrid",
-    "final_logit_softcapping": null,
-    "head_dim": 256,
-    "hidden_activation": "gelu_pytorch_tanh",
-    "hidden_size": 2560,
-    "initializer_range": 0.02,
-    "intermediate_size": 10240,
-    "max_position_embeddings": 131072,
-    "model_type": "gemma3_text",
-    "num_attention_heads": 8,
-    "num_hidden_layers": 34,
-    "num_key_value_heads": 4,
-    "query_pre_attn_scalar": 256,
-    "rms_norm_eps": 1e-06,
-    "rope_local_base_freq": 10000.0,
-    "rope_scaling": {
-      "factor": 8.0,
-      "rope_type": "linear"
-    },
-    "rope_theta": 1000000.0,
-    "sliding_window": 1024,
-    "sliding_window_pattern": 6,
-    "torch_dtype": "float16",
-    "use_cache": true,
-    "vocab_size": 262208
-  },
-  "torch_dtype": "float32",
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.52.2",
-  "unsloth_fixed": true,
   "use_cache": false,
-  "vision_config": {
-    "attention_dropout": 0.0,
-    "hidden_act": "gelu_pytorch_tanh",
-    "hidden_size": 1152,
-    "image_size": 896,
-    "intermediate_size": 4304,
-    "layer_norm_eps": 1e-06,
-    "model_type": "siglip_vision_model",
-    "num_attention_heads": 16,
-    "num_channels": 3,
-    "num_hidden_layers": 27,
-    "patch_size": 14,
-    "torch_dtype": "float16",
-    "vision_use_head": false
-  }
+  "vocab_size": 256000
 }
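
Note: with this change the checkpoint loads as a text-only GemmaForCausalLM rather than the multimodal Gemma3ForConditionalGeneration, and the bitsandbytes settings are read from quantization_config in config.json. A minimal loading sketch, assuming bitsandbytes and accelerate are installed; the repo id below is a placeholder, since the commit page does not name the repository:

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "giayphuyen/gemma-2b-bnb"  # hypothetical id; the commit page does not name the repo

# transformers applies quantization_config from config.json automatically,
# so no explicit BitsAndBytesConfig is needed; device_map="auto" needs accelerate.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")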
generation_config.json CHANGED
@@ -1,13 +1,7 @@
 {
+  "_from_model_config": true,
   "bos_token_id": 2,
-  "cache_implementation": "hybrid",
-  "do_sample": true,
-  "eos_token_id": [
-    1,
-    106
-  ],
+  "eos_token_id": 1,
   "pad_token_id": 0,
-  "top_k": 64,
-  "top_p": 0.95,
   "transformers_version": "4.52.2"
 }
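
Note: the trimmed generation_config.json drops the sampling defaults (do_sample, top_k, top_p) and the [1, 106] stop list, so generate() now decodes greedily and stops at eos_token_id 1. Continuing the hypothetical sketch above, the old sampling behaviour has to be requested explicitly per call:

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)

# Greedy decoding is now the default.
greedy = model.generate(**inputs, max_new_tokens=32)

# The previous sampling defaults, passed explicitly.
sampled = model.generate(**inputs, max_new_tokens=32,
                         do_sample=True, top_k=64, top_p=0.95)
print(tokenizer.decode(greedy[0], skip_special_tokens=True))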
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3db600fbadd6fa45797c9205a54a015f9c2bd41783e62e6510c75237c3ce75f5
-size 3546662926
+oid sha256:cba195b5d5e80d001b934e75267a5f04a4da04041994f44b48e9be5610bae2df
+size 2242213200
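
Note: the new LFS pointer references a roughly 2.24 GB weight file, down from roughly 3.55 GB. A downloaded copy can be verified against the digest and size recorded in the pointer above; the local path is an assumption:

import hashlib
import os

path = "model.safetensors"  # assumed local download path

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Compare against the oid and size from the LFS pointer.
assert digest.hexdigest() == "cba195b5d5e80d001b934e75267a5f04a4da04041994f44b48e9be5610bae2df"
assert os.path.getsize(path) == 2242213200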