boopathiraj committed
Commit 2126138 · verified · 1 Parent(s): b95e650

Upload Gemma3ForCausalLM

Files changed (3)
  1. config.json +0 -19
  2. generation_config.json +0 -2
  3. model.safetensors +2 -2
config.json CHANGED
@@ -10,7 +10,6 @@
  "dtype": "float16",
  "eos_token_id": 1,
  "final_logit_softcapping": null,
- "finetuned_from": "google/gemma-3-270m-it",
  "head_dim": 256,
  "hidden_activation": "gelu_pytorch_tanh",
  "hidden_size": 640,
@@ -42,30 +41,12 @@
  "num_hidden_layers": 18,
  "num_key_value_heads": 1,
  "pad_token_id": 0,
- "quantization_config": {
- "_load_in_4bit": true,
- "_load_in_8bit": false,
- "bnb_4bit_compute_dtype": "float16",
- "bnb_4bit_quant_storage": "uint8",
- "bnb_4bit_quant_type": "nf4",
- "bnb_4bit_use_double_quant": false,
- "llm_int8_enable_fp32_cpu_offload": false,
- "llm_int8_has_fp16_weight": false,
- "llm_int8_skip_modules": null,
- "llm_int8_threshold": 6.0,
- "load_in_4bit": true,
- "load_in_8bit": false,
- "quant_method": "bitsandbytes"
- },
  "query_pre_attn_scalar": 256,
  "rms_norm_eps": 1e-06,
  "rope_local_base_freq": 10000.0,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": 512,
- "task_specific_params": {
- "task": "instruction-following / math"
- },
  "transformers_version": "4.57.3",
  "use_bidirectional_attention": false,
  "use_cache": true,
 
generation_config.json CHANGED
@@ -1,12 +1,10 @@
  {
- "bos_token_id": 2,
  "cache_implementation": "hybrid",
  "do_sample": true,
  "eos_token_id": [
  1,
  106
  ],
- "pad_token_id": 0,
  "top_k": 64,
  "top_p": 0.95,
  "transformers_version": "4.57.3"
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0870cec1af386150266d414de0b393ba98eee7bd07e08ead6f981afcf93c0cdb
- size 392147628
+ oid sha256:23c9828a4ba86e5a2445aecaadfa7c200bb5f593a6f06c1654e3a32c84ad7cac
+ size 536222816
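The growth from 392,147,628 to 536,222,816 bytes is consistent with the checkpoint now holding full float16 weights rather than 4-bit bitsandbytes tensors. A back-of-the-envelope check (approximate parameter count, not a figure from this repository):

```python
# Rough sanity check: Gemma 3 270M has roughly 268M parameters, and float16
# stores 2 bytes per parameter, which lands close to the new shard size.
approx_params = 268_000_000
print(approx_params * 2)  # ~536,000,000 bytes, near the new 536,222,816
```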