Chohui committed
Commit 71a77b3 · verified · 1 Parent(s): e1f111e

(Trained with Unsloth)

Files changed (3)
  1. config.json +99 -0
  2. tokenizer.json +2 -2
  3. tokenizer_config.json +4 -3
config.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "architectures": [
+     "Gemma3ForConditionalGeneration"
+   ],
+   "boi_token_index": 255999,
+   "bos_token_id": 2,
+   "torch_dtype": "bfloat16",
+   "eoi_token_index": 256000,
+   "eos_token_id": 106,
+   "image_token_index": 262144,
+   "initializer_range": 0.02,
+   "mm_tokens_per_image": 256,
+   "model_type": "gemma3",
+   "pad_token_id": 0,
+   "text_config": {
+     "_sliding_window_pattern": 6,
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "attn_logit_softcapping": null,
+     "cache_implementation": "hybrid",
+     "torch_dtype": "bfloat16",
+     "final_logit_softcapping": null,
+     "head_dim": 256,
+     "hidden_activation": "gelu_pytorch_tanh",
+     "hidden_size": 2560,
+     "initializer_range": 0.02,
+     "intermediate_size": 10240,
+     "layer_types": [
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "full_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention",
+       "sliding_attention"
+     ],
+     "max_position_embeddings": 131072,
+     "model_type": "gemma3_text",
+     "num_attention_heads": 8,
+     "num_hidden_layers": 34,
+     "num_key_value_heads": 4,
+     "query_pre_attn_scalar": 256,
+     "rms_norm_eps": 1e-06,
+     "rope_local_base_freq": 10000.0,
+     "rope_scaling": {
+       "factor": 8.0,
+       "rope_type": "linear"
+     },
+     "rope_theta": 1000000.0,
+     "sliding_window": 1024,
+     "use_cache": true,
+     "vocab_size": 262208
+   },
+   "transformers_version": "4.56.2",
+   "unsloth_fixed": true,
+   "unsloth_version": "2025.10.4",
+   "vision_config": {
+     "attention_dropout": 0.0,
+     "torch_dtype": "bfloat16",
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "image_size": 896,
+     "intermediate_size": 4304,
+     "layer_norm_eps": 1e-06,
+     "model_type": "siglip_vision_model",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "patch_size": 14,
+     "vision_use_head": false
+   }
+ }
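Note: the text_config above encodes Gemma 3's hybrid attention schedule. With "_sliding_window_pattern": 6, every sixth layer uses full (global) attention and the remaining layers use the 1024-token sliding window, which is exactly the pattern in the 34-entry layer_types list. A minimal Python sketch, assuming only the values shown in this config, that reproduces that list:

    # Sketch: rebuild the layer_types list above from text_config values.
    # Every `_sliding_window_pattern`-th layer is full attention; the
    # rest use the 1024-token sliding window.
    num_hidden_layers = 34          # from "num_hidden_layers"
    sliding_window_pattern = 6      # from "_sliding_window_pattern"

    layer_types = [
        "full_attention" if (i + 1) % sliding_window_pattern == 0
        else "sliding_attention"
        for i in range(num_hidden_layers)
    ]

    # Full attention lands on layers 6, 12, 18, 24, 30 (1-indexed).
    assert layer_types.count("full_attention") == 5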
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:14053c1f3b38a060236792c0f799c08c2ce6e5f338e046bbf910069c8e53f382
- size 33384822
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
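The file above is a Git LFS pointer, not the tokenizer itself: the oid is the SHA-256 of the real file's bytes and size is its byte count. A quick sketch for checking a downloaded tokenizer.json against the new pointer (the local path is an assumption):

    # Sketch: verify a downloaded tokenizer.json against the LFS pointer.
    import hashlib
    from pathlib import Path

    data = Path("tokenizer.json").read_bytes()  # assumed local path
    print(len(data) == 33384568)                # matches the "size" line
    print(hashlib.sha256(data).hexdigest() ==
          "4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795")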
tokenizer_config.json CHANGED
@@ -51336,11 +51336,12 @@
  "image_token": "<image_soft_token>",
  "model_max_length": 131072,
  "pad_token": "<pad>",
- "padding_side": "right",
+ "padding_side": "left",
  "processor_class": "Gemma3Processor",
  "sp_model_kwargs": null,
  "spaces_between_special_tokens": false,
  "tokenizer_class": "GemmaTokenizer",
  "unk_token": "<unk>",
- "use_default_system_prompt": false
- }
+ "use_default_system_prompt": false,
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
+ }
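The added chat_template is a Jinja string that folds a leading system message into the first user turn, maps the "assistant" role to Gemma's "model" role, and wraps each turn in <start_of_turn>/<end_of_turn> markers. A minimal sketch of rendering it with transformers; the repo id is a placeholder, not this repository's actual name:

    # Sketch: render the chat template added above.
    from transformers import AutoTokenizer

    # Placeholder repo id; substitute the actual checkpoint.
    tokenizer = AutoTokenizer.from_pretrained("your-org/gemma3-checkpoint")

    messages = [
        {"role": "system", "content": "You are concise."},
        {"role": "user", "content": "Hello!"},
    ]

    # The template prepends the system text to the first user turn;
    # add_generation_prompt appends the "<start_of_turn>model" header
    # so generation continues as the model's reply.
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    print(prompt)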