Matej committed on
Commit 7761c3c · verified · 1 Parent(s): ded2015

Upload folder using huggingface_hub

chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+     {%- if messages[0]['content'] is string -%}
+         {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+     {%- else -%}
+         {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+     {%- endif -%}
+     {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+     {%- set first_user_prefix = "" -%}
+     {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+     {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+         {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+     {%- endif -%}
+     {%- if (message['role'] == 'assistant') -%}
+         {%- set role = "model" -%}
+     {%- else -%}
+         {%- set role = message['role'] -%}
+     {%- endif -%}
+     {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+     {%- if message['content'] is string -%}
+         {{ message['content'] | trim }}
+     {%- elif message['content'] is iterable -%}
+         {%- for item in message['content'] -%}
+             {%- if item['type'] == 'image' -%}
+                 {{ '<start_of_image>' }}
+             {%- elif item['type'] == 'text' -%}
+                 {{ item['text'] | trim }}
+             {%- endif -%}
+         {%- endfor -%}
+     {%- else -%}
+         {{ raise_exception("Invalid content type") }}
+     {%- endif -%}
+     {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+     {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
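For reference, the added template renders with plain jinja2 as well as through transformers. A minimal sketch, assuming chat_template.jinja sits in the working directory and the jinja2 package is installed; raise_exception is a helper that transformers normally injects into the template environment, mimicked here, and the messages are illustrative:

```python
# Minimal sketch: render chat_template.jinja directly with jinja2.
# In normal use, tokenizer.apply_chat_template() does this for you.
from jinja2 import Environment, FileSystemLoader

def raise_exception(message):
    # transformers exposes this helper to templates; we mimic it here.
    raise ValueError(message)

env = Environment(loader=FileSystemLoader("."))
env.globals["raise_exception"] = raise_exception
template = env.get_template("chat_template.jinja")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(template.render(bos_token="<bos>", messages=messages,
                      add_generation_prompt=True))
# Expected shape of the output:
# <bos><start_of_turn>user
# You are a helpful assistant.
#
# Hello!<end_of_turn>
# <start_of_turn>model
```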
config.json CHANGED
@@ -3,6 +3,7 @@
     "Gemma3ForConditionalGeneration"
   ],
   "boi_token_index": 255999,
+  "dtype": "bfloat16",
   "eoi_token_index": 256000,
   "eos_token_id": [
     1,
@@ -14,16 +15,53 @@
   "model_type": "gemma3",
   "projector_lr": 1e-05,
   "text_config": {
+    "_sliding_window_pattern": 6,
     "attention_bias": false,
     "attention_dropout": 0.0,
     "attn_logit_softcapping": null,
-    "cache_implementation": "hybrid",
+    "dtype": "bfloat16",
     "final_logit_softcapping": null,
     "head_dim": 256,
     "hidden_activation": "gelu_pytorch_tanh",
     "hidden_size": 2560,
     "initializer_range": 0.02,
     "intermediate_size": 10240,
+    "layer_types": [
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention"
+    ],
     "max_position_embeddings": 131072,
     "model_type": "gemma3_text",
     "num_attention_heads": 8,
@@ -38,16 +76,14 @@
     },
     "rope_theta": 1000000.0,
     "sliding_window": 1024,
-    "sliding_window_pattern": 6,
-    "torch_dtype": "bfloat16",
     "use_cache": true,
     "vocab_size": 262208
   },
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.3",
-  "use_cache": true,
+  "transformers_version": "4.56.2",
+  "use_cache": false,
   "vision_config": {
     "attention_dropout": 0.0,
+    "dtype": "bfloat16",
     "hidden_act": "gelu_pytorch_tanh",
     "hidden_size": 1152,
     "image_size": 896,
@@ -58,8 +94,7 @@
     "num_channels": 3,
     "num_hidden_layers": 27,
     "patch_size": 14,
-    "torch_dtype": "bfloat16",
     "vision_use_head": false
   },
-  "vision_lr": 2e-06
+  "vision_lr": 2e-07
 }
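The text_config change above swaps the removed "sliding_window_pattern": 6 shorthand for an explicit "layer_types" list. A minimal sketch checking that the two encodings agree for this 34-layer config, with every sixth layer getting full attention:

```python
# Sketch: the explicit "layer_types" list added above encodes the same
# schedule as the removed "sliding_window_pattern": 6, namely full
# attention on every sixth layer and sliding-window attention elsewhere.
num_layers, pattern = 34, 6  # 34 entries appear in the diff above
layer_types = [
    "full_attention" if (i + 1) % pattern == 0 else "sliding_attention"
    for i in range(num_layers)
]
assert layer_types.count("full_attention") == 5  # layers 6, 12, 18, 24, 30
assert layer_types[-1] == "sliding_attention"    # layers 31-34 stay sliding
```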
generation_config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "attn_implementation": "eager",
   "bos_token_id": 2,
   "cache_implementation": "hybrid",
   "do_sample": true,
@@ -10,5 +9,5 @@
   "pad_token_id": 0,
   "top_k": 64,
   "top_p": 0.95,
-  "transformers_version": "4.51.3"
+  "transformers_version": "4.56.2"
 }
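A minimal sketch of the resulting file as a transformers GenerationConfig, the class that generation_config.json deserializes into; every keyword value below comes from the diff above, and keys in the elided middle of the file (old lines 6-9) are deliberately left out:

```python
# Sketch: rebuild the post-commit sampling settings locally.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    bos_token_id=2,
    cache_implementation="hybrid",  # unchanged by this commit
    do_sample=True,                 # top-k / nucleus sampling enabled
    pad_token_id=0,
    top_k=64,
    top_p=0.95,
)
print(gen_cfg.to_json_string())
```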
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f14754a6dfb75799d0f7f7ea96bbfbeab100c5a045e4b26ce9d70499e7974d0e
+oid sha256:f5d9a77e86c1357e257f687f60a88bc0515b87b98453ae4e6f9822786fcedee0
 size 4961251752
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b6c0122c7e2a0a1b5efffc9a5b49df579290c6411d81e1cf8e7e78c8cd9c1bb
+oid sha256:c9aff74d9968d7cc46bce45054180ef7329ca542532bf8e1cd8ec235601f1280
 size 4981531360
model.safetensors.index.json CHANGED
@@ -1,5 +1,6 @@
 {
   "metadata": {
+    "total_parameters": 4300079472,
     "total_size": 9942663904
   },
   "weight_map": {
preprocessor_config.json CHANGED
@@ -3,6 +3,7 @@
   "data_format": "channels_first",
   "default_to_square": true,
   "device": null,
+  "disable_grouping": null,
   "do_center_crop": null,
   "do_convert_rgb": null,
   "do_normalize": true,
tokenizer_config.json CHANGED
@@ -51325,7 +51325,6 @@
   },
   "boi_token": "<start_of_image>",
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
   "eos_token": "<eos>",
trainer_state.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83490120dd7e1cf6a7140a26734eb574276ca6c824fec30f83f638ae410b59cf
-size 33348269
+oid sha256:16b2511cceb96f7d1d9121e08b005a7ee57f2d82a1ffc78f975569438275dd43
+size 941672
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:943dbe161f39363cefe248ee99e67069490bd33278e55d04272c11b4fa4932bf
-size 7736
+oid sha256:fa3a1f1ebe49efb547a861186ccb3fe2e5d6169d2545c77525f68141e4295b43
+size 8273