DimensionSTP committed on
Commit
cae4414
·
verified ·
1 Parent(s): 5862b05

Fix vision weights, vocab_size, and add processor files for revision 'last'

Browse files
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
3
+ }
config.json CHANGED
@@ -38,11 +38,12 @@
38
  "rope_theta": 1000000.0,
39
  "sliding_window": 1024,
40
  "sliding_window_pattern": 6,
 
41
  "use_cache": true,
42
- "vocab_size": 262208
43
  },
44
  "torch_dtype": "bfloat16",
45
- "transformers_version": "4.50.0",
46
  "vision_config": {
47
  "attention_dropout": 0.0,
48
  "hidden_act": "gelu_pytorch_tanh",
@@ -55,6 +56,7 @@
55
  "num_channels": 3,
56
  "num_hidden_layers": 27,
57
  "patch_size": 14,
 
58
  "vision_use_head": false
59
  },
60
  "vocab_size": 262147
 
38
  "rope_theta": 1000000.0,
39
  "sliding_window": 1024,
40
  "sliding_window_pattern": 6,
41
+ "torch_dtype": "bfloat16",
42
  "use_cache": true,
43
+ "vocab_size": 262147
44
  },
45
  "torch_dtype": "bfloat16",
46
+ "transformers_version": "4.51.3",
47
  "vision_config": {
48
  "attention_dropout": 0.0,
49
  "hidden_act": "gelu_pytorch_tanh",
 
56
  "num_channels": 3,
57
  "num_hidden_layers": 27,
58
  "patch_size": 14,
59
+ "torch_dtype": "bfloat16",
60
  "vision_use_head": false
61
  },
62
  "vocab_size": 262147
generation_config.json CHANGED
@@ -9,5 +9,5 @@
9
  "pad_token_id": 0,
10
  "top_k": 64,
11
  "top_p": 0.95,
12
- "transformers_version": "4.50.0.dev0"
13
  }
 
9
  "pad_token_id": 0,
10
  "top_k": 64,
11
  "top_p": 0.95,
12
+ "transformers_version": "4.51.3"
13
  }
model-00001-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7ac4405aee2914591eada0ee82186e443130ba880fb4f21071d0f38b5b4a47d0
3
- size 6260322992
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d8603e861a171e8d22ed07e3a63dd604fea0f3de094fe90b5c860b6a2b56bd2
3
+ size 4979433712
model-00002-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7ec59660920eb43140fa114989d460f97b37e57ad87a6af87ba4cf6dabba5b03
3
- size 4317842688
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d416c15edaaf6a60b861b01095223bea39b6259d9e1c4eb29c5c333602b39a63
3
+ size 4931296592
model-00003-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0b47cdc291c6aab8faf085b89951b74fdfeca53779f3c36344d61904e9f0c873
3
- size 4435785448
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e16180398467fa9c66b0d77cdf8133afd5b3f013e73a152b76c950c036536b4c
3
+ size 4931296656
model-00004-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8efa4376c433f5d0715052ea3fe4d935272ec25f53443caef299a58f5087a161
3
- size 4129091816
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be0cee40ce371e2127a8c2462e851351e49f517b1da5b922d89321661a7caf57
3
+ size 4931296656
model-00005-of-00005.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3964918e13fab90516346476f76e47b8ff53e510c2519021857b6ddaaf843c8c
3
- size 6401918968
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4735cf0c744572c01c1a82c8ec57ca68250f8445f9e2b6054a4cdff567c38792
3
+ size 4601000928
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7b244434a1e668213b5494c816f8077d7b5b64bdac094ea09e7aaf6281b77f00
3
- size 33384937
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
3
+ size 33384568
tokenizer_config.json CHANGED
@@ -51321,27 +51321,11 @@
51321
  "rstrip": false,
51322
  "single_word": false,
51323
  "special": true
51324
- },
51325
- "262145": {
51326
- "content": "<think>",
51327
- "lstrip": false,
51328
- "normalized": true,
51329
- "rstrip": false,
51330
- "single_word": false,
51331
- "special": false
51332
- },
51333
- "262146": {
51334
- "content": "</think>",
51335
- "lstrip": false,
51336
- "normalized": true,
51337
- "rstrip": false,
51338
- "single_word": false,
51339
- "special": false
51340
  }
51341
  },
51342
  "boi_token": "<start_of_image>",
51343
  "bos_token": "<bos>",
51344
- "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\n\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- '' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><eos>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\n\\n' + messages[0]['content'] + '<eos>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<eos>' + '\\n' }}\n {%- elif message.role == \"assistant\" and not message.tool_calls %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<eos>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n 
{%- endfor %}\n {{- '<eos>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user\n' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<eos>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n\\n<think>\\n' }}\n{%- endif %}\n",
51345
  "clean_up_tokenization_spaces": false,
51346
  "eoi_token": "<end_of_image>",
51347
  "eos_token": "<eos>",
 
51321
  "rstrip": false,
51322
  "single_word": false,
51323
  "special": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51324
  }
51325
  },
51326
  "boi_token": "<start_of_image>",
51327
  "bos_token": "<bos>",
51328
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
51329
  "clean_up_tokenization_spaces": false,
51330
  "eoi_token": "<end_of_image>",
51331
  "eos_token": "<eos>",