diff --git a/.gitattributes b/.gitattributes
index 18361f17fba525e3a0902a5569dac902f31c8402..f6fa30076bf3796c9e0b91804da04b35848553d5 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -67,3 +67,38 @@ llama_p320_l8_together.axmodel filter=lfs diff=lfs merge=lfs -text
llama_p320_l9_together.axmodel filter=lfs diff=lfs merge=lfs -text
llama_post.axmodel filter=lfs diff=lfs merge=lfs -text
resampler.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l0_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l10_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l11_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l12_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l13_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l14_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l15_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l16_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l17_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l18_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l19_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l1_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l20_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l21_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l22_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l23_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l24_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l25_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l26_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l27_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l28_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l29_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l2_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l30_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l31_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l3_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l4_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l5_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l6_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l7_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l8_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_p320_l9_together.axmodel filter=lfs diff=lfs merge=lfs -text
+minicpm-v-4_axmodel/llama_post.axmodel filter=lfs diff=lfs merge=lfs -text
+show_demo.jpg filter=lfs diff=lfs merge=lfs -text
+siglip.axmodel filter=lfs diff=lfs merge=lfs -text
diff --git a/embed_tokens.pth b/embed_tokens.pth
new file mode 100644
index 0000000000000000000000000000000000000000..203fd9589a089e595999c42ecbcac23cd4a90113
--- /dev/null
+++ b/embed_tokens.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a0a9ac170ee39704b5bd21309d1c744cd99fc0f49049e629f0b52fab5049acc
+size 752109557
diff --git a/minicpm-v-4_axmodel/llama_p320_l0_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l0_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..9bfc89538cc706d42963a6b3a33d0262efd1dfd7
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l0_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fce479c8165af95a6ed22d3db780fef5944df7a9fbaa69ae18b5446c6e016535
+size 113077656
diff --git a/minicpm-v-4_axmodel/llama_p320_l10_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l10_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..e602832c8b6974352edb7daf47f01485d96fa4e9
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l10_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b25deb581f1bc7f91580918dfee5ae46549add3e3f21c058111c99becafaa34c
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l11_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l11_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..d0ea27fe0e09da8b86f711b226bde7c45a181f57
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l11_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7aeacf6402287675176a249bf35fa4e385da8bff20aa0159b47ea782e5e373c3
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l12_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l12_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..d68436ece8a63e43ecf3e8b62b38f73947e21ba8
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l12_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e0b5ff82af61e6c61016eb58bb7fd7c61dd73e13cc3eef3e38d8fc06a41593
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l13_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l13_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..48d1590e6f0eef6fa2e219462a8922d028056a4d
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l13_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cee8f2be186e0ee766c2c60152bacb86a7727ad306ebaa50c54d6819f8b9a8b7
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l14_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l14_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..6f9d71ba43957a511abbcb727b6a4581b6669b52
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l14_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95d0f25920bb45fb426b1bce6e1e7d884b933d947553bd0e03f8a91619149640
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l15_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l15_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..8e9116d860581038c33618dc12b2f69f2f71c9c6
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l15_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a611da04229fcb1b5a837fd9ee8df777ec512c48a6e30e5818b805fa02db24f
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l16_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l16_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..301aa197c620f897fc4eb71db2703b68fe772240
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l16_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85364b343768095622495bcdc14ac6298884217f9b63d27ee1a65163ea403af6
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l17_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l17_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..2721de82b463f1dea794e0479796be572f999bbd
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l17_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6864ed07b4590da5b36816440098e6b1bafb342370c0e6aeb0c899de6d35fe8
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l18_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l18_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..6b9fdcee654fb5b94f6443d88f033822cea586fe
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l18_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:908e09f4e7104b1320d6f37fa7f2f61f6212dbf54f0588d48ad643052fb12a72
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l19_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l19_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..3c54560bd0324018b0ab962b9b4657862c610668
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l19_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2a05b77855ccc0361b1630d745694c9e6bed1660706f85a3d7f7fdc9aebc872
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l1_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l1_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..7787b7634f0d5a9208db80543703bbf75807b1dd
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l1_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65719d84bd3a27f7cc2c4cf2a66d8e05aeb788603dba2119d8cbf39ef25158b7
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l20_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l20_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..1890b96918b4c79ea64f005427812b6f44f7cfa2
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l20_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:778dc6ac0645fc90691c60eca216df26d714471489f5d740ee33dfb291fd4885
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l21_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l21_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..d52c827deb588e94fa137aed7a85bab4378fca60
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l21_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c926dcd211fee5c85d01fb3bdc93ce5710cee20bb357c7566abe2da6ab3c4689
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l22_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l22_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..f212bc96e01f5d1b09e5af408a80c1342c786ee4
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l22_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3aa58a9c9dbd551a3acdcd026236a223c2b4dd892481c551484e4684f8db3c45
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l23_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l23_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..be93e0ccc35da63f0ab5efa499a74903c39f8276
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l23_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a8f02009d6b8b69a144d7d70911837b5e3fba7a94509771d8c9aaa6aff139da
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l24_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l24_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..380c8b369d289426dcff3a289f726a284d9c8859
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l24_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da41b7dfa32579e1da776071a7be730c4a3b004ca7fb766bf566ab8195eb3bc8
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l25_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l25_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..378b0293b80602d25ddccfc1804a312d2277e718
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l25_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7192c83c54f36efab030bb539965a86e64b24d538786af2095f6e6af817a5a35
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l26_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l26_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..8e68283c36affa40778e8976eca7153d9258f5d2
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l26_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b410cc34956d85bfc5d9886c135cb527d81c90e5a3a653bfc935833fd5b02739
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l27_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l27_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..392555c15c5bf1698c08a0eb23691997baaa9acd
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l27_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddcc6bfe4bd9ad7d35ed6dce9837ae59d6bef27bb5baad7cff14491f1c96d0db
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l28_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l28_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..64efdde006df59da40dfbb2ecfe64ea22829af92
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l28_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe609f25f9f0754bd99b16879c05b6b3f2ba101910a58ce17644fcc8e9be0284
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l29_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l29_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..740b49e88b011e8bc18fd91fddb194181411447d
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l29_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f199de4eacd7e22d153abb6728220825b780e1c5efea0df3263d22fd8991f188
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l2_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l2_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..5f61b93d4981d8ef51544c9950eec3eece7cd4f3
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l2_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:190bc2fd5d25b3e24d9e39731498e5236231232a0490fb352e7dde855d8c75f7
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l30_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l30_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..abbc28c5c9896b427eb82934d6827cd5ac380366
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l30_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f621d8829c815be2341950205b23a77527dcf020dd147bdec8372ceead670b57
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l31_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l31_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..94e4b07ad7e460c3f3b876c422c5502c082e977a
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l31_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a7360d5195a34bf643b5a06962d727ebe5de552a8a7ea72bbc6346529481591
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l3_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l3_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..a68c10e8d4881eb1169690dd62dcffb4d8175b95
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l3_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:baad71924b6b99c416f0d61517512bd357175aeb6b88beafea20869d3cbf7c27
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l4_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l4_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..2ad2152e59295a87e3471490af2dcb54056b95f5
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l4_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaee19341637358710909a2b49a98218ac891894b3dbd412eec1ea82de39846a
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l5_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l5_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..a32a2a69238d2afd25da9ca860aedfdb2c935f3f
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l5_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6446bbfb1fb4f0497e7406c92a59602ddbc40101ce926fc6c3a405db283cb86e
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l6_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l6_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..b9d658f271b48b1e8320d435a980de1b5bd18120
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l6_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb9fe58303bbf751b46941a2638ef6b2a8b8f7e01246a3bfff6746d9f95dc505
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l7_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l7_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..6864ff3a77819feff87b5bd336ce3a0a7c5aa9ce
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l7_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:763182a63070ed2afd5ca4717e6b2eb23e02b129e11202a44e5c800a7273c1b9
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l8_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l8_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..219184a78c76c0a8b75ae4a1ce9b7fc6915d4739
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l8_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ccc8912e65b050266920af762a61254a77778bd0558336a9597c542953aabf
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_p320_l9_together.axmodel b/minicpm-v-4_axmodel/llama_p320_l9_together.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..5e0c0638a0f02c958a2af7cda58b9c2a13934349
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_p320_l9_together.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb68e62d64d152bc0c2e1bfb2c5fb5943a193145ef523159bba9ba9fadfc5c6f
+size 113076280
diff --git a/minicpm-v-4_axmodel/llama_post.axmodel b/minicpm-v-4_axmodel/llama_post.axmodel
new file mode 100644
index 0000000000000000000000000000000000000000..24727f2c380d3ca62c0e3052b8be3d04bb49fb3c
--- /dev/null
+++ b/minicpm-v-4_axmodel/llama_post.axmodel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dabc83d55e485d1f7fcacfb07ceff12a6da22724e9ec6a19c570fc71085704df
+size 205348258
diff --git a/minicpmv4_tokenizer/config.json b/minicpmv4_tokenizer/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..be2bdfedbbe7b66bea9e91ce1963d2bc78bf600f
--- /dev/null
+++ b/minicpmv4_tokenizer/config.json
@@ -0,0 +1,203 @@
+{
+ "architectures": [
+ "MiniCPMV"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "auto_map": {
+ "AutoConfig": "configuration_minicpm.MiniCPMVConfig",
+ "AutoModel": "modeling_minicpmv.MiniCPMV",
+ "AutoModelForCausalLM": "modeling_minicpmv.MiniCPMV"
+ },
+ "batch_vision_input": true,
+ "bos_token_id": 1,
+ "drop_vision_last_layer": false,
+ "eos_token_id": [
+ 2,
+ 73440
+ ],
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "image_size": 448,
+ "initializer_range": 0.1,
+ "intermediate_size": 10240,
+ "max_position_embeddings": 32768,
+ "mlp_bias": false,
+ "model_type": "minicpmv",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 2,
+ "pad_token_id": 2,
+ "patch_size": 14,
+ "pretraining_tp": 1,
+ "query_num": 64,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": {
+ "factor": 1.0,
+ "long_factor": [
+ 0.9977997200264581,
+ 1.014658295992452,
+ 1.0349680404997148,
+ 1.059429246056193,
+ 1.0888815016813513,
+ 1.1243301355211495,
+ 1.166977103606075,
+ 1.2182568066927284,
+ 1.2798772354275727,
+ 1.3538666751582975,
+ 1.4426259039919596,
+ 1.5489853358570191,
+ 1.6762658237220625,
+ 1.8283407612492941,
+ 2.0096956085876183,
+ 2.225478927469756,
+ 2.481536379650452,
+ 2.784415934557119,
+ 3.1413289096347365,
+ 3.560047844772632,
+ 4.048719380066383,
+ 4.615569542115128,
+ 5.2684819496549835,
+ 6.014438591970396,
+ 6.858830049237097,
+ 7.804668263503327,
+ 8.851768731513417,
+ 9.99600492938444,
+ 11.228766118181639,
+ 12.536757560834843,
+ 13.902257701387796,
+ 15.303885189125953,
+ 16.717837610115794,
+ 18.119465097853947,
+ 19.484965238406907,
+ 20.792956681060105,
+ 22.02571786985731,
+ 23.16995406772833,
+ 24.217054535738416,
+ 25.16289275000465,
+ 26.007284207271347,
+ 26.753240849586767,
+ 27.40615325712662,
+ 27.973003419175363,
+ 28.461674954469114,
+ 28.880393889607006,
+ 29.237306864684626,
+ 29.540186419591297,
+ 29.79624387177199,
+ 30.01202719065413,
+ 30.193382037992453,
+ 30.34545697551969,
+ 30.47273746338473,
+ 30.579096895249787,
+ 30.66785612408345,
+ 30.741845563814174,
+ 30.80346599254902,
+ 30.85474569563567,
+ 30.897392663720595,
+ 30.932841297560394,
+ 30.962293553185553,
+ 30.986754758742034,
+ 31.007064503249293,
+ 31.02392307921529
+ ],
+ "original_max_position_embeddings": 32786,
+ "rope_type": "longrope",
+ "short_factor": [
+ 0.9977997200264581,
+ 1.014658295992452,
+ 1.0349680404997148,
+ 1.059429246056193,
+ 1.0888815016813513,
+ 1.1243301355211495,
+ 1.166977103606075,
+ 1.2182568066927284,
+ 1.2798772354275727,
+ 1.3538666751582975,
+ 1.4426259039919596,
+ 1.5489853358570191,
+ 1.6762658237220625,
+ 1.8283407612492941,
+ 2.0096956085876183,
+ 2.225478927469756,
+ 2.481536379650452,
+ 2.784415934557119,
+ 3.1413289096347365,
+ 3.560047844772632,
+ 4.048719380066383,
+ 4.615569542115128,
+ 5.2684819496549835,
+ 6.014438591970396,
+ 6.858830049237097,
+ 7.804668263503327,
+ 8.851768731513417,
+ 9.99600492938444,
+ 11.228766118181639,
+ 12.536757560834843,
+ 13.902257701387796,
+ 15.303885189125953,
+ 16.717837610115794,
+ 18.119465097853947,
+ 19.484965238406907,
+ 20.792956681060105,
+ 22.02571786985731,
+ 23.16995406772833,
+ 24.217054535738416,
+ 25.16289275000465,
+ 26.007284207271347,
+ 26.753240849586767,
+ 27.40615325712662,
+ 27.973003419175363,
+ 28.461674954469114,
+ 28.880393889607006,
+ 29.237306864684626,
+ 29.540186419591297,
+ 29.79624387177199,
+ 30.01202719065413,
+ 30.193382037992453,
+ 30.34545697551969,
+ 30.47273746338473,
+ 30.579096895249787,
+ 30.66785612408345,
+ 30.741845563814174,
+ 30.80346599254902,
+ 30.85474569563567,
+ 30.897392663720595,
+ 30.932841297560394,
+ 30.962293553185553,
+ 30.986754758742034,
+ 31.007064503249293,
+ 31.02392307921529
+ ]
+ },
+ "rope_theta": 10000.0,
+ "slice_config": {
+ "max_slice_nums": 9,
+ "model_type": "minicpmv",
+ "patch_size": 14,
+ "scale_resolution": 448
+ },
+ "slice_mode": true,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.51.0",
+ "use_cache": true,
+ "use_image_id": true,
+ "version": 4.0,
+ "vision_batch_size": 16,
+ "vision_config": {
+ "_attn_implementation_autoset": true,
+ "attention_dropout": 0.0,
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 980,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vocab_size": 73448
+}
diff --git a/minicpmv4_tokenizer/configuration_minicpm.py b/minicpmv4_tokenizer/configuration_minicpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4752b4efe85786f137340dc5912bbf9d8ffe26
--- /dev/null
+++ b/minicpmv4_tokenizer/configuration_minicpm.py
@@ -0,0 +1,118 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MiniCPM model configuration"""
+
+import os
+from typing import Union
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+from .modeling_navit_siglip import SiglipVisionConfig
+
+from transformers import LlamaConfig
+
+logger = logging.get_logger(__name__)
+
+
+class MiniCPMVSliceConfig(PretrainedConfig):
+ model_type = "minicpmv"
+
+ def __init__(
+ self,
+ patch_size=14,
+ max_slice_nums=9,
+ scale_resolution=448,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.patch_size = patch_size
+ self.max_slice_nums = max_slice_nums
+ self.scale_resolution = scale_resolution
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ if config_dict.get("model_type") == "minicpmv":
+ config_dict = config_dict["slice_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+class MiniCPMVConfig(LlamaConfig):
+ model_type = "minicpmv"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ default_vision_config = {
+ "hidden_size": 1152,
+ "image_size": 980,
+ "intermediate_size": 4304,
+ "model_type": "siglip",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14,
+ }
+
+ def __init__(
+ self,
+ use_cache=True,
+ query_num=64,
+ image_size=448,
+ drop_vision_last_layer=True,
+ batch_vision_input=True,
+ slice_config=None,
+ vision_config=None,
+ use_image_id=True,
+ vision_batch_size=16,
+ **kwargs,
+ ):
+ self.use_cache = use_cache
+ self.query_num = query_num
+ self.image_size = image_size
+ self.drop_vision_last_layer = drop_vision_last_layer
+ self.batch_vision_input = batch_vision_input
+ self.use_image_id = use_image_id
+ self.vision_batch_size = vision_batch_size
+
+ if slice_config is None:
+ self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
+ else:
+ self.slice_config = MiniCPMVSliceConfig(**slice_config)
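+        # slice_mode is always enabled; without an explicit slice_config the slice count
+        # is capped at one via max_slice_nums=1 above.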
+ self.slice_mode = True
+
+        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes added
+ if vision_config is None:
+ self.vision_config = SiglipVisionConfig(**self.default_vision_config)
+ logger.info("vision_config is None, using default vision config")
+ elif isinstance(vision_config, dict):
+ self.vision_config = SiglipVisionConfig(**vision_config)
+ elif isinstance(vision_config, SiglipVisionConfig):
+ self.vision_config = vision_config
+
+ self.patch_size = self.vision_config.patch_size
+
+ super().__init__(**kwargs)
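+
+
+# Minimal loading sketch (an assumption for illustration: it presumes config.json's auto_map
+# entries and the companion modeling_navit_siglip.py sit alongside this file in minicpmv4_tokenizer/):
+#     from transformers import AutoConfig
+#     cfg = AutoConfig.from_pretrained("minicpmv4_tokenizer", trust_remote_code=True)
+#     print(cfg.num_hidden_layers, cfg.vision_config.hidden_size)  # 32 and 1152 per config.json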
diff --git a/minicpmv4_tokenizer/generation_config.json b/minicpmv4_tokenizer/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb6a92ca912734f18bce7582e63792f456fc3343
--- /dev/null
+++ b/minicpmv4_tokenizer/generation_config.json
@@ -0,0 +1,10 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": [
+ 2,
+ 73440
+ ],
+ "pad_token_id": 2,
+ "transformers_version": "4.51.0"
+}
diff --git a/minicpmv4_tokenizer/image_processing_minicpmv.py b/minicpmv4_tokenizer/image_processing_minicpmv.py
new file mode 100644
index 0000000000000000000000000000000000000000..67ff552fbd59b6142f0969dfe00e19edfb1127bf
--- /dev/null
+++ b/minicpmv4_tokenizer/image_processing_minicpmv.py
@@ -0,0 +1,418 @@
+from typing import Optional, Union, Dict, Any, List
+
+import torch
+import math
+import PIL.Image
+import PIL.ImageSequence
+import numpy as np
+import PIL
+from PIL import Image
+
+from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
+from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
+from transformers import AutoImageProcessor
+from transformers.image_transforms import to_channel_dimension_format
+from transformers.image_utils import (
+ ImageInput,
+ make_list_of_images,
+ valid_images,
+ is_torch_tensor,
+ is_batched,
+ to_numpy_array,
+ infer_channel_dimension_format,
+ ChannelDimension
+)
+
+
+def recursive_converter(converter, value):
+ if isinstance(value, list):
+ new_value = []
+ for v in value:
+ new_value += [recursive_converter(converter, v)]
+ return new_value
+ else:
+ return converter(value)
+
+
+class MiniCPMVBatchFeature(BatchFeature):
+ r"""
+    Extends BatchFeature to support images of varying sizes.
+ """
+ def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
+ super().__init__(data)
+ self.convert_to_tensors(tensor_type=tensor_type)
+
+ def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
+ if tensor_type is None:
+ return self
+
+ is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
+
+ def converter(value):
+ try:
+ if not is_tensor(value):
+ tensor = as_tensor(value)
+ return tensor
+ except: # noqa E722
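+                # `key` is the loop variable of the `for key, value in self.items()` loop
+                # below; it is resolved at call time, so this branch is only valid inside that loop.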
+ if key == "overflowing_values":
+ raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
+ raise ValueError(
+ "Unable to create tensor, you should probably activate padding "
+ "with 'padding=True' to have batched tensors with the same length."
+ )
+
+
+ for key, value in self.items():
+ self[key] = recursive_converter(converter, value)
+ return self
+
+ def to(self, *args, **kwargs) -> "MiniCPMVBatchFeature":
+ requires_backends(self, ["torch"])
+ import torch
+
+ def cast_tensor(v):
+ # check if v is a floating point
+ if torch.is_floating_point(v):
+ # cast and send to device
+ return v.to(*args, **kwargs)
+ elif device is not None:
+ return v.to(device=device)
+ else:
+ return v
+
+ new_data = {}
+ device = kwargs.get("device")
+ # Check if the args are a device or a dtype
+ if device is None and len(args) > 0:
+ # device should be always the first argument
+ arg = args[0]
+ if is_torch_dtype(arg):
+ # The first argument is a dtype
+ pass
+ elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
+ device = arg
+ else:
+ # it's something else
+ raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
+ # We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
+ for k, v in self.items():
+ new_data[k] = recursive_converter(cast_tensor, v)
+ self.data = new_data
+ return self
+
+
+class MiniCPMVImageProcessor(BaseImageProcessor):
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ max_slice_nums=9,
+ scale_resolution=448,
+ patch_size=14,
+ **kwargs):
+ super().__init__(**kwargs)
+ self.max_slice_nums = max_slice_nums
+ self.scale_resolution = scale_resolution
+ self.patch_size = patch_size
+ self.use_image_id = kwargs.pop("use_image_id", False)
+ self.image_feature_size = kwargs.pop("image_feature_size", 64)
+        self.im_start_token = kwargs.pop("im_start", "<image>")
+        self.im_end_token = kwargs.pop("im_end", "</image>")
+        self.slice_start_token = kwargs.pop("slice_start", "<slice>")
+        self.slice_end_token = kwargs.pop("slice_end", "</slice>")
+        self.unk_token = kwargs.pop("unk", "<unk>")
+        self.im_id_start = kwargs.pop("im_id_start", "<image_id>")
+        self.im_id_end = kwargs.pop("im_id_end", "</image_id>")
+ self.slice_mode = kwargs.pop("slice_mode", True)
+ self.mean = np.array(kwargs.pop("norm_mean", [0.5, 0.5, 0.5]))
+ self.std = np.array(kwargs.pop("norm_std", [0.5, 0.5, 0.5]))
+ self.version = kwargs.pop("version", 2.0)
+
+ def ensure_divide(self, length, patch_size):
+ return max(round(length / patch_size) * patch_size, patch_size)
+
+ def find_best_resize(self,
+ original_size,
+ scale_resolution,
+ patch_size,
+ allow_upscale=False):
+ width, height = original_size
+ if (width * height >
+ scale_resolution * scale_resolution) or allow_upscale:
+ r = width / height
+ height = int(scale_resolution / math.sqrt(r))
+ width = int(height * r)
+ best_width = self.ensure_divide(width, patch_size)
+ best_height = self.ensure_divide(height, patch_size)
+ return (best_width, best_height)
+
+ def get_refine_size(self,
+ original_size,
+ grid,
+ scale_resolution,
+ patch_size,
+ allow_upscale=False):
+ width, height = original_size
+ grid_x, grid_y = grid
+
+ refine_width = self.ensure_divide(width, grid_x)
+ refine_height = self.ensure_divide(height, grid_y)
+
+ grid_width = refine_width / grid_x
+ grid_height = refine_height / grid_y
+
+ best_grid_size = self.find_best_resize((grid_width, grid_height),
+ scale_resolution,
+ patch_size,
+ allow_upscale=allow_upscale)
+ refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
+ return refine_size
+
+ def split_to_patches(self, image, grid):
+ patches = []
+ width, height = image.size
+ grid_x = int(width / grid[0])
+ grid_y = int(height / grid[1])
+ for i in range(0, height, grid_y):
+ images = []
+ for j in range(0, width, grid_x):
+ box = (j, i, j + grid_x, i + grid_y)
+ patch = image.crop(box)
+ images.append(patch)
+ patches.append(images)
+ return patches
+
+ def slice_image(
+ self, image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False
+ ):
+ original_size = image.size
+ source_image = None
+ best_grid = self.get_sliced_grid(original_size, max_slice_nums, never_split)
+ patches = []
+
+ if best_grid is None:
+            # don't need to slice; just upsample to the best size
+ best_size = self.find_best_resize(
+ original_size, scale_resolution, patch_size, allow_upscale=True
+ )
+ source_image = image.resize(best_size, resample=Image.Resampling.BICUBIC)
+ else:
+            # source image: downsample and ensure dimensions are divisible by patch_size
+ best_resize = self.find_best_resize(original_size, scale_resolution, patch_size)
+ source_image = image.copy().resize(best_resize, resample=Image.Resampling.BICUBIC)
+ refine_size = self.get_refine_size(
+ original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
+ )
+ refine_image = image.resize(refine_size, resample=Image.Resampling.BICUBIC)
+ patches = self.split_to_patches(refine_image, best_grid)
+
+ return source_image, patches, best_grid
+
+ def get_grid_placeholder(self, grid):
+ if grid is None:
+ return ""
+ slice_image_placeholder = (
+ self.slice_start_token
+ + self.unk_token * self.image_feature_size
+ + self.slice_end_token
+ )
+
+ cols = grid[0]
+ rows = grid[1]
+ slices = []
+ for i in range(rows):
+ lines = []
+ for j in range(cols):
+ lines.append(slice_image_placeholder)
+ slices.append("".join(lines))
+
+ slice_placeholder = "\n".join(slices)
+ return slice_placeholder
+
+ def get_image_id_placeholder(self, idx=0):
+ return f"{self.im_id_start}{idx}{self.im_id_end}"
+
+ def get_sliced_images(self, image, max_slice_nums=None):
+ slice_images = []
+
+ if not self.slice_mode:
+ return [image]
+
+ max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
+ assert max_slice_nums > 0
+ source_image, patches, sliced_grid = self.slice_image(
+ image,
+ max_slice_nums, # default: 9
+ self.scale_resolution, # default: 448
+ self.patch_size # default: 14
+ )
+
+ slice_images.append(source_image)
+ if len(patches) > 0:
+ for i in range(len(patches)):
+ for j in range(len(patches[0])):
+ slice_images.append(patches[i][j])
+ return slice_images
+
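+    # Slicing heuristic: the target slice count is roughly image_area / scale_resolution**2,
+    # capped at max_slice_nums; among the factorizations of nearby slice counts, the
+    # (cols, rows) grid whose aspect ratio best matches the image's (in log space) is chosen.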
+    def get_sliced_grid(self, image_size, max_slice_nums, never_split=False):
+ original_width, original_height = image_size
+ log_ratio = math.log(original_width / original_height)
+ ratio = original_width * original_height / (self.scale_resolution * self.scale_resolution)
+ multiple = min(math.ceil(ratio), max_slice_nums)
+        if multiple <= 1 or never_split:
+ return None
+ candidate_split_grids_nums = []
+ for i in [multiple - 1, multiple, multiple + 1]:
+ if i == 1 or i > max_slice_nums:
+ continue
+ candidate_split_grids_nums.append(i)
+
+ candidate_grids = []
+ for split_grids_nums in candidate_split_grids_nums:
+ m = 1
+ while m <= split_grids_nums:
+ if split_grids_nums % m == 0:
+ candidate_grids.append([m, split_grids_nums // m])
+ m += 1
+
+ best_grid = [1, 1]
+ min_error = float("inf")
+ for grid in candidate_grids:
+ error = abs(log_ratio - math.log(grid[0] / grid[1]))
+ if error < min_error:
+ best_grid = grid
+ min_error = error
+
+ return best_grid
+
+ def get_slice_image_placeholder(self, image_size, image_idx=0, max_slice_nums=None, use_image_id=None):
+ max_slice_nums = self.max_slice_nums if max_slice_nums is None else int(max_slice_nums)
+ assert max_slice_nums > 0
+ grid = self.get_sliced_grid(image_size=image_size, max_slice_nums=max_slice_nums)
+
+ image_placeholder = (
+ self.im_start_token
+ + self.unk_token * self.image_feature_size
+ + self.im_end_token
+ )
+ use_image_id = self.use_image_id if use_image_id is None else bool(use_image_id)
+ if use_image_id:
+ final_placeholder = self.get_image_id_placeholder(image_idx) + image_placeholder
+ else:
+ final_placeholder = image_placeholder
+
+ if self.slice_mode:
+ final_placeholder = final_placeholder + self.get_grid_placeholder(grid=grid)
+ return final_placeholder
+
+ def to_pil_image(self, image, rescale=None) -> PIL.Image.Image:
+ """
+ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
+ needed.
+
+ Args:
+ image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
+ The image to convert to the PIL Image format.
+ rescale (`bool`, *optional*):
+ Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
+ default to `True` if the image type is a floating type, `False` otherwise.
+ """
+ if isinstance(image, PIL.Image.Image):
+ return image
+ if is_torch_tensor(image):
+ image = image.numpy()
+
+ if isinstance(image, np.ndarray):
+ if rescale is None:
+                # rescale defaults to True when the array is of floating type.
+ rescale = isinstance(image.flat[0], np.floating)
+            # If the channel has been moved to the first dim, we put it back at the end.
+ if image.ndim == 3 and image.shape[0] in [1, 3]:
+ image = image.transpose(1, 2, 0)
+ if rescale:
+ image = image * 255
+ image = image.astype(np.uint8)
+ return PIL.Image.fromarray(image)
+ return image
+
+ def reshape_by_patch(self, image):
+ """
+        :param image: numpy array of shape [3, H, W]
+        :param patch_size: taken from self.patch_size
+        :return: numpy array of shape [3, patch_size, H*W/patch_size]
+ """
+ image = torch.from_numpy(image)
+ patch_size = self.patch_size
+ patches = torch.nn.functional.unfold(
+ image,
+ (patch_size, patch_size),
+ stride=(patch_size, patch_size)
+ )
+
+ patches = patches.reshape(image.size(0), patch_size, patch_size, -1)
+ patches = patches.permute(0, 1, 3, 2).reshape(image.size(0), patch_size, -1)
+ return patches.numpy()
+
+ def preprocess(
+ self,
+ images: Union[Image.Image, List[Image.Image], List[List[Image.Image]]],
+ do_pad: Optional[bool] = True, # TODO: add pad for MiniCPM-Llama3-V-2_5
+ max_slice_nums: int = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs
+ ) -> MiniCPMVBatchFeature:
+ if isinstance(images, Image.Image):
+ images_list = [[images]]
+ elif isinstance(images[0], Image.Image):
+ images_list = [images]
+ else:
+ images_list = images
+
+ new_images_list = []
+ image_sizes_list = []
+ tgt_sizes_list = []
+
+ for _images in images_list:
+ if _images is None or len(_images) == 0:
+ new_images_list.append([])
+ image_sizes_list.append([])
+ tgt_sizes_list.append([])
+ continue
+ if not valid_images(_images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ _images = [self.to_pil_image(image).convert("RGB") for image in _images]
+ input_data_format = infer_channel_dimension_format(np.array(_images[0]))
+
+ new_images = []
+ image_sizes = [image.size for image in _images]
+ tgt_sizes = []
+ for image in _images:
+ image_patches = self.get_sliced_images(image, max_slice_nums)
+ image_patches = [to_numpy_array(image).astype(np.float32) / 255 for image in image_patches]
+ image_patches = [
+ self.normalize(image=image, mean=self.mean, std=self.std, input_data_format=input_data_format)
+ for image in image_patches
+ ]
+ image_patches = [
+ to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)
+ for image in image_patches
+ ]
+ for slice_image in image_patches:
+ new_images.append(self.reshape_by_patch(slice_image))
+ tgt_sizes.append(np.array((slice_image.shape[1] // self.patch_size, slice_image.shape[2] // self.patch_size)))
+
+ if tgt_sizes:
+ tgt_sizes = np.vstack(tgt_sizes)
+
+ new_images_list.append(new_images)
+ image_sizes_list.append(image_sizes)
+ tgt_sizes_list.append(tgt_sizes)
+ return MiniCPMVBatchFeature(
+ data={"pixel_values": new_images_list, "image_sizes": image_sizes_list, "tgt_sizes": tgt_sizes_list}, tensor_type=return_tensors
+ )
+
+AutoImageProcessor.register("MiniCPMVImageProcessor", MiniCPMVImageProcessor)
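+
+
+# Minimal usage sketch (assumes show_demo.jpg from this repo as the example input):
+#     processor = MiniCPMVImageProcessor(max_slice_nums=9, scale_resolution=448, patch_size=14)
+#     batch = processor.preprocess(Image.open("show_demo.jpg"), return_tensors="pt")
+#     # batch["pixel_values"][0] is a list of per-slice tensors of shape [3, 14, H*W/14];
+#     # batch["tgt_sizes"][0] holds the per-slice (H/14, W/14) patch-grid sizes.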
diff --git a/minicpmv4_tokenizer/model.safetensors.index.json b/minicpmv4_tokenizer/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..c57dec39f67009792c1cdfd39452184d809b0e6c
--- /dev/null
+++ b/minicpmv4_tokenizer/model.safetensors.index.json
@@ -0,0 +1,748 @@
+{
+ "metadata": {
+ "total_size": 8119066080
+ },
+ "weight_map": {
+ "llm.lm_head.weight": "model-00002-of-00002.safetensors",
+ "llm.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "llm.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "llm.model.norm.weight": "model-00002-of-00002.safetensors",
+ "resampler.attn.in_proj_bias": "model-00002-of-00002.safetensors",
+ "resampler.attn.in_proj_weight": "model-00002-of-00002.safetensors",
+ "resampler.attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "resampler.attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "resampler.kv_proj.weight": "model-00002-of-00002.safetensors",
+ "resampler.ln_kv.bias": "model-00002-of-00002.safetensors",
+ "resampler.ln_kv.weight": "model-00002-of-00002.safetensors",
+ "resampler.ln_post.bias": "model-00002-of-00002.safetensors",
+ "resampler.ln_post.weight": "model-00002-of-00002.safetensors",
+ "resampler.ln_q.bias": "model-00002-of-00002.safetensors",
+ "resampler.ln_q.weight": "model-00002-of-00002.safetensors",
+ "resampler.proj": "model-00002-of-00002.safetensors",
+ "resampler.query": "model-00002-of-00002.safetensors",
+ "vpm.embeddings.patch_embedding.bias": "model-00002-of-00002.safetensors",
+ "vpm.embeddings.patch_embedding.weight": "model-00002-of-00002.safetensors",
+ "vpm.embeddings.position_embedding.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.1.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.10.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.11.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.12.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.13.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.14.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.15.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.16.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.2.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.3.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.4.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.5.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.6.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.7.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.8.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.layer_norm1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.layer_norm1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.layer_norm2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.layer_norm2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.mlp.fc1.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.mlp.fc1.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.mlp.fc2.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.mlp.fc2.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "vpm.encoder.layers.9.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "vpm.post_layernorm.bias": "model-00002-of-00002.safetensors",
+ "vpm.post_layernorm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/minicpmv4_tokenizer/modeling_minicpmv.py b/minicpmv4_tokenizer/modeling_minicpmv.py
new file mode 100644
index 0000000000000000000000000000000000000000..945f913cc89e91ed78179251908c0f492dfbb1ae
--- /dev/null
+++ b/minicpmv4_tokenizer/modeling_minicpmv.py
@@ -0,0 +1,447 @@
+import math
+from typing import List, Optional
+import json
+import torch
+import torchvision
+
+from threading import Thread
+from copy import deepcopy
+from PIL import Image
+from transformers import AutoProcessor, TextIteratorStreamer
+
+from .configuration_minicpm import MiniCPMVConfig
+from transformers import LlamaForCausalLM, LlamaPreTrainedModel
+from .modeling_navit_siglip import SiglipVisionTransformer
+from .resampler import Resampler
+
+
+
+class MiniCPMVPreTrainedModel(LlamaPreTrainedModel):
+ config_class = MiniCPMVConfig
+
+
+class MiniCPMV(MiniCPMVPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.llm = LlamaForCausalLM(config)
+ self.vpm = self.init_vision_module()
+ self.vision_dim = self.vpm.embed_dim
+ self.embed_dim = self.llm.config.hidden_size
+ self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
+ self.processor = None
+
+ self.terminators = ['<|im_end|>', '</s>']
+
+ def init_vision_module(self):
+ # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit, with tgt_sizes support added
+ if self.config._attn_implementation == 'flash_attention_2':
+ self.config.vision_config._attn_implementation = 'flash_attention_2'
+ else:
+ # sdpa is not supported, fall back to eager attention
+ self.config.vision_config._attn_implementation = 'eager'
+ model = SiglipVisionTransformer(self.config.vision_config)
+ if self.config.drop_vision_last_layer:
+ model.encoder.layers = model.encoder.layers[:-1]
+
+ setattr(model, 'embed_dim', model.embeddings.embed_dim)
+ setattr(model, 'patch_size', model.embeddings.patch_size)
+
+ return model
+
+ def init_resampler(self, embed_dim, vision_dim):
+ return Resampler(
+ num_queries=self.config.query_num,
+ embed_dim=embed_dim,
+ num_heads=embed_dim // 128,
+ kv_dim=vision_dim,
+ adaptive=True
+ )
+
+ def get_input_embeddings(self):
+ return self.llm.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.llm.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.llm.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.llm.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.llm = decoder
+
+ def get_decoder(self):
+ return self.llm
+
+ def get_vllm_embedding(self, data):
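+ # Compute SigLIP features for all images (unless precomputed vision_hidden_states
+ # are provided) and merge them into the text token embeddings at the slice
+ # positions listed in data['image_bound'].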
+ if 'vision_hidden_states' not in data:
+ dtype = self.llm.model.embed_tokens.weight.dtype
+ device = self.llm.model.embed_tokens.weight.device
+ tgt_sizes = data['tgt_sizes']
+ pixel_values_list = data['pixel_values']
+ vision_hidden_states = []
+ all_pixel_values = []
+ img_cnt = []
+ for pixel_values in pixel_values_list:
+ img_cnt.append(len(pixel_values))
+ all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])
+
+ # at least one image exists
+ if all_pixel_values:
+ tgt_sizes = [tgt_size for tgt_size in tgt_sizes if isinstance(tgt_size, torch.Tensor)]
+ tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)
+
+ max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])
+
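+ # Pad the flattened per-image patch sequences to a common length and build a
+ # boolean mask marking the valid patches of each image.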
+ all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
+ padding_value=0.0)
+ B, L, _ = all_pixel_values.shape
+ all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
+
+ patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
+ for i in range(B):
+ patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True
+
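+ # Run the vision tower in chunks of vision_batch_size to bound peak memory,
+ # then compress the patch features into fixed-length queries with the resampler.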
+ vision_batch_size = self.config.vision_batch_size
+ all_pixel_values = all_pixel_values.type(dtype).to(device=device)
+ if B > vision_batch_size:
+ hs = []
+ for i in range(0, B, vision_batch_size):
+ start_idx = i
+ end_idx = i + vision_batch_size
+ tmp_hs = self.vpm(all_pixel_values[start_idx:end_idx], patch_attention_mask=patch_attn_mask[start_idx:end_idx], tgt_sizes=tgt_sizes[start_idx:end_idx]).last_hidden_state
+ hs.append(tmp_hs)
+ vision_embedding = torch.cat(hs, dim=0)
+ else:
+ vision_embedding = self.vpm(all_pixel_values, patch_attention_mask=patch_attn_mask, tgt_sizes=tgt_sizes).last_hidden_state
+ vision_embedding = self.resampler(vision_embedding, tgt_sizes)
+
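+ # Split the concatenated vision features back into one list entry per sample.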
+ start = 0
+ for pixel_values in pixel_values_list:
+ img_cnt = len(pixel_values)
+ if img_cnt > 0:
+ vision_hidden_states.append(vision_embedding[start: start + img_cnt])
+ start += img_cnt
+ else:
+ vision_hidden_states.append([])
+ else: # no image
+ if self.training:
+ dummy_image = torch.zeros(
+ (1, 3, 224, 224),
+ device=device, dtype=dtype
+ )
+ tgt_sizes = torch.Tensor([[(224 // self.config.patch_size), math.ceil(224 / self.config.patch_size)]]).type(torch.int32)
+ dummy_feature = self.resampler(self.vpm(dummy_image).last_hidden_state, tgt_sizes)
+ else:
+ dummy_feature = []
+ for _ in range(len(pixel_values_list)):
+ vision_hidden_states.append(dummy_feature)
+
+ else:
+ vision_hidden_states = data['vision_hidden_states']
+
+ if hasattr(self.llm.config, 'scale_emb'):
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids']) * self.llm.config.scale_emb
+ else:
+ vllm_embedding = self.llm.model.embed_tokens(data['input_ids'])
+
+ vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
+ i, torch.Tensor) else i for i in vision_hidden_states]
+
+ bs = len(data['input_ids'])
+ device = vllm_embedding.device
+ embed_dim = vllm_embedding.shape[-1]
+
+ new_vllm_embeddings = []
+
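+ # For each sample, overwrite the placeholder embeddings inside every image_bound
+ # span with the corresponding vision features via scatter along the sequence axis.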
+ for i in range(bs):
+ cur_vs_hs = vision_hidden_states[i]
+ cur_vllm_emb = vllm_embedding[i]
+
+ if len(cur_vs_hs) == 0:
+ new_vllm_embeddings.append(cur_vllm_emb)
+ continue
+
+ cur_image_bound = data['image_bound'][i]
+
+ if len(cur_image_bound) > 0:
+ image_indices = torch.stack([
+ torch.arange(r[0], r[1], dtype=torch.long)
+ for r in cur_image_bound
+ ], dim=0).flatten().to(device)
+
+ indices_expanded = image_indices.view(-1, 1).expand(-1, embed_dim)
+ vision_features = cur_vs_hs.view(-1, embed_dim)
+
+ updated_emb = cur_vllm_emb.scatter(0, indices_expanded, vision_features)
+ new_vllm_embeddings.append(updated_emb)
+ elif self.training:
+ dummy_term = cur_vs_hs[0].sum() * 0
+ new_vllm_embeddings.append(cur_vllm_emb + dummy_term)
+ else:
+ new_vllm_embeddings.append(cur_vllm_emb)
+
+ vllm_embedding = torch.stack(new_vllm_embeddings, dim=0)
+
+ return vllm_embedding, vision_hidden_states
+
+ def forward(self, data=None, **kwargs):
+ if isinstance(data, torch.Tensor):
+ attention_mask = torch.ones_like(data, dtype=torch.bool)
+ kwargs = {'attention_mask': attention_mask}
+ return self.llm(
+ input_ids=data,
+ **kwargs
+ )
+
+ if data is None:
+ data = {
+ "input_ids": kwargs.pop("input_ids", None),
+ "pixel_values": kwargs.pop("pixel_values", None),
+ "image_bound": kwargs.pop("image_bound", None),
+ "tgt_sizes": kwargs.pop("tgt_sizes", None),
+ "position_ids": kwargs.pop("position_ids", None),
+ }
+ else:
+ kwargs.pop("input_ids", None)
+ kwargs.pop("pixel_values", None)
+ kwargs.pop("image_bound", None)
+ kwargs.pop("tgt_sizes", None)
+ kwargs.pop("position_ids", None)
+ kwargs.pop("inputs_embeds", None)
+
+ vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
+ position_ids = data["position_ids"]
+ if position_ids.dtype != torch.int64:
+ position_ids = position_ids.long()
+
+ return self.llm(
+ input_ids=None,
+ position_ids=position_ids,
+ inputs_embeds=vllm_embedding,
+ **kwargs
+ )
+
+ def _decode(self, inputs_embeds, tokenizer, attention_mask, decode_text=False, **kwargs):
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
+ output = self.llm.generate(
+ inputs_embeds=inputs_embeds,
+ pad_token_id=0,
+ eos_token_id=terminators,
+ attention_mask=attention_mask,
+ **kwargs
+ )
+ if decode_text:
+ return self._decode_text(output, tokenizer)
+ return output
+
+ def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
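+ # Launch generation in a background thread and hand back the streamer so the
+ # caller can iterate over decoded text as it is produced.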
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
+ streamer = TextIteratorStreamer(tokenizer=tokenizer)
+ generation_kwargs = {
+ 'inputs_embeds': inputs_embeds,
+ 'pad_token_id': 0,
+ 'eos_token_id': terminators,
+ 'streamer': streamer
+ }
+ generation_kwargs.update(kwargs)
+
+ thread = Thread(target=self.llm.generate, kwargs=generation_kwargs)
+ thread.start()
+
+ return streamer
+
+ def _decode_text(self, result_ids, tokenizer):
+ terminators = [tokenizer.convert_tokens_to_ids(i) for i in self.terminators]
+ result_text = []
+ for result in result_ids:
+ result = result[result != 0]
+ if result[0] == tokenizer.bos_id:
+ result = result[1:]
+ if result[-1] in terminators:
+ result = result[:-1]
+ result_text.append(tokenizer.decode(result).strip())
+ return result_text
+
+ def generate(
+ self,
+ input_ids=None,
+ pixel_values=None,
+ tgt_sizes=None,
+ image_bound=None,
+ attention_mask=None,
+ tokenizer=None,
+ vision_hidden_states=None,
+ return_vision_hidden_states=False,
+ stream=False,
+ decode_text=False,
+ **kwargs
+ ):
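+ # Embed the multimodal inputs once, then delegate token generation to the
+ # wrapped language model, either streaming or blocking.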
+ assert input_ids is not None
+ assert len(input_ids) == len(pixel_values)
+
+ model_inputs = {
+ "input_ids": input_ids,
+ "image_bound": image_bound,
+ }
+
+ if vision_hidden_states is None:
+ model_inputs["pixel_values"] = pixel_values
+ model_inputs['tgt_sizes'] = tgt_sizes
+ else:
+ model_inputs["vision_hidden_states"] = vision_hidden_states
+
+ with torch.inference_mode():
+ (
+ model_inputs["inputs_embeds"],
+ vision_hidden_states,
+ ) = self.get_vllm_embedding(model_inputs)
+
+ if stream:
+ result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
+ else:
+ result = self._decode(model_inputs["inputs_embeds"], tokenizer, attention_mask, decode_text=decode_text, **kwargs)
+
+ if return_vision_hidden_states:
+ return result, vision_hidden_states
+
+ return result
+
+ def chat(
+ self,
+ image=None,
+ msgs=None,
+ tokenizer=None,
+ processor=None,
+ vision_hidden_states=None,
+ max_new_tokens=2048,
+ min_new_tokens=0,
+ sampling=True,
+ max_inp_length=32768,
+ system_prompt='',
+ stream=False,
+ max_slice_nums=None,
+ use_image_id=None,
+ **kwargs
+ ):
+ if isinstance(msgs[0], list):
+ batched = True
+ else:
+ batched = False
+ msgs_list = msgs
+ images_list = image
+
+ if batched is False:
+ images_list, msgs_list = [images_list], [msgs_list]
+ else:
+ assert images_list is None, "Please integrate image to msgs when using batch inference."
+ images_list = [None] * len(msgs_list)
+ assert len(images_list) == len(msgs_list), "The batch dim of images_list and msgs_list should be the same."
+
+ if processor is None:
+ if self.processor is None:
+ self.processor = AutoProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
+ processor = self.processor
+
+ assert self.config.query_num == processor.image_processor.image_feature_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
+ assert self.config.patch_size == processor.image_processor.patch_size, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
+ assert self.config.use_image_id == processor.image_processor.use_image_id, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
+ assert self.config.slice_config.max_slice_nums == processor.image_processor.max_slice_nums, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
+ assert self.config.slice_mode == processor.image_processor.slice_mode, "These two values should be the same. Check `config.json` and `preprocessor_config.json`."
+
+ prompts_lists = []
+ input_images_lists = []
+ for image, msgs in zip(images_list, msgs_list):
+ if isinstance(msgs, str):
+ msgs = json.loads(msgs)
+ copy_msgs = deepcopy(msgs)
+
+ assert len(msgs) > 0, "msgs is empty"
+ assert sampling or not stream, "if using stream mode, make sure sampling=True"
+
+ if image is not None and isinstance(copy_msgs[0]["content"], str):
+ copy_msgs[0]["content"] = [image, copy_msgs[0]["content"]]
+
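+ # Flatten each message: collect PIL images and replace them with the image placeholder expected by the processor.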
+ images = []
+ for i, msg in enumerate(copy_msgs):
+ role = msg["role"]
+ content = msg["content"]
+ assert role in ["user", "assistant"]
+ if i == 0:
+ assert role == "user", "The role of first msg should be user"
+ if isinstance(content, str):
+ content = [content]
+ cur_msgs = []
+ for c in content:
+ if isinstance(c, Image.Image):
+ images.append(c)
+ cur_msgs.append("(<image>./</image>)")
+ elif isinstance(c, str):
+ cur_msgs.append(c)
+ msg["content"] = "\n".join(cur_msgs)
+
+ if system_prompt:
+ sys_msg = {'role': 'system', 'content': system_prompt}
+ copy_msgs = [sys_msg] + copy_msgs
+
+ prompts_lists.append(processor.tokenizer.apply_chat_template(copy_msgs, tokenize=False, add_generation_prompt=True))
+ input_images_lists.append(images)
+
+ inputs = processor(
+ prompts_lists,
+ input_images_lists,
+ max_slice_nums=max_slice_nums,
+ use_image_id=use_image_id,
+ return_tensors="pt",
+ max_length=max_inp_length
+ ).to(self.device)
+
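+ # Default decoding settings: top-p/top-k sampling when sampling=True, otherwise beam search; explicit kwargs override these below.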
+ if sampling:
+ generation_config = {
+ "top_p": 0.8,
+ "top_k": 100,
+ "temperature": 0.7,
+ "do_sample": True,
+ "repetition_penalty": 1.05
+ }
+ else:
+ generation_config = {
+ "num_beams": 3,
+ "repetition_penalty": 1.2,
+ }
+
+ if min_new_tokens > 0:
+ generation_config['min_new_tokens'] = min_new_tokens
+
+ generation_config.update(
+ (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
+ )
+
+ inputs.pop("image_sizes")
+ with torch.inference_mode():
+ res = self.generate(
+ **inputs,
+ tokenizer=tokenizer,
+ max_new_tokens=max_new_tokens,
+ vision_hidden_states=vision_hidden_states,
+ stream=stream,
+ decode_text=True,
+ **generation_config
+ )
+
+ if stream:
+ def stream_gen():
+ for text in res:
+ for term in self.terminators:
+ text = text.replace(term, '')
+ yield text
+ return stream_gen()
+
+ else:
+ if batched:
+ answer = res
+ else:
+ answer = res[0]
+ return answer
diff --git a/minicpmv4_tokenizer/modeling_navit_siglip.py b/minicpmv4_tokenizer/modeling_navit_siglip.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fe732cf0d5884613003c00bccea0c5ae64a1905
--- /dev/null
+++ b/minicpmv4_tokenizer/modeling_navit_siglip.py
@@ -0,0 +1,937 @@
+# coding=utf-8
+# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Siglip model. """
+# Copied from HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit and add tgt_sizes
+
+
+import os
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn.init import _calculate_fan_in_and_fan_out
+
+from transformers.activations import ACT2FN
+from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
+from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from transformers.modeling_utils import PreTrainedModel
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ logging,
+ replace_return_docstrings,
+)
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+class SiglipVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a
+ Siglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_channels (`int`, *optional*, defaults to 3):
+ Number of channels in the input images.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ Example:
+ ```python
+ >>> from transformers import SiglipVisionConfig, SiglipVisionModel
+ >>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration
+ >>> configuration = SiglipVisionConfig()
+ >>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration
+ >>> model = SiglipVisionModel(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "siglip_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ intermediate_size=3072,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ num_channels=3,
+ image_size=224,
+ patch_size=16,
+ hidden_act="gelu_pytorch_tanh",
+ layer_norm_eps=1e-6,
+ attention_dropout=0.0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.image_size = image_size
+ self.attention_dropout = attention_dropout
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from SiglipConfig
+ if config_dict.get("model_type") == "siglip":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+_CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224"
+
+SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "google/siglip-base-patch16-224",
+ # See all SigLIP models at https://huggingface.co/models?filter=siglip
+]
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
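+ # Returns the flattened indices of non-padding tokens, cumulative sequence lengths, and the longest
+ # sequence length in the batch, as required by the flash-attn varlen kernels.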
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+def _trunc_normal_(tensor, mean, std, a, b):
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+ def norm_cdf(x):
+ # Computes standard normal cumulative distribution function
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
+
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
+ warnings.warn(
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+ "The distribution of values may be incorrect.",
+ stacklevel=2,
+ )
+
+ # Values are generated by using a truncated uniform distribution and
+ # then using the inverse CDF for the normal distribution.
+ # Get upper and lower cdf values
+ l = norm_cdf((a - mean) / std)
+ u = norm_cdf((b - mean) / std)
+
+ # Uniformly fill tensor with values from [l, u], then translate to
+ # [2l-1, 2u-1].
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+ # Use inverse cdf transform for normal distribution to get truncated
+ # standard normal
+ if tensor.dtype in [torch.float16, torch.bfloat16]:
+ # The `erfinv_` op is not (yet?) defined in float16+cpu, bfloat16+gpu
+ og_dtype = tensor.dtype
+ tensor = tensor.to(torch.float32)
+ tensor.erfinv_()
+ tensor = tensor.to(og_dtype)
+ else:
+ tensor.erfinv_()
+
+ # Transform to proper mean, std
+ tensor.mul_(std * math.sqrt(2.0))
+ tensor.add_(mean)
+
+ # Clamp to ensure it's in the proper range
+ if tensor.dtype == torch.float16:
+ # The `clamp_` op is not (yet?) defined in float16+cpu
+ tensor = tensor.to(torch.float32)
+ tensor.clamp_(min=a, max=b)
+ tensor = tensor.to(torch.float16)
+ else:
+ tensor.clamp_(min=a, max=b)
+
+
+def trunc_normal_tf_(
+ tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
+) -> torch.Tensor:
+ r"""Fills the input Tensor with values drawn from a truncated
+ normal distribution. The values are effectively drawn from the
+ normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+ with values outside :math:`[a, b]` redrawn until they are within
+ the bounds. The method used for generating the random values works
+ best when :math:`a \leq \text{mean} \leq b`.
+ NOTE: this 'tf' variant behaves closer to the TensorFlow / JAX implementation, where the
+ bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0,
+ and the result is subsequently scaled and shifted by the mean and std args.
+ Args:
+ tensor: an n-dimensional `torch.Tensor`
+ mean: the mean of the normal distribution
+ std: the standard deviation of the normal distribution
+ a: the minimum cutoff value
+ b: the maximum cutoff value
+ """
+ with torch.no_grad():
+ _trunc_normal_(tensor, 0, 1.0, a, b)
+ tensor.mul_(std).add_(mean)
+
+
+def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
+ fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+ if mode == "fan_in":
+ denom = fan_in
+ elif mode == "fan_out":
+ denom = fan_out
+ elif mode == "fan_avg":
+ denom = (fan_in + fan_out) / 2
+
+ variance = scale / denom
+
+ if distribution == "truncated_normal":
+ # constant is stddev of standard normal truncated to (-2, 2)
+ trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
+ elif distribution == "normal":
+ with torch.no_grad():
+ tensor.normal_(std=math.sqrt(variance))
+ elif distribution == "uniform":
+ bound = math.sqrt(3 * variance)
+ with torch.no_grad():
+ tensor.uniform_(-bound, bound)
+ else:
+ raise ValueError(f"invalid distribution {distribution}")
+
+
+def lecun_normal_(tensor):
+ variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
+
+
+def default_flax_embed_init(tensor):
+ variance_scaling_(tensor, mode="fan_in", distribution="normal")
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
+class SiglipVisionModelOutput(ModelOutput):
+ """
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
+ Args:
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class SiglipVisionEmbeddings(nn.Module):
+ def __init__(self, config: SiglipVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ padding="valid",
+ )
+
+ self.num_patches_per_side = self.image_size // self.patch_size
+ self.num_patches = self.num_patches_per_side**2
+ self.num_positions = self.num_patches
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor, tgt_sizes: Optional[torch.IntTensor]=None) -> torch.Tensor:
+ batch_size = pixel_values.size(0)
+
+ patch_embeds = self.patch_embedding(pixel_values)
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
+
+ max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3)
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
+ position_ids = torch.full(
+ size=(
+ batch_size,
+ max_nb_patches_h * max_nb_patches_w,
+ ),
+ fill_value=0,
+ )
+
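+ # For each sample, bucketize the fractional patch coordinates onto the fixed position-embedding grid
+ # so that variable-resolution inputs reuse the same learned positions.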
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
+ if tgt_sizes is not None:
+ nb_patches_h = tgt_sizes[batch_idx][0]
+ nb_patches_w = tgt_sizes[batch_idx][1]
+ else:
+ nb_patches_h = p_attn_mask[:, 0].sum()
+ nb_patches_w = p_attn_mask[0].sum()
+
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
+
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
+
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
+
+ position_ids = position_ids.to(self.position_embedding.weight.device)
+
+ embeddings = embeddings + self.position_embedding(position_ids)
+ return embeddings
+
+
+class SiglipAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ batch_size, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+
+ k_v_seq_len = key_states.shape[-2]
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
+
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights
+
+
+class SiglipFlashAttention2(SiglipAttention):
+ """
+ Siglip flash attention module. This module inherits from `SiglipAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.is_causal = False # Hack to make sure we don't use a causal mask
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x head_dim x hidden_dim
+ # therefore we just need to keep the original shape
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ # cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ # query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ # if past_key_value is not None:
+ # cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ # key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.dropout if self.training else 0.0
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
+ # so the input hidden states get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+ # to fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ "The input hidden states seems to be silently casted in float32, this might be related to the fact"
+ " you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention: if the input hidden states contain at least one padding token,
+ the input is first unpadded, attention scores are computed on the packed sequences, and the output is padded back.
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`, *optional*):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
+ """
+
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
+class SiglipMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->Siglip
+class SiglipEncoderLayer(nn.Module):
+ def __init__(self, config: SiglipVisionConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self.self_attn = (
+ SiglipAttention(config)
+ if not self._use_flash_attention_2
+ else SiglipFlashAttention2(config)
+ )
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = SiglipMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
+ attention_mask (`torch.FloatTensor`):
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class SiglipPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SiglipVisionConfig
+ base_model_prefix = "siglip"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+
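+ # Module-specific initialization: position embeddings ~ N(0, 1/width), attention and MLP weights via
+ # normal init, generic Linear/Conv2d via LeCun normal, LayerNorm to identity.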
+ if isinstance(module, SiglipVisionEmbeddings):
+ width = self.config.hidden_size
+ nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
+ elif isinstance(module, nn.Embedding):
+ default_flax_embed_init(module.weight)
+ elif isinstance(module, SiglipAttention):
+ nn.init.normal_(module.q_proj.weight)
+ nn.init.normal_(module.k_proj.weight)
+ nn.init.normal_(module.v_proj.weight)
+ nn.init.normal_(module.out_proj.weight)
+ nn.init.zeros_(module.q_proj.bias)
+ nn.init.zeros_(module.k_proj.bias)
+ nn.init.zeros_(module.v_proj.bias)
+ nn.init.zeros_(module.out_proj.bias)
+ elif isinstance(module, SiglipMLP):
+ nn.init.normal_(module.fc1.weight)
+ nn.init.normal_(module.fc2.weight)
+ nn.init.normal_(module.fc1.bias, std=1e-6)
+ nn.init.normal_(module.fc2.bias, std=1e-6)
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
+ lecun_normal_(module.weight)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+SIGLIP_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+ Parameters:
+ config ([`SiglipVisionConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SIGLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->Siglip
+class SiglipEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`SiglipEncoderLayer`].
+ Args:
+ config: SiglipConfig
+ """
+
+ def __init__(self, config: SiglipVisionConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ # Ignore copy
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for encoder_layer in self.layers:
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+@add_start_docstrings(
+ """The vision model from SigLIP without any head or projection on top.""",
+ SIGLIP_START_DOCSTRING
+)
+class SiglipVisionTransformer(SiglipPreTrainedModel):
+ config_class = SiglipVisionConfig
+ main_input_name = "pixel_values"
+ _supports_flash_attn_2 = True
+
+ def __init__(self, config: SiglipVisionConfig):
+ super().__init__(config)
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = SiglipVisionEmbeddings(config)
+ self.encoder = SiglipEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.embeddings.patch_embedding
+
+ @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
+ def forward(
+ self,
+ pixel_values,
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
+ tgt_sizes: Optional[torch.IntTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ batch_size = pixel_values.size(0)
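+ # Without an explicit patch mask, assume every patch is valid (i.e. the image fills the whole grid).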
+ if patch_attention_mask is None:
+ patch_attention_mask = torch.ones(
+ size=(
+ batch_size,
+ pixel_values.size(2) // self.config.patch_size,
+ pixel_values.size(3) // self.config.patch_size,
+ ),
+ dtype=torch.bool,
+ device=pixel_values.device,
+ )
+
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, tgt_sizes=tgt_sizes)
+
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
+ # avoiding passing the attention_mask, which is equivalent to attending to the full sequence
+ if not torch.any(~patch_attention_mask):
+ attention_mask = None
+ else:
+ attention_mask = (
+ _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
+ if not self._use_flash_attention_2
+ else patch_attention_mask
+ )
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ if not return_dict:
+ return (last_hidden_state, None) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=None,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
\ No newline at end of file
diff --git a/minicpmv4_tokenizer/preprocessor_config.json b/minicpmv4_tokenizer/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..951fd9164475e0c264b68745132c3bc53919a976
--- /dev/null
+++ b/minicpmv4_tokenizer/preprocessor_config.json
@@ -0,0 +1,24 @@
+{
+ "image_processor_type": "MiniCPMVImageProcessor",
+ "auto_map": {
+ "AutoProcessor": "processing_minicpmv.MiniCPMVProcessor",
+ "AutoImageProcessor": "image_processing_minicpmv.MiniCPMVImageProcessor"
+ },
+ "processor_class": "MiniCPMVProcessor",
+ "max_slice_nums": 9,
+ "scale_resolution": 448,
+ "patch_size": 14,
+ "use_image_id": true,
+ "image_feature_size": 64,
+ "im_start": "<image>",
+ "im_end": "</image>",
+ "slice_start": "<slice>",
+ "slice_end": "</slice>",
+ "unk": "<unk>",
+ "im_id_start": "<image_id>",
+ "im_id_end": "</image_id>",
+ "slice_mode": true,
+ "norm_mean": [0.5, 0.5, 0.5],
+ "norm_std": [0.5, 0.5, 0.5],
+ "version": 3.0
+}
\ No newline at end of file
diff --git a/minicpmv4_tokenizer/processing_minicpmv.py b/minicpmv4_tokenizer/processing_minicpmv.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d89205da5513f0a49bc3d8e2b38d7fc9ebddf68
--- /dev/null
+++ b/minicpmv4_tokenizer/processing_minicpmv.py
@@ -0,0 +1,238 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for MiniCPMV.
+"""
+
+from typing import List, Optional, Union, Dict, Any
+import torch
+import re
+
+from transformers.image_processing_utils import BatchFeature
+from transformers.image_utils import ImageInput
+from transformers.processing_utils import ProcessorMixin
+from transformers.tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from transformers.utils import TensorType, requires_backends, is_torch_dtype, is_torch_device
+
+from .image_processing_minicpmv import MiniCPMVBatchFeature
+
+
+class MiniCPMVProcessor(ProcessorMixin):
+ r"""
+ Constructs a MiniCPMV processor which wraps a MiniCPMV image processor and a MiniCPMV tokenizer into a single processor.
+
+ [`MiniCPMVProcessor`] offers all the functionalities of [`MiniCPMVImageProcessor`] and [`LlamaTokenizerWrapper`]. See the
+ [`~MiniCPMVProcessor.__call__`] and [`~MiniCPMVProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`MiniCPMVImageProcessor`], *optional*):
+ The image processor is a required input.
+ tokenizer ([`LlamaTokenizerWrapper`], *optional*):
+ The tokenizer is a required input.
+ """
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "AutoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ super().__init__(image_processor, tokenizer)
+ self.version = image_processor.version
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ images: ImageInput = None,
+ max_length: Optional[int] = None,
+ do_pad: Optional[bool] = True,
+ max_slice_nums: int = None,
+ use_image_id: bool = None,
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+ **kwargs
+ ) -> MiniCPMVBatchFeature:
+
+ if images is not None:
+ image_inputs = self.image_processor(images, do_pad=do_pad, max_slice_nums=max_slice_nums, return_tensors=return_tensors)
+ return self._convert_images_texts_to_inputs(image_inputs, text, max_slice_nums=max_slice_nums, use_image_id=use_image_id, max_length=max_length, **kwargs)
+
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ output_ids = args[0]
+ result_text = []
+ for result in output_ids:
+ result = result[result != 0]
+ if result[0] == self.tokenizer.bos_id:
+ result = result[1:]
+ if result[-1] == self.tokenizer.eos_id:
+ result = result[:-1]
+ result_text.append(self.tokenizer.decode(result, *args[1:], **kwargs).strip())
+ return result_text
+ # return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ result = args[0]
+ result = result[result != 0]
+ if result[0] == self.tokenizer.bos_id:
+ result = result[1:]
+ if result[-1] == self.tokenizer.eos_id or (hasattr(self.tokenizer, "eot_id") and result[-1] == self.tokenizer.eot_id):
+ result = result[:-1]
+ return self.tokenizer.decode(result, *args[1:], **kwargs).strip()
+
+ def _convert(
+ self, input_str, max_inp_length: Optional[int] = None
+ ):
+ input_ids = self.tokenizer.encode(input_str)
+ if max_inp_length is not None:
+ input_ids = input_ids[:max_inp_length]
+ input_ids = torch.tensor(input_ids, dtype=torch.int32)
+
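+ # Locate the image/slice start and end marker tokens; each (start + 1, end) pair bounds one image placeholder span.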
+ start_cond = (input_ids == self.tokenizer.im_start_id) | (input_ids == self.tokenizer.slice_start_id)
+ end_cond = (input_ids == self.tokenizer.im_end_id) | (input_ids == self.tokenizer.slice_end_id)
+
+ image_start_tokens = torch.where(start_cond)[0]
+ image_start_tokens += 1
+ image_end_tokens = torch.where(end_cond)[0]
+
+ valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
+
+ image_bounds = torch.hstack(
+ [
+ image_start_tokens[:valid_image_nums].unsqueeze(-1),
+ image_end_tokens[:valid_image_nums].unsqueeze(-1),
+ ]
+ )
+ return input_ids, image_bounds
+
+ def _convert_images_texts_to_inputs(
+ self,
+ images,
+ texts: Union[str, List[str]],
+ truncation=None,
+ max_length=None,
+ max_slice_nums=None,
+ use_image_id=None,
+ return_tensors=None,
+ **kwargs
+ ):
+ if images is None or not len(images):
+ model_inputs = self.tokenizer(texts, return_tensors=return_tensors, truncation=truncation, max_length=max_length, **kwargs)
+ return MiniCPMVBatchFeature(data={**model_inputs})
+
+ pattern = "(<image>./</image>)"
+ images, image_sizes, tgt_sizes = images["pixel_values"], images["image_sizes"], images["tgt_sizes"]
+
+ if isinstance(texts, str):
+ texts = [texts]
+ input_ids_list = []
+ image_bounds_list = []
+ for index, text in enumerate(texts):
+ image_tags = re.findall(pattern, text)
+ assert len(image_tags) == len(image_sizes[index])
+ text_chunks = text.split(pattern)
+ final_text = ""
+ for i in range(len(image_tags)):
+ final_text = final_text + text_chunks[i] + \
+ self.image_processor.get_slice_image_placeholder(
+ image_sizes[index][i],
+ i,
+ max_slice_nums,
+ use_image_id
+ )
+ final_text += text_chunks[-1]
+ input_ids, image_bounds = self._convert(final_text, max_length)
+ input_ids_list.append(input_ids)
+ image_bounds_list.append(image_bounds)
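+ # Left-pad the batch to a common length, then shift the image bounds and mask out the padded prefix accordingly.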
+ padded_input_ids, padding_lengths = self.pad(
+ input_ids_list,
+ padding_side="left"
+ )
+ attention_mask = torch.ones_like(padded_input_ids, dtype=torch.bool)
+ for i, length in enumerate(padding_lengths):
+ image_bounds_list[i] = image_bounds_list[i] + length
+ attention_mask[i, :length] = False
+
+ return MiniCPMVBatchFeature(data={
+ "input_ids": padded_input_ids,
+ "attention_mask": attention_mask,
+ "pixel_values": images,
+ "image_sizes": image_sizes,
+ "image_bound": image_bounds_list,
+ "tgt_sizes": tgt_sizes
+ })
+
+ @property
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+
+ def pad(self, inputs, max_length=None, padding_value=0, padding_side="left"):
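+ # Pads a list of 1-D or 2-D tensors to a common length on the requested side and returns the per-item padding lengths.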
+ items = []
+ if isinstance(inputs[0], list):
+ assert isinstance(inputs[0][0], torch.Tensor)
+ for it in inputs:
+ for tr in it:
+ items.append(tr)
+ else:
+ assert isinstance(inputs[0], torch.Tensor)
+ items = inputs
+
+ batch_size = len(items)
+ shape = items[0].shape
+ dim = len(shape)
+ assert dim <= 2
+ if max_length is None:
+ max_length = 0
+ max_length = max(max_length, max(item.shape[-1] for item in items))
+ min_length = min(item.shape[-1] for item in items)
+ dtype = items[0].dtype
+
+ if dim == 0:
+ return torch.stack([item for item in items], dim=0), [0]
+ elif dim == 1:
+ if max_length == min_length:
+ return torch.stack([item for item in items], dim=0), [0] * batch_size
+ tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
+ else:
+ tensor = (
+ torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype)
+ + padding_value
+ )
+
+ padding_length = []
+ for i, item in enumerate(items):
+ if dim == 1:
+ if padding_side == "left":
+ tensor[i, -len(item) :] = item.clone()
+ else:
+ tensor[i, : len(item)] = item.clone()
+ elif dim == 2:
+ if padding_side == "left":
+ tensor[i, -len(item) :, :] = item.clone()
+ else:
+ tensor[i, : len(item), :] = item.clone()
+ padding_length.append(tensor.shape[-1] - len(item))
+
+ return tensor, padding_length
diff --git a/minicpmv4_tokenizer/resampler.py b/minicpmv4_tokenizer/resampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdec5f777789a190e5ba6a6727f24e13ba1129cb
--- /dev/null
+++ b/minicpmv4_tokenizer/resampler.py
@@ -0,0 +1,782 @@
+from functools import partial
+from typing import Optional, Tuple, List
+import numpy as np
+import warnings
+
+import torch
+from torch import nn
+from torch import Tensor
+import torch.nn.functional as F
+from torch.nn.functional import *
+from torch.nn.modules.activation import *
+from torch.nn.init import trunc_normal_, constant_, xavier_normal_, xavier_uniform_
+
+from transformers.integrations import is_deepspeed_zero3_enabled
+
+def get_2d_sincos_pos_embed(embed_dim, image_size):
+ """
+ image_size: image_size or (image_height, image_width)
+ return:
+ pos_embed: [image_height, image_width, embed_dim]
+ """
+ if isinstance(image_size, int):
+ grid_h_size, grid_w_size = image_size, image_size
+ else:
+ grid_h_size, grid_w_size = image_size[0], image_size[1]
+
+ grid_h = np.arange(grid_h_size, dtype=np.float32)
+ grid_w = np.arange(grid_w_size, dtype=np.float32)
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
+ grid = np.stack(grid, axis=0)
+
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+ return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+ assert embed_dim % 2 == 0
+
+ # use half of dimensions to encode grid_h
+ emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0]) # (H, W, D/2)
+ emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1]) # (H, W, D/2)
+
+ emb = np.concatenate([emb_h, emb_w], axis=-1) # (H, W, D)
+ return emb
+
+
+def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
+ """
+ embed_dim: output dimension for each position
+ pos: a list of positions to be encoded: size (H, W)
+ out: (H, W, D)
+ """
+ assert embed_dim % 2 == 0
+ omega = np.arange(embed_dim // 2, dtype=np.float32)
+ omega /= embed_dim / 2.
+ omega = 1. / 10000 ** omega # (D/2,)
+
+ out = np.einsum('hw,d->hwd', pos, omega) # (H, W, D/2), outer product
+
+ emb_sin = np.sin(out) # (H, W, D/2)
+ emb_cos = np.cos(out) # (H, W, D/2)
+
+ emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (H, W, D)
+ return emb
+
+
+class Resampler(nn.Module):
+ """
+ A 2D perceiver-resampler network with one cross attention layers by
+ given learnable queries and 2d sincos pos_emb
+ Outputs:
+ A tensor with the shape of (batch_size, num_queries, embed_dim)
+ """
+
+ def __init__(
+ self,
+ num_queries,
+ embed_dim,
+ num_heads,
+ kv_dim=None,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ adaptive=False,
+ max_size=(70, 70),
+ ):
+ super().__init__()
+ self.num_queries = num_queries
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.adaptive = adaptive
+ self.max_size = max_size
+
+ self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
+
+ if kv_dim is not None and kv_dim != embed_dim:
+ self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
+ else:
+ self.kv_proj = nn.Identity()
+
+ self.attn = MultiheadAttention(embed_dim, num_heads)
+ self.ln_q = norm_layer(embed_dim)
+ self.ln_kv = norm_layer(embed_dim)
+
+ self.ln_post = norm_layer(embed_dim)
+ self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
+
+ self._set_2d_pos_cache(self.max_size)
+
+ def _set_2d_pos_cache(self, max_size, device='cpu'):
+ if is_deepspeed_zero3_enabled():
+ device='cuda'
+ pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
+ self.register_buffer("pos_embed", pos_embed, persistent=False)
+
+ def _adjust_pos_cache(self, tgt_sizes, device):
+ max_h = torch.max(tgt_sizes[:, 0])
+ max_w = torch.max(tgt_sizes[:, 1])
+ if max_h > self.max_size[0] or max_w > self.max_size[1]:
+ self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
+ self._set_2d_pos_cache(self.max_size, device)
+
+ def _initialize_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ def forward(self, x, tgt_sizes=None):
+ assert x.shape[0] == tgt_sizes.shape[0]
+ bs = x.shape[0]
+
+ device = x.device
+ dtype = x.dtype
+
+ patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]
+
+ self._adjust_pos_cache(tgt_sizes, device=device)
+
+ max_patch_len = torch.max(patch_len)
+ key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)
+
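+ # Build a per-sample positional embedding by slicing the cached 2-D sin-cos grid to each (h, w) target size,
+ # and mark padded patches in the key padding mask.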
+ pos_embed = []
+ for i in range(bs):
+ tgt_h, tgt_w = tgt_sizes[i]
+ pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype)) # patches * D
+ key_padding_mask[i, patch_len[i]:] = True
+
+ pos_embed = torch.nn.utils.rnn.pad_sequence(
+ pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2) # BLD => L * B * D
+
+ x = self.kv_proj(x) # B * L * D
+ x = self.ln_kv(x).permute(1, 0, 2) # L * B * D
+
+ q = self.ln_q(self.query) # Q * D
+
+ out = self.attn(
+ self._repeat(q, bs), # Q * B * D
+ x + pos_embed, # L * B * D + L * B * D
+ x,
+ key_padding_mask=key_padding_mask)[0]
+ # out: Q * B * D
+ x = out.permute(1, 0, 2) # B * Q * D
+
+ x = self.ln_post(x)
+ x = x @ self.proj
+ return x
+
+ def _repeat(self, query, N: int):
+ return query.unsqueeze(1).repeat(1, N, 1)
+
+
+class MultiheadAttention(nn.MultiheadAttention):
+ def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
+ add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
+ super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
+
+ # rewrite the out_proj layer with nn.Linear
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
+
+ def forward(
+ self,
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ key_padding_mask: Optional[Tensor] = None,
+ need_weights: bool = True,
+ attn_mask: Optional[Tensor] = None,
+ average_attn_weights: bool = True,
+ is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
+ why_not_fast_path = ''
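+ # Record any condition that rules out torch's fused multi-head attention fast path; the reason is
+ # reported later if a NestedTensor input requires that path.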
+ if ((attn_mask is not None and torch.is_floating_point(attn_mask))
+ or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
+ why_not_fast_path = "floating-point masks are not supported for fast path."
+
+ is_batched = query.dim() == 3
+
+ key_padding_mask = _canonical_mask(
+ mask=key_padding_mask,
+ mask_name="key_padding_mask",
+ other_type=_none_or_dtype(attn_mask),
+ other_name="attn_mask",
+ target_type=query.dtype
+ )
+
+ attn_mask = _canonical_mask(
+ mask=attn_mask,
+ mask_name="attn_mask",
+ other_type=None,
+ other_name="",
+ target_type=query.dtype,
+ check_other=False,
+ )
+
+
+ if not is_batched:
+ why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
+ elif query is not key or key is not value:
+ # When lifting this restriction, don't forget to either
+ # enforce that the dtypes all match or test cases where
+ # they don't!
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
+ elif self.in_proj_weight is None:
+ why_not_fast_path = "in_proj_weight was None"
+ elif query.dtype != self.in_proj_weight.dtype:
+ # this case will fail anyway, but at least they'll get a useful error message.
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
+ elif self.training:
+ why_not_fast_path = "training is enabled"
+ elif (self.num_heads % 2) != 0:
+ why_not_fast_path = "self.num_heads is not even"
+ elif not self.batch_first:
+ why_not_fast_path = "batch_first was not True"
+ elif self.bias_k is not None:
+ why_not_fast_path = "self.bias_k was not None"
+ elif self.bias_v is not None:
+ why_not_fast_path = "self.bias_v was not None"
+ elif self.add_zero_attn:
+ why_not_fast_path = "add_zero_attn was enabled"
+ elif not self._qkv_same_embed_dim:
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
+ elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
+ why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
+ is not supported with NestedTensor input"
+ elif torch.is_autocast_enabled():
+ why_not_fast_path = "autocast is enabled"
+
+ if not why_not_fast_path:
+ tensor_args = (
+ query,
+ key,
+ value,
+ self.in_proj_weight,
+ self.in_proj_bias,
+ self.out_proj.weight,
+ self.out_proj.bias,
+ )
+ # We have to use list comprehensions below because TorchScript does not support
+ # generator expressions.
+ if torch.overrides.has_torch_function(tensor_args):
+ why_not_fast_path = "some Tensor argument has_torch_function"
+ elif _is_make_fx_tracing():
+ why_not_fast_path = "we are running make_fx tracing"
+ elif not all(_check_arg_device(x) for x in tensor_args):
+ why_not_fast_path = ("some Tensor argument's device is neither one of "
+ f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
+ elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
+ why_not_fast_path = ("grad is enabled and at least one of query or the "
+ "input/output projection weights or biases requires_grad")
+ if not why_not_fast_path:
+ merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
+
+ if self.in_proj_bias is not None and self.in_proj_weight is not None:
+ return torch._native_multi_head_attention(
+ query,
+ key,
+ value,
+ self.embed_dim,
+ self.num_heads,
+ self.in_proj_weight,
+ self.in_proj_bias,
+ self.out_proj.weight,
+ self.out_proj.bias,
+ merged_mask,
+ need_weights,
+ average_attn_weights,
+ mask_type)
+
+ any_nested = query.is_nested or key.is_nested or value.is_nested
+ assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
+ f"The fast path was not hit because {why_not_fast_path}")
+
+ if self.batch_first and is_batched:
+ # make sure that the transpose op does not affect the "is" property
+ if key is value:
+ if query is key:
+ query = key = value = query.transpose(1, 0)
+ else:
+ query, key = (x.transpose(1, 0) for x in (query, key))
+ value = key
+ else:
+ query, key, value = (x.transpose(1, 0) for x in (query, key, value))
+
+ if not self._qkv_same_embed_dim:
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
+ query, key, value, self.embed_dim, self.num_heads,
+ self.in_proj_weight, self.in_proj_bias,
+ self.bias_k, self.bias_v, self.add_zero_attn,
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
+ training=self.training,
+ key_padding_mask=key_padding_mask, need_weights=need_weights,
+ attn_mask=attn_mask,
+ use_separate_proj_weight=True,
+ q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
+ v_proj_weight=self.v_proj_weight,
+ average_attn_weights=average_attn_weights,
+ is_causal=is_causal)
+ else:
+ attn_output, attn_output_weights = self.multi_head_attention_forward(
+ query, key, value, self.embed_dim, self.num_heads,
+ self.in_proj_weight, self.in_proj_bias,
+ self.bias_k, self.bias_v, self.add_zero_attn,
+ self.dropout, self.out_proj.weight, self.out_proj.bias,
+ training=self.training,
+ key_padding_mask=key_padding_mask,
+ need_weights=need_weights,
+ attn_mask=attn_mask,
+ average_attn_weights=average_attn_weights,
+ is_causal=is_causal)
+ if self.batch_first and is_batched:
+ return attn_output.transpose(1, 0), attn_output_weights
+ else:
+ return attn_output, attn_output_weights
+
+ def multi_head_attention_forward(
+ self,
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ embed_dim_to_check: int,
+ num_heads: int,
+ in_proj_weight: Optional[Tensor],
+ in_proj_bias: Optional[Tensor],
+ bias_k: Optional[Tensor],
+ bias_v: Optional[Tensor],
+ add_zero_attn: bool,
+ dropout_p: float,
+ out_proj_weight: Tensor,
+ out_proj_bias: Optional[Tensor],
+ training: bool = True,
+ key_padding_mask: Optional[Tensor] = None,
+ need_weights: bool = True,
+ attn_mask: Optional[Tensor] = None,
+ use_separate_proj_weight: bool = False,
+ q_proj_weight: Optional[Tensor] = None,
+ k_proj_weight: Optional[Tensor] = None,
+ v_proj_weight: Optional[Tensor] = None,
+ static_k: Optional[Tensor] = None,
+ static_v: Optional[Tensor] = None,
+ average_attn_weights: bool = True,
+ is_causal: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
+
+ is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
+
+ # For unbatched input, we unsqueeze at the expected batch dim to pretend that
+ # the input is batched, run the computation, and squeeze the batch dimension
+ # again before returning, so the output doesn't carry this temporary batch
+ # dimension.
+ if not is_batched:
+ # unsqueeze if the input is unbatched
+ query = query.unsqueeze(1)
+ key = key.unsqueeze(1)
+ value = value.unsqueeze(1)
+ if key_padding_mask is not None:
+ key_padding_mask = key_padding_mask.unsqueeze(0)
+
+ # set up shape vars
+ tgt_len, bsz, embed_dim = query.shape
+ src_len, _, _ = key.shape
+
+ key_padding_mask = _canonical_mask(
+ mask=key_padding_mask,
+ mask_name="key_padding_mask",
+ other_type=_none_or_dtype(attn_mask),
+ other_name="attn_mask",
+ target_type=query.dtype
+ )
+
+ if is_causal and attn_mask is None:
+ raise RuntimeError(
+ "Need attn_mask if specifying the is_causal hint. "
+ "You may use the Transformer module method "
+ "`generate_square_subsequent_mask` to create this mask."
+ )
+
+ if is_causal and key_padding_mask is None and not need_weights:
+ # When we have a key_padding_mask or need weights, we need an explicit
+ # attn_mask; otherwise the is_causal hint is passed straight through to
+ # SDPA as the is_causal indicator.
+ attn_mask = None
+ else:
+ attn_mask = _canonical_mask(
+ mask=attn_mask,
+ mask_name="attn_mask",
+ other_type=None,
+ other_name="",
+ target_type=query.dtype,
+ check_other=False,
+ )
+
+ if key_padding_mask is not None:
+ # We have the attn_mask, and use that to merge kpm into it.
+ # Turn off use of is_causal hint, as the merged mask is no
+ # longer causal.
+ is_causal = False
+
+ assert embed_dim == embed_dim_to_check, \
+ f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+ if isinstance(embed_dim, torch.Tensor):
+ # embed_dim can be a tensor when JIT tracing
+ head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
+ else:
+ head_dim = embed_dim // num_heads
+ assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+ if use_separate_proj_weight:
+ # allow MHA to have different embedding dimensions when separate projection weights are used
+ assert key.shape[:2] == value.shape[:2], \
+ f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+ else:
+ assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
+
+ #
+ # compute in-projection
+ #
+ if not use_separate_proj_weight:
+ assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
+ q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
+ else:
+ assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
+ assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
+ assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
+ if in_proj_bias is None:
+ b_q = b_k = b_v = None
+ else:
+ b_q, b_k, b_v = in_proj_bias.chunk(3)
+ q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
+
+ # prep attention mask
+
+ if attn_mask is not None:
+ # ensure attn_mask's dim is 3
+ if attn_mask.dim() == 2:
+ correct_2d_size = (tgt_len, src_len)
+ if attn_mask.shape != correct_2d_size:
+ raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
+ attn_mask = attn_mask.unsqueeze(0)
+ elif attn_mask.dim() == 3:
+ correct_3d_size = (bsz * num_heads, tgt_len, src_len)
+ if attn_mask.shape != correct_3d_size:
+ raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
+ else:
+ raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
+
+ # add bias along batch dimension (currently second)
+ if bias_k is not None and bias_v is not None:
+ assert static_k is None, "bias cannot be added to static key."
+ assert static_v is None, "bias cannot be added to static value."
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
+ if attn_mask is not None:
+ attn_mask = pad(attn_mask, (0, 1))
+ if key_padding_mask is not None:
+ key_padding_mask = pad(key_padding_mask, (0, 1))
+ else:
+ assert bias_k is None
+ assert bias_v is None
+
+ #
+ # reshape q, k, v for multihead attention and make them batch-first
+ #
+ q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
+ if static_k is None:
+ k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
+ else:
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
+ assert static_k.size(0) == bsz * num_heads, \
+ f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
+ assert static_k.size(2) == head_dim, \
+ f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
+ k = static_k
+ if static_v is None:
+ v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
+ else:
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
+ assert static_v.size(0) == bsz * num_heads, \
+ f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
+ assert static_v.size(2) == head_dim, \
+ f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
+ v = static_v
+
+ # add zero attention along batch dimension (now first)
+ if add_zero_attn:
+ zero_attn_shape = (bsz * num_heads, 1, head_dim)
+ k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
+ v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
+ if attn_mask is not None:
+ attn_mask = pad(attn_mask, (0, 1))
+ if key_padding_mask is not None:
+ key_padding_mask = pad(key_padding_mask, (0, 1))
+
+ # update source sequence length after adjustments
+ src_len = k.size(1)
+
+ # merge key padding and attention masks
+ if key_padding_mask is not None:
+ assert key_padding_mask.shape == (bsz, src_len), \
+ f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
+ key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
+ expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
+ if attn_mask is None:
+ attn_mask = key_padding_mask
+ else:
+ attn_mask = attn_mask + key_padding_mask
+
+ # adjust dropout probability
+ if not training:
+ dropout_p = 0.0
+
+ #
+ # (deep breath) calculate attention and out projection
+ #
+
+ if need_weights:
+ B, Nt, E = q.shape
+ q_scaled = q / math.sqrt(E)
+
+ assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
+
+ if attn_mask is not None:
+ attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
+ else:
+ attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
+ attn_output_weights = softmax(attn_output_weights, dim=-1)
+ if dropout_p > 0.0:
+ attn_output_weights = dropout(attn_output_weights, p=dropout_p)
+
+ attn_output = torch.bmm(attn_output_weights, v)
+
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
+ attn_output = self.out_proj(attn_output)
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
+
+ # optionally average attention weights over heads
+ attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
+ if average_attn_weights:
+ attn_output_weights = attn_output_weights.mean(dim=1)
+
+ if not is_batched:
+ # squeeze the output if input was unbatched
+ attn_output = attn_output.squeeze(1)
+ attn_output_weights = attn_output_weights.squeeze(0)
+ return attn_output, attn_output_weights
+ else:
+ # attn_mask can be either (L,S) or (N*num_heads, L, S)
+ # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
+ # in order to match the input for SDPA of (N, num_heads, L, S)
+ if attn_mask is not None:
+ if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
+ attn_mask = attn_mask.unsqueeze(0)
+ else:
+ attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
+
+ q = q.view(bsz, num_heads, tgt_len, head_dim)
+ k = k.view(bsz, num_heads, src_len, head_dim)
+ v = v.view(bsz, num_heads, src_len, head_dim)
+
+ attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
+ attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
+ if not is_batched:
+ # squeeze the output if input was unbatched
+ attn_output = attn_output.squeeze(1)
+ return attn_output, None
+
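+# Editorial summary of multi_head_attention_forward above: when need_weights is
+# True, attention is computed explicitly (baddbmm/bmm + softmax) so the per-head
+# weights can be returned (optionally averaged over heads); otherwise q/k/v are
+# folded into a 4-D (B, num_heads, L, head_dim) layout,
+# F.scaled_dot_product_attention is used, and None is returned for the weights.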
+
+def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
+ key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
+ # Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask`
+ # and returns if the input is batched or not.
+ # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
+
+ # Shape check.
+ if query.dim() == 3:
+ # Batched Inputs
+ is_batched = True
+ assert key.dim() == 3 and value.dim() == 3, \
+ ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
+ if key_padding_mask is not None:
+ assert key_padding_mask.dim() == 2, \
+ ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
+ f" but found {key_padding_mask.dim()}-D tensor instead")
+ if attn_mask is not None:
+ assert attn_mask.dim() in (2, 3), \
+ ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
+ f" but found {attn_mask.dim()}-D tensor instead")
+ elif query.dim() == 2:
+ # Unbatched Inputs
+ is_batched = False
+ assert key.dim() == 2 and value.dim() == 2, \
+ ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
+ f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
+
+ if key_padding_mask is not None:
+ assert key_padding_mask.dim() == 1, \
+ ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
+ f" but found {key_padding_mask.dim()}-D tensor instead")
+
+ if attn_mask is not None:
+ assert attn_mask.dim() in (2, 3), \
+ ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
+ f" but found {attn_mask.dim()}-D tensor instead")
+ if attn_mask.dim() == 3:
+ expected_shape = (num_heads, query.shape[0], key.shape[0])
+ assert attn_mask.shape == expected_shape, \
+ (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
+ else:
+ raise AssertionError(
+ f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
+
+ return is_batched
+
+
+def _canonical_mask(
+ mask: Optional[Tensor],
+ mask_name: str,
+ other_type: Optional[DType],
+ other_name: str,
+ target_type: DType,
+ check_other: bool = True,
+) -> Optional[Tensor]:
+
+ if mask is not None:
+ _mask_dtype = mask.dtype
+ _mask_is_float = torch.is_floating_point(mask)
+ if _mask_dtype != torch.bool and not _mask_is_float:
+ raise AssertionError(
+ f"only bool and floating types of {mask_name} are supported")
+ if check_other and other_type is not None:
+ if _mask_dtype != other_type:
+ warnings.warn(
+ f"Support for mismatched {mask_name} and {other_name} "
+ "is deprecated. Use same type for both instead."
+ )
+ if not _mask_is_float:
+ mask = (
+ torch.zeros_like(mask, dtype=target_type)
+ .masked_fill_(mask, float("-inf"))
+ )
+ return mask
+
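+# Editorial example of _canonical_mask (not part of the original file): a
+# boolean key_padding_mask such as torch.tensor([[False, False, True]]) is
+# converted into an additive float mask of the target dtype,
+# tensor([[0., 0., -inf]]), while an already-floating mask is passed through
+# unchanged (after the dtype checks above).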
+
+def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
+ if input is None:
+ return None
+ elif isinstance(input, torch.Tensor):
+ return input.dtype
+ raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
+
+def _in_projection_packed(
+ q: Tensor,
+ k: Tensor,
+ v: Tensor,
+ w: Tensor,
+ b: Optional[Tensor] = None,
+) -> List[Tensor]:
+ r"""
+ Performs the in-projection step of the attention operation, using packed weights.
+ Output is a triple containing projection tensors for query, key and value.
+ Args:
+ q, k, v: query, key and value tensors to be projected. For self-attention,
+ these are typically the same tensor; for encoder-decoder attention,
+ k and v are typically the same tensor. (We take advantage of these
+ identities for performance if they are present.) Regardless, q, k and v
+ must share a common embedding dimension; otherwise their shapes may vary.
+ w: projection weights for q, k and v, packed into a single tensor. Weights
+ are packed along dimension 0, in q, k, v order.
+ b: optional projection biases for q, k and v, packed into a single tensor
+ in q, k, v order.
+ Shape:
+ Inputs:
+ - q: :math:`(..., E)` where E is the embedding dimension
+ - k: :math:`(..., E)` where E is the embedding dimension
+ - v: :math:`(..., E)` where E is the embedding dimension
+ - w: :math:`(E * 3, E)` where E is the embedding dimension
+ - b: :math:`E * 3` where E is the embedding dimension
+ Output:
+ - in output list :math:`[q', k', v']`, each output tensor will have the
+ same shape as the corresponding input tensor.
+ """
+ E = q.size(-1)
+ if k is v:
+ if q is k:
+ # self-attention
+ proj = linear(q, w, b)
+ # reshaping to (3, E) rather than (E, 3) is deliberate: it gives better memory coalescing and keeps the same ordering as chunk()
+ proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
+ return proj[0], proj[1], proj[2]
+ else:
+ # encoder-decoder attention
+ w_q, w_kv = w.split([E, E * 2])
+ if b is None:
+ b_q = b_kv = None
+ else:
+ b_q, b_kv = b.split([E, E * 2])
+ q_proj = linear(q, w_q, b_q)
+ kv_proj = linear(k, w_kv, b_kv)
+ # reshaping to (2, E) rather than (E, 2) is deliberate: it gives better memory coalescing and keeps the same ordering as chunk()
+ kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
+ return (q_proj, kv_proj[0], kv_proj[1])
+ else:
+ w_q, w_k, w_v = w.chunk(3)
+ if b is None:
+ b_q = b_k = b_v = None
+ else:
+ b_q, b_k, b_v = b.chunk(3)
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
+
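+# Editorial note on _in_projection_packed above: for self-attention (q is k is v)
+# a single linear() with the packed (3*E, E) weight yields one (..., 3*E) tensor
+# that is unflattened to (..., 3, E) and split into q', k', v' -- equivalent to
+# three separate E x E projections, but done with a single GEMM.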
+
+def _in_projection(
+ q: Tensor,
+ k: Tensor,
+ v: Tensor,
+ w_q: Tensor,
+ w_k: Tensor,
+ w_v: Tensor,
+ b_q: Optional[Tensor] = None,
+ b_k: Optional[Tensor] = None,
+ b_v: Optional[Tensor] = None,
+) -> Tuple[Tensor, Tensor, Tensor]:
+ r"""
+ Performs the in-projection step of the attention operation. This is simply
+ a triple of linear projections, with shape constraints on the weights which
+ ensure embedding dimension uniformity in the projected outputs.
+ Output is a triple containing projection tensors for query, key and value.
+ Args:
+ q, k, v: query, key and value tensors to be projected.
+ w_q, w_k, w_v: weights for q, k and v, respectively.
+ b_q, b_k, b_v: optional biases for q, k and v, respectively.
+ Shape:
+ Inputs:
+ - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
+ number of leading dimensions.
+ - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
+ number of leading dimensions.
+ - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
+ number of leading dimensions.
+ - w_q: :math:`(Eq, Eq)`
+ - w_k: :math:`(Eq, Ek)`
+ - w_v: :math:`(Eq, Ev)`
+ - b_q: :math:`(Eq)`
+ - b_k: :math:`(Eq)`
+ - b_v: :math:`(Eq)`
+ Output: in output triple :math:`(q', k', v')`,
+ - q': :math:`[Qdims..., Eq]`
+ - k': :math:`[Kdims..., Eq]`
+ - v': :math:`[Vdims..., Eq]`
+ """
+ Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
+ assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
+ assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
+ assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
+ assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
+ assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
+ assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
+ return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
\ No newline at end of file
diff --git a/minicpmv4_tokenizer/special_tokens_map.json b/minicpmv4_tokenizer/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..8619dda6f3eb6d60d0a1bb274820054e46f41699
--- /dev/null
+++ b/minicpmv4_tokenizer/special_tokens_map.json
@@ -0,0 +1,81 @@
+{
+ "additional_special_tokens": [
+ {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|tool_call|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|execute_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|execute_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ ],
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/minicpmv4_tokenizer/tokenization_minicpmv_fast.py b/minicpmv4_tokenizer/tokenization_minicpmv_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..b863f1b9b6ffc6c071dc8516957dcfb491ca88e5
--- /dev/null
+++ b/minicpmv4_tokenizer/tokenization_minicpmv_fast.py
@@ -0,0 +1,66 @@
+from transformers import LlamaTokenizerFast
+
+
+class MiniCPMVTokenizerFast(LlamaTokenizerFast):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.im_start = ""
+ self.im_end = ""
+ self.ref_start = "["
+ self.ref_end = "]"
+ self.box_start = ""
+ self.box_end = ""
+ self.quad_start = ""
+ self.quad_end = ""
+ self.slice_start = ""
+ self.slice_end = ""
+ self.im_id_start = ""
+ self.im_id_end = ""
+
+ @property
+ def eos_id(self):
+ return self.eos_token_id
+
+ @property
+ def bos_id(self):
+ return self.bos_token_id
+
+ @property
+ def unk_id(self):
+ return self.unk_token_id
+
+ @property
+ def im_start_id(self):
+ return self.convert_tokens_to_ids(self.im_start)
+
+ @property
+ def im_end_id(self):
+ return self.convert_tokens_to_ids(self.im_end)
+
+ @property
+ def slice_start_id(self):
+ return self.convert_tokens_to_ids(self.slice_start)
+
+ @property
+ def slice_end_id(self):
+ return self.convert_tokens_to_ids(self.slice_end)
+
+ @property
+ def im_id_start_id(self):
+ return self.convert_tokens_to_ids(self.im_id_start)
+
+ @property
+ def im_id_end_id(self):
+ return self.convert_tokens_to_ids(self.im_id_end)
+
+ @property
+ def newline_id(self):
+ return self.convert_tokens_to_ids('\n')
+
+ @staticmethod
+ def escape(text: str) -> str:
+ return text
+
+ @staticmethod
+ def unescape(text: str) -> str:
+ return text
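+
+# Editorial usage sketch (assumptions: the surrounding tokenizer files register
+# this class via auto_map, and the directory name below is hypothetical):
+#
+#   from transformers import AutoTokenizer
+#   tok = AutoTokenizer.from_pretrained("minicpmv4_tokenizer", trust_remote_code=True)
+#   ids = tok.encode("hello")       # plain LlamaTokenizerFast behaviour
+#   tok.im_start_id                 # id of the image-start delimiter (see properties above)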
diff --git a/minicpmv4_tokenizer/tokenizer.json b/minicpmv4_tokenizer/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..a79a8753aa25a89b615f521361b356f63188e48b
--- /dev/null
+++ b/minicpmv4_tokenizer/tokenizer.json
@@ -0,0 +1,178681 @@
+{
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 0,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 1,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 2,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73440,
+ "content": "<|im_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73441,
+ "content": "<|im_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73442,
+ "content": "<|tool_call|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73443,
+ "content": "<|execute_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73444,
+ "content": "<|execute_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73445,
+ "content": "<|fim_prefix|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73446,
+ "content": "<|fim_middle|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 73447,
+ "content": "<|fim_suffix|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 101,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 102,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 103,
+ "content": "[",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 104,
+ "content": "]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 105,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 106,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 107,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 108,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 109,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 110,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 111,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 112,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 113,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 114,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 115,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 116,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 117,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 118,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 119,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 120,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 121,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 122,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 123,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 124,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 125,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 126,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 127,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 129,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 130,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 131,
+ "content": "<|audio_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 132,
+ "content": "<|audio|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 133,
+ "content": "<|audio_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 134,
+ "content": "<|spk_bos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 135,
+ "content": "<|spk|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 136,
+ "content": "<|spk_eos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 137,
+ "content": "<|tts_bos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 138,
+ "content": "<|tts_eos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 139,
+ "content": "<|listen|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 140,
+ "content": "<|speak|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 141,
+ "content": "<|interrupt|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 142,
+ "content": "<|vad_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 143,
+ "content": "<|vad_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 144,
+ "content": "<|emotion_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 145,
+ "content": "<|emotion_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 146,
+ "content": "<|speed_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 147,
+ "content": "<|speed_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 148,
+ "content": "<|pitch_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 149,
+ "content": "<|pitch_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 150,
+ "content": "<|timbre_0|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 151,
+ "content": "<|timbre_1|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 152,
+ "content": "<|timbre_2|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 153,
+ "content": "<|timbre_3|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 154,
+ "content": "<|timbre_4|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 155,
+ "content": "<|timbre_5|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 156,
+ "content": "<|timbre_6|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 157,
+ "content": "<|timbre_7|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 158,
+ "content": "<|timbre_8|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 159,
+ "content": "<|timbre_9|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 160,
+ "content": "<|timbre_10|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 161,
+ "content": "<|timbre_11|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 162,
+ "content": "<|timbre_12|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 163,
+ "content": "<|timbre_13|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 164,
+ "content": "<|timbre_14|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 165,
+ "content": "<|timbre_15|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 166,
+ "content": "<|timbre_16|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 167,
+ "content": "<|timbre_17|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 168,
+ "content": "<|timbre_18|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 169,
+ "content": "<|timbre_19|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 170,
+ "content": "<|timbre_20|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 171,
+ "content": "<|timbre_21|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 172,
+ "content": "<|timbre_22|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 173,
+ "content": "<|timbre_23|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 174,
+ "content": "<|timbre_24|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 175,
+ "content": "<|timbre_25|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 176,
+ "content": "<|timbre_26|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 177,
+ "content": "<|timbre_27|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 178,
+ "content": "<|timbre_28|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 179,
+ "content": "<|timbre_29|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 180,
+ "content": "<|timbre_30|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 181,
+ "content": "<|timbre_31|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": {
+ "type": "Sequence",
+ "normalizers": [
+ {
+ "type": "Prepend",
+ "prepend": "▁"
+ },
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": " "
+ },
+ "content": "▁"
+ }
+ ]
+ },
+ "pre_tokenizer": null,
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "",
+ "type_id": 1
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "": {
+ "id": "",
+ "ids": [
+ 1
+ ],
+ "tokens": [
+ ""
+ ]
+ }
+ }
+ },
+ "decoder": {
+ "type": "Sequence",
+ "decoders": [
+ {
+ "type": "Replace",
+ "pattern": {
+ "String": "▁"
+ },
+ "content": " "
+ },
+ {
+ "type": "ByteFallback"
+ },
+ {
+ "type": "Fuse"
+ },
+ {
+ "type": "Strip",
+ "content": " ",
+ "start": 1,
+ "stop": 0
+ }
+ ]
+ },
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": "",
+ "continuing_subword_prefix": null,
+ "end_of_word_suffix": null,
+ "fuse_unk": true,
+ "byte_fallback": true,
+ "ignore_merges": false,
+ "vocab": {
+ "": 0,
+ "": 1,
+ "": 2,
+ "": 3,
+ "": 4,
+ "\n": 5,
+ "\t": 6,
+ "
": 7,
+ "
": 8,
+ "": 9,
+ "": 10,
+ "": 11,
+ "
": 12,
+ "": 13,
+ " | | ": 14,
+ "": 15,
+ "": 16,
+ "": 17,
+ "": 18,
+ "": 21,
+ "": 22,
+ "
": 23,
+ "": 24,
+ "": 25,
+ "": 26,
+ "": 27,
+ "": 28,
+ "": 29,
+ "": 30,
+ "": 31,
+ "": 32,
+ "
": 33,
+ "
": 34,
+ "
": 35,
+ "": 36,
+ "": 37,
+ "": 38,
+ "
": 39,
+ "": 40,
+ "": 41,
+ "
": 42,
+ "": 43,
+ "
": 44,
+ "
": 45,
+ "": 46,
+ "": 47,
+ "
": 48,
+ "": 49,
+ "": 50,
+ "": 51,
+ "0": 52,
+ "1": 53,
+ "2": 54,
+ "3": 55,
+ "4": 56,
+ "5": 57,
+ "6": 58,
+ "7": 59,
+ "8": 60,
+ "9": 61,
+ "+": 62,
+ "-": 63,
+ "=": 64,
+ ",": 65,
+ "。": 66,
+ "!": 67,
+ "?": 68,
+ "、": 69,
+ ":": 70,
+ "¥": 71,
+ ".": 72,
+ "!": 73,
+ "?": 74,
+ "...": 75,
+ "。。。": 76,
+ "。。。。。。": 77,
+ "《": 78,
+ "》": 79,
+ "【": 80,
+ "】": 81,
+ "『": 82,
+ "』": 83,
+ "```": 84,
+ "": 86,
+ "---": 87,
+ "": 88,
+ ";": 89,
+ ".": 90,
+ "=": 91,
+ "<": 92,
+ ">": 93,
+ "-": 94,
+ "+": 95,
+ "%": 96,
+ "‼": 97,
+ "㊣": 98,
+ "/": 99,
+ "|": 100,
+ "": 182,
+ "": 183,
+ "": 184,
+ "": 185,
+ "": 186,
+ "": 187,
+ "": 188,
+ "": 189,
+ "": 190,
+ "": 191,
+ "": 192,
+ "": 193,
+ "": 194,
+ "": 195,
+ "": 196,
+ "": 197,
+ "": 198,
+ "": 199,
+ "": 200,
+ "": 201,
+ "": 202,
+ "": 203,
+ "": 204,
+ "": 205,
+ "": 206,
+ "": 207,
+ "": 208,
+ "": 209,
+ "": 210,
+ "": 211,
+ "": 212,
+ "": 213,
+ "": 214,
+ "": 215,
+ "": 216,
+ "": 217,
+ "": 218,
+ "": 219,
+ "": 220,
+ "": 221,
+ "": 222,
+ "": 223,
+ "": 224,
+ "": 225,
+ "": 226,
+ "": 227,
+ "": 228,
+ "": 229,
+ "": 230,
+ "": 231,
+ "": 232,
+ "": 233,
+ "": 234,
+ "": 235,
+ "": 236,
+ "": 237,
+ "": 238,
+ "": 239,
+ "": 240,
+ "": 241,
+ "": 242,
+ "": 243,
+ "": 244,
+ "": 245,
+ "": 246,
+ "": 247,
+ "": 248,
+ "": 249,
+ "": 250,
+ "": 251,
+ "": 252,
+ "": 253,
+ "": 254,
+ "": 255,
+ "": 256,
+ "": 257,
+ "": 258,
+ "": 259,
+ "": 260,
+ "": 261,
+ "": 262,
+ "": 263,
+ "": 264,
+ "": 265,
+ "": 266,
+ "": 267,
+ "": 268,
+ "": 269,
+ "": 270,
+ "": 271,
+ "": 272,
+ "": 273,
+ "": 274,
+ "": 275,
+ "": 276,
+ "": 277,
+ "": 278,
+ "": 279,
+ "": 280,
+ "": 281,
+ "": 282,
+ "": 283,
+ "": 284,
+ "": 285,
+ "": 286,
+ "": 287,
+ "": 288,
+ "": 289,
+ "": 290,
+ "": 291,
+ "": 292,
+ "": 293,
+ "": 294,
+ "": 295,
+ "": 296,
+ "": 297,
+ "": 298,
+ "": 299,
+ "": 300,
+ "": 301,
+ "": 302,
+ "": 303,
+ "": 304,
+ "": 305,
+ "": 306,
+ "": 307,
+ "": 308,
+ "": 309,
+ "": 310,
+ "": 311,
+ "": 312,
+ "": 313,
+ "": 314,
+ "": 315,
+ "": 316,
+ "": 317,
+ "": 318,
+ "": 319,
+ "": 320,
+ "": 321,
+ "": 322,
+ "": 323,
+ "": 324,
+ "": 325,
+ "": 326,
+ "": 327,
+ "": 328,
+ "": 329,
+ "": 330,
+ "": 331,
+ "": 332,
+ "": 333,
+ "": 334,
+ "": 335,
+ "": 336,
+ "": 337,
+ "": 338,
+ "": 339,
+ "": 340,
+ "": 341,
+ "": 342,
+ "": 343,
+ "": 344,
+ "": 345,
+ "": 346,
+ "": 347,
+ "": 348,
+ "": 349,
+ "": 350,
+ "": 351,
+ "": 352,
+ "": 353,
+ "": 354,
+ "": 355,
+ "": 356,
+ "": 357,
+ "": 358,
+ "": 359,
+ "": 360,
+ "": 361,
+ "": 362,
+ "": 363,
+ "": 364,
+ "": 365,
+ "": 366,
+ "": 367,
+ "": 368,
+ "": 369,
+ "": 370,
+ "": 371,
+ "": 372,
+ "": 373,
+ "": 374,
+ "": 375,
+ "": 376,
+ "": 377,
+ "": 378,
+ "": 379,
+ "": 380,
+ "": 381,
+ "": 382,
+ "": 383,
+ "": 384,
+ "": 385,
+ "": 386,
+ "": 387,
+ "": 388,
+ "": 389,
+ "": 390,
+ "": 391,
+ "": 392,
+ "": 393,
+ "": 394,
+ "": 395,
+ "": 396,
+ "": 397,
+ "": 398,
+ "": 399,
+ "": 400,
+ "": 401,
+ "": 402,
+ "": 403,
+ "": 404,
+ "": 405,
+ "": 406,
+ "": 407,
+ "": 408,
+ "": 409,
+ "": 410,
+ "": 411,
+ "": 412,
+ "": 413,
+ "": 414,
+ "": 415,
+ "": 416,
+ "": 417,
+ "": 418,
+ "": 419,
+ "": 420,
+ "": 421,
+ "": 422,
+ "": 423,
+ "": 424,
+ "": 425,
+ "": 426,
+ "": 427,
+ "": 428,
+ "": 429,
+ "": 430,
+ "": 431,
+ "": 432,
+ "": 433,
+ "": 434,
+ "