diff --git a/checkpoints/DrvtFTPP_G_projectors/A/best.pt b/checkpoints/DrvtFTPP_G_projectors/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..8a1c7fd644c67c2feba44fc30999fa26dc7f1110 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cdd907f3f521c1c67d5bc159ff8c1cf52302d8650061103982fd3a2e53867e0 +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/AT/best.pt b/checkpoints/DrvtFTPP_G_projectors/AT/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..e360381419ba1cd2b65bb5bb2bed01c8009ca463 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:337f7af1ac4d3614875186a0f37d4e750a45b26d4339deac7b134479f880bcca +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/AV/best.pt b/checkpoints/DrvtFTPP_G_projectors/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..fef9bfaa84aaecd47ecf2277c0f4f5b9ad9b77a8 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2b68c599e0f294c0c18887ba07ec6b5e0e5b8bc267ab63478bc85f77aeda7cf +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/T/best.pt b/checkpoints/DrvtFTPP_G_projectors/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..9a838dce53ef20d8760223644fa434bf3bddb2f2 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1245490845c3791afa5fb4a1e94f0552930fc62bb1ec8c029bef3d188eba3800 +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/TV/best.pt b/checkpoints/DrvtFTPP_G_projectors/TV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..6ad81c1346a58fc6189b6d418000d45f158373e3 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07804fc20484736479a6dff15f56b7ac7aa4dd4390db8791ba382ccba53541cb +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/V/best.pt b/checkpoints/DrvtFTPP_G_projectors/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..e31a2b92c55de56bf4064cf26f1d56605570dbed --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0abc3db3f0e140926ea8cc191133649cf1bf80a13af8056c67a17783658a76e9 +size 10545547 diff --git a/checkpoints/DrvtFTPP_G_projectors/mix/best.pt b/checkpoints/DrvtFTPP_G_projectors/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..3fff077ad2dae2b1ff1bb699e33f35df1a2d3e96 --- /dev/null +++ b/checkpoints/DrvtFTPP_G_projectors/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88abd54902a0ea226ea9a40dd6f48e0d8fac9d8c5fe5f0803280f0e950193c4d +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/A/best.pt b/checkpoints/DrvtFTPP_M_projectors/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..fc04ddbe993e361ddd5571375f31e64ff6f5b0cc --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cad30b7e2e96c8faeaf5b7444f58554d29103562b1457ccc8fe17e4044c4514 +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/AT/best.pt b/checkpoints/DrvtFTPP_M_projectors/AT/best.pt 
new file mode 100644 index 0000000000000000000000000000000000000000..d8ef0faffbc42cbbe51069127e73851d456a1392 --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9083b09b55752511aa063ae8ca65f7cf4c229ad6cec128c88b48d41aa9afaaa9 +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/AV/best.pt b/checkpoints/DrvtFTPP_M_projectors/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..01bc6008e8aafc574001e3629ee49e144ff019c4 --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c11d366946d3910ecfe82544479018f03021a11706247e9a1f2eb29feddb656 +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/T/best.pt b/checkpoints/DrvtFTPP_M_projectors/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..445fcc008e057d7da6681599fd5f05b7cd0c74ed --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:351510cf2ab34afef47c520a7b333c523dae419902c135f703f4d3d7bf65c580 +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/TV/best.pt b/checkpoints/DrvtFTPP_M_projectors/TV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..a642f85043ec0fb24c4f4445db0b10e2db73b162 --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:353282f8329555203cffd6609c7a525ecd4ab05fd304ace739a13ede3ffeea8d +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/V/best.pt b/checkpoints/DrvtFTPP_M_projectors/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..83751165101d551461fabfe1f7b32e15260a0608 --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3edbbcbfedff32adaf7780437b994a40a0cba8790364b3236923f4b046cfee5d +size 10545547 diff --git a/checkpoints/DrvtFTPP_M_projectors/mix/best.pt b/checkpoints/DrvtFTPP_M_projectors/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..58f9067611e17f08f4f66329c819fc32cadfe6e7 --- /dev/null +++ b/checkpoints/DrvtFTPP_M_projectors/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06dfc8d6f8213cb88c2893da3b0eb7692ab519b10a19c712fb4f8f2600ab3aa9 +size 10545547 diff --git a/checkpoints/DrvtFT_audio_with_head.pt b/checkpoints/DrvtFT_audio_with_head.pt new file mode 100644 index 0000000000000000000000000000000000000000..f98446934f08229db7d1f45cb7d632d30e3c1fa1 --- /dev/null +++ b/checkpoints/DrvtFT_audio_with_head.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92eed730638818bc3a30303890157c4261c34b55ed5b7629d543744dfb0db3a3 +size 448190294 diff --git a/checkpoints/Drvt_projectors/A/best.pt b/checkpoints/Drvt_projectors/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..955e06a3a784c3cd3dfe2a9b199221fd346674ce --- /dev/null +++ b/checkpoints/Drvt_projectors/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aef16fc2c4ada977d51114bd7965e622a01a6048e84bfa8929d71fc494171f46 +size 23137715 diff --git a/checkpoints/Drvt_projectors/AT/best.pt b/checkpoints/Drvt_projectors/AT/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..1829f1395180407e44d39698cd84998723033a86 --- /dev/null +++ 
b/checkpoints/Drvt_projectors/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb6a1a3c302cfad5eaf522789969d640890a0a8a0b66769791064c72d3ecf766 +size 23137715 diff --git a/checkpoints/Drvt_projectors/AV/best.pt b/checkpoints/Drvt_projectors/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..1740619d1dc57ea11c9235a525c3b860412f3612 --- /dev/null +++ b/checkpoints/Drvt_projectors/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b50cae8217774821a206fc08e323ccf6c6eb64b9dbcd9e77e4014dd6d2da4530 +size 23137715 diff --git a/checkpoints/Drvt_projectors/T/best.pt b/checkpoints/Drvt_projectors/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..94a25d66d44468c6eb54875d82529b6f47008366 --- /dev/null +++ b/checkpoints/Drvt_projectors/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c01a024f4b919a665967b9e10db02765710453df297c74401243c25aacdee9c5 +size 23137715 diff --git a/checkpoints/Drvt_projectors/TV/best.pt b/checkpoints/Drvt_projectors/TV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..af91defb1b1e7723ca330d29b3479098b1f786c0 --- /dev/null +++ b/checkpoints/Drvt_projectors/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42aec0545ea9e67a94449d1f1d5e378bc989a3f95f0ab506e39dfeb67c549aa4 +size 23137715 diff --git a/checkpoints/Drvt_projectors/V/best.pt b/checkpoints/Drvt_projectors/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..dfd605e5a8ee539619444e0aebc45aa4f5751d93 --- /dev/null +++ b/checkpoints/Drvt_projectors/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:060286c55b887bc0906b27da771565a109de8a0c55c317389ca698c715ba594f +size 23137715 diff --git a/checkpoints/Drvt_projectors/mix/best.pt b/checkpoints/Drvt_projectors/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..3c1e6b7f86f62cb1fd15dd369647b4bfc99584bb --- /dev/null +++ b/checkpoints/Drvt_projectors/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b817758bbf078bad9c0753988860e976c969c1e91516aafc015d0c66f10e8417 +size 23137715 diff --git a/checkpoints/Drvt_projectors_mini/A/best.pt b/checkpoints/Drvt_projectors_mini/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..696ebb248df682fd295b83bd6dcf3b0ba83baf70 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:585a6c5093e6fe0ef79c147f741b262667623d018f9b4ac05b79b21a32ef3ef7 +size 14739851 diff --git a/checkpoints/Drvt_projectors_mini/AT/best.pt b/checkpoints/Drvt_projectors_mini/AT/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..3ddbd10ba6f39e019cb4ea6798e5fbfc29def6b5 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e72e12d0bf3f4c54139b2d0fa463f2c8a9a19d45feb879029713a4833c7e5ce6 +size 14739851 diff --git a/checkpoints/Drvt_projectors_mini/AV/best.pt b/checkpoints/Drvt_projectors_mini/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..8e8887e1d58ce6abf8cb09bcbde90c34041094eb --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17122f541cacc387bd42e17d837a3cc92d1480090c73e5680482639164c401e7 +size 14739851 diff --git 
a/checkpoints/Drvt_projectors_mini/T/best.pt b/checkpoints/Drvt_projectors_mini/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..6b7364e8899a0c40ba9800a42a84a9c526311437 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cc41b0abb0438b7d9f4ea310a8c1c8377025c81717bcb5282fbc907416614e8 +size 14739851 diff --git a/checkpoints/Drvt_projectors_mini/TV/best.pt b/checkpoints/Drvt_projectors_mini/TV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..30fb1c0a4dd63e7346e25efe9beadff4a5744da9 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11a6333df4c641f428c37c5d8fe4518d9793988c5fddd2293e2df57d09b9de18 +size 14739851 diff --git a/checkpoints/Drvt_projectors_mini/V/best.pt b/checkpoints/Drvt_projectors_mini/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..7f135ed24d76b2bc9fdf1eaa6c5cc1de26ed44e0 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0c7785c2f204b5245a4c59bdf7e14ba18a2126430a74da4124eea7bc76cc255 +size 14739851 diff --git a/checkpoints/Drvt_projectors_mini/mix/best.pt b/checkpoints/Drvt_projectors_mini/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..a44fc63b562603333282a0e89caef4e2306b4b56 --- /dev/null +++ b/checkpoints/Drvt_projectors_mini/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:223c95fbd7aa7b938b816fd74d15706ed315b8e538233b09facf2aa5526983fa +size 14739851 diff --git a/checkpoints/IBPP_G_projectors/A/best.pt b/checkpoints/IBPP_G_projectors/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..488d83fc09d1600727dbac6cfb14db83d2b91f4e --- /dev/null +++ b/checkpoints/IBPP_G_projectors/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8681003740d414c7d4296250fe5e007ad5564f7192e8c91e4e91a08267b0df8 +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/AT/best.pt b/checkpoints/IBPP_G_projectors/AT/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..53de90273d6cd5be4207ec7e9e81523b5025b52f --- /dev/null +++ b/checkpoints/IBPP_G_projectors/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12bb18d3370f0bb4b990db334fa0ab20914ef0a1be11bf8ce0fa7e9b42f6b9c7 +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/AV/best.pt b/checkpoints/IBPP_G_projectors/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..ae8371036ca5051a6f24898d0ee03af520c943f6 --- /dev/null +++ b/checkpoints/IBPP_G_projectors/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d71dc39ebdb6fc1dd7a6bc10e01d2d0f94d4202934ad293ba3d0c888a81e01c4 +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/T/best.pt b/checkpoints/IBPP_G_projectors/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..9c5f42dd52334b534ac98e649365434d866bcce2 --- /dev/null +++ b/checkpoints/IBPP_G_projectors/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea7004adee9d2200cf8d03598b356477b85ac79afd78e5125fb21003f56c7c2f +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/TV/best.pt b/checkpoints/IBPP_G_projectors/TV/best.pt new file mode 100644 index 
0000000000000000000000000000000000000000..4f8218e0b6dde0538ccdfd8a584017b4b7245440 --- /dev/null +++ b/checkpoints/IBPP_G_projectors/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d180f7b38df7f9973e7baf06393c46c2fed23f74485a7b57ebf23f139cc406f7 +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/V/best.pt b/checkpoints/IBPP_G_projectors/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..b1fd05b4477b00d54500d2aa97f53c1a8f292531 --- /dev/null +++ b/checkpoints/IBPP_G_projectors/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:117ee65328706d7ab310a59a303656ff6114d23f822b97a86531b843fdf88976 +size 12647819 diff --git a/checkpoints/IBPP_G_projectors/mix/best.pt b/checkpoints/IBPP_G_projectors/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..5818e6d4416a68b9fa7ecc9b16b35568042e0d0b --- /dev/null +++ b/checkpoints/IBPP_G_projectors/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf38dd5fe3e36ebfddc7f589738e6d3d4d7e983bdf7d3f2af2a7c4bbfb5edd60 +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/A/best.pt b/checkpoints/IBPP_M_projectors/A/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..ce2c1e5fc2612efcd7e323824164fa3976fd9d73 --- /dev/null +++ b/checkpoints/IBPP_M_projectors/A/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6e262081db30edb8150051563b0e7da007a925eadb01fb94d4349572da80649 +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/AT/best.pt b/checkpoints/IBPP_M_projectors/AT/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..dfb74612752fd5e598bc77b974bb5b4cda91283c --- /dev/null +++ b/checkpoints/IBPP_M_projectors/AT/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e765e8916f0ccffa9d0d0199ff4936135bd28c488e392b29237e0d6858203cca +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/AV/best.pt b/checkpoints/IBPP_M_projectors/AV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..3371da23764a2e9fad01b9032494a2d1d2df4fd6 --- /dev/null +++ b/checkpoints/IBPP_M_projectors/AV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a80860d133cfb7c50c2419481b8dee1d98ab28e604d56ea5d14e7dd6ec16784c +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/T/best.pt b/checkpoints/IBPP_M_projectors/T/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..a9493e3b97768ea2dea4ce8c2a053396eb0ba26f --- /dev/null +++ b/checkpoints/IBPP_M_projectors/T/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f2e5ee0ca99f0070ce6660c46e20829e615db8b2d3a0010ac64d1b830808951 +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/TV/best.pt b/checkpoints/IBPP_M_projectors/TV/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..cec3b3c2a30703349dacdca381f57ee39c49b6a6 --- /dev/null +++ b/checkpoints/IBPP_M_projectors/TV/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5482c7a73ca56b9b16b746d61c03b20279cebf049a9c00403577b2ab0adebddb +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/V/best.pt b/checkpoints/IBPP_M_projectors/V/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..4a76a8c5aee93e91e5418069afbd9798d3172f47 --- /dev/null +++ b/checkpoints/IBPP_M_projectors/V/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:86bb1c9bd9cc69db0447404482bba2fe7d5817db4b68cf5a5540e805563fd844 +size 12647819 diff --git a/checkpoints/IBPP_M_projectors/mix/best.pt b/checkpoints/IBPP_M_projectors/mix/best.pt new file mode 100644 index 0000000000000000000000000000000000000000..4f281beb40ac78a45ba045d9924df1192e03ba03 --- /dev/null +++ b/checkpoints/IBPP_M_projectors/mix/best.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df6e05d23288d534b5a8ee4eeedd27cfda6fb98cfd211848744fc38a12cb3ab4 +size 12647819 diff --git a/checkpoints/InternVL-14B-224px/README.md b/checkpoints/InternVL-14B-224px/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f751b1c5add33767560c4a4641637fbec9cdcd53 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/README.md @@ -0,0 +1,123 @@ +--- +license: mit +datasets: +- laion/laion2B-en +- laion/laion-coco +- laion/laion2B-multi +- kakaobrain/coyo-700m +- conceptual_captions +- wanng/wukong100m +--- + +# Model Card for InternVL-14B-224px + +## What is InternVL? + +\[[Paper](https://arxiv.org/abs/2312.14238)\] \[[GitHub](https://github.com/OpenGVLab/InternVL)\] + +InternVL scales up the ViT to _**6B parameters**_ and aligns it with LLM. + +It is _**the largest open-source vision/vision-language foundation model (14B)**_ to date, achieving _**32 state-of-the-art**_ performances on a wide range of tasks such as visual perception, cross-modal retrieval, multimodal dialogue, etc. + + +![image/png](https://cdn-uploads.huggingface.co/production/uploads/64119264f0f81eb569e0d569/f1jTYvyxyYbHRalvgtKY2.png) + + +## Model Details +- **Model Type:** vision-language foundation model +- **Model Stats:** + - Params: 14B + - Image size: 224 x 224 +- **Pretrain Dataset:** LAION-en, LAION-COCO, COYO, CC12M, CC3M, SBU, Wukong, LAION-multi + +## Model Usage + +```python +import torch +from PIL import Image +from transformers import AutoModel, CLIPImageProcessor +from transformers import AutoTokenizer + + +model = AutoModel.from_pretrained( + 'OpenGVLab/InternVL-14B-224px', + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + trust_remote_code=True).cuda().eval() + +image_processor = CLIPImageProcessor.from_pretrained('OpenGVLab/InternVL-14B-224px') + +tokenizer = AutoTokenizer.from_pretrained( + 'OpenGVLab/InternVL-14B-224px', use_fast=False, add_eos_token=True) +tokenizer.pad_token_id = 0 # set pad_token_id to 0 + +images = [ + Image.open('./examples/image1.jpg').convert('RGB'), + Image.open('./examples/image2.jpg').convert('RGB'), + Image.open('./examples/image3.jpg').convert('RGB') +] +prefix = 'summarize:' +texts = [ + prefix + 'a photo of a red panda', # English + prefix + '一张熊猫的照片', # Chinese + prefix + '二匹の猫の写真' # Japanese +] + +pixel_values = image_processor(images=images, return_tensors='pt').pixel_values +pixel_values = pixel_values.to(torch.bfloat16).cuda() +input_ids = tokenizer(texts, return_tensors='pt', max_length=80, + truncation=True, padding='max_length').input_ids.cuda() + +# InternVL-C +logits_per_image, logits_per_text = model( + image=pixel_values, text=input_ids, mode='InternVL-C') +probs = logits_per_image.softmax(dim=-1) +# tensor([[9.9609e-01, 5.2185e-03, 6.0070e-08], +# [2.2949e-02, 9.7656e-01, 5.9903e-06], +# [3.2932e-06, 7.4863e-05, 1.0000e+00]], device='cuda:0', +# dtype=torch.bfloat16, grad_fn=) + +# InternVL-G +logits_per_image, logits_per_text = model( + image=pixel_values, text=input_ids, mode='InternVL-G') +probs = logits_per_image.softmax(dim=-1) +# tensor([[9.9609e-01, 3.1738e-03, 3.6322e-08], +# [8.6060e-03, 
9.9219e-01, 2.8759e-06], + # [1.7583e-06, 3.1233e-05, 1.0000e+00]], device='cuda:0', + # dtype=torch.bfloat16, grad_fn=<SoftmaxBackward0>) + + # please set add_eos_token to False for generation + tokenizer.add_eos_token = False + image = Image.open('./examples/image1.jpg').convert('RGB') + pixel_values = image_processor(images=image, return_tensors='pt').pixel_values + pixel_values = pixel_values.to(torch.bfloat16).cuda() + + tokenized = tokenizer("English caption:", return_tensors='pt') + pred = model.generate( + pixel_values=pixel_values, + input_ids=tokenized.input_ids.cuda(), + attention_mask=tokenized.attention_mask.cuda(), + num_beams=5, + min_new_tokens=8, + ) + caption = tokenizer.decode(pred[0].cpu(), skip_special_tokens=True).strip() + # English caption: a red panda sitting on top of a wooden platform + ``` + + ## Citation + + If you find this project useful in your research, please consider citing: + + ```BibTeX + @article{chen2023internvl, + title={InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks}, + author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and Li, Bin and Luo, Ping and Lu, Tong and Qiao, Yu and Dai, Jifeng}, + journal={arXiv preprint arXiv:2312.14238}, + year={2023} +} + ``` + + + ## Acknowledgement + + InternVL is built with reference to the code of the following projects: [OpenAI CLIP](https://github.com/openai/CLIP), [Open CLIP](https://github.com/mlfoundations/open_clip), [CLIP Benchmark](https://github.com/LAION-AI/CLIP_benchmark), [EVA](https://github.com/baaivision/EVA/tree/master), [InternImage](https://github.com/OpenGVLab/InternImage), [ViT-Adapter](https://github.com/czczup/ViT-Adapter), [MMSegmentation](https://github.com/open-mmlab/mmsegmentation), [Transformers](https://github.com/huggingface/transformers), [DINOv2](https://github.com/facebookresearch/dinov2), [BLIP-2](https://github.com/salesforce/LAVIS/tree/main/projects/blip2), [Qwen-VL](https://github.com/QwenLM/Qwen-VL/tree/master/eval_mm), and [LLaVA-1.5](https://github.com/haotian-liu/LLaVA). Thanks for their awesome work!
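Note on the checkpoint files: all of the `best.pt` entries added earlier in this diff are Git LFS pointer files (a `version` line, an `oid sha256:` line, and a `size` line), not the checkpoint weights themselves; the binaries are fetched separately with `git lfs pull`. The sketch below is not part of this PR: it is a minimal, hypothetical helper for parsing one of those pointers and confirming that a pulled blob matches the recorded hash and size. The function names and the example path `checkpoints/Drvt_projectors/A/best.pt` are illustrative; the pointer text is copied from the diff above.

```python
import hashlib
from pathlib import Path


def read_lfs_pointer(pointer_text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer file."""
    fields = dict(line.split(' ', 1) for line in pointer_text.strip().splitlines())
    return {
        'oid': fields['oid'].removeprefix('sha256:'),  # expected SHA-256 of the blob
        'size': int(fields['size']),                   # expected size in bytes
    }


def verify_checkpoint(path: str, pointer: dict) -> bool:
    """Check that a pulled LFS blob matches the oid/size recorded in its pointer."""
    blob = Path(path).read_bytes()
    return len(blob) == pointer['size'] and hashlib.sha256(blob).hexdigest() == pointer['oid']


# Pointer contents copied from this diff (checkpoints/Drvt_projectors/A/best.pt).
pointer = read_lfs_pointer(
    'version https://git-lfs.github.com/spec/v1\n'
    'oid sha256:aef16fc2c4ada977d51114bd7965e622a01a6048e84bfa8929d71fc494171f46\n'
    'size 23137715\n'
)
print(verify_checkpoint('checkpoints/Drvt_projectors/A/best.pt', pointer))
```

Once the blobs are in place, the usage example in the model card above should also work with `from_pretrained` pointed at the local `checkpoints/InternVL-14B-224px` directory instead of the Hugging Face Hub ID.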
diff --git a/checkpoints/InternVL-14B-224px/__init__.py b/checkpoints/InternVL-14B-224px/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cac6801a1b33d20b298080d24e9998e456b36874 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/__init__.py @@ -0,0 +1,87 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2023 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +import torch +import torch.nn as nn +import torchvision.transforms as T +from torchvision.transforms import InterpolationMode +from transformers import LlamaTokenizer + +from .configuration_intern_vit import InternVisionConfig +from .configuration_internvl import InternVLConfig +from .modeling_intern_vit import InternVisionModel +from .modeling_internvl import InternVL_C, InternVL_G, InternVLModel + +__all__ = ['InternVisionConfig', 'InternVisionModel', 'InternVLConfig', + 'InternVLModel', 'InternVL_C', 'InternVL_G'] + + +# Prefix the text "summarize:" +class InternVLTokenizer(nn.Module): + def __init__(self, model_path): + super(InternVLTokenizer, self).__init__() + self.tokenizer = LlamaTokenizer.from_pretrained(model_path) + self.tokenizer.pad_token = ' ' # allow padding + self.tokenizer.add_eos_token = True + + def forward(self, text, prefix='summarize:'): + if type(text) == str: + text = prefix + text + elif type(text) == list: + text = [prefix + item for item in text] + text = self.tokenizer(text, return_tensors='pt', max_length=80, truncation=True, padding='max_length').input_ids + return text + + +def build_transform(task, image_size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]): + if task == 'retrieval': + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=mean, std=std)]) + else: + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize(image_size, interpolation=InterpolationMode.BICUBIC), + T.CenterCrop(image_size), + T.ToTensor(), + T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) + return transform + + +def load_internvl_c_huggingface(ckpt_path, device, task): + model = InternVL_C.from_pretrained(ckpt_path, torch_dtype=torch.float16).to(device) + if model.config.use_backbone_lora: + model.vision_model.merge_and_unload() + model.vision_model = model.vision_model.model + if model.config.use_qllama_lora: + model.qllama.merge_and_unload() + model.qllama = model.qllama.model + if model.config.force_image_size is not None: + image_size = model.config.force_image_size + else: + image_size = model.config.vision_config.image_size + transform = build_transform(task, image_size) + tokenizer = InternVLTokenizer(ckpt_path) + return model, transform, tokenizer + + +def load_internvl_g_huggingface(ckpt_path, device, task): + model = InternVL_G.from_pretrained(ckpt_path, torch_dtype=torch.float16).to(device) + if model.config.use_backbone_lora: + model.vision_model.merge_and_unload() + model.vision_model = model.vision_model.model + if model.config.use_qllama_lora: + model.qllama.merge_and_unload() + model.qllama = model.qllama.model + if model.config.force_image_size is not None: + image_size = model.config.force_image_size + else: + image_size = model.config.vision_config.image_size + transform = build_transform(task, image_size) + 
tokenizer = InternVLTokenizer(ckpt_path) + return model, transform, tokenizer diff --git a/checkpoints/InternVL-14B-224px/config.json b/checkpoints/InternVL-14B-224px/config.json new file mode 100644 index 0000000000000000000000000000000000000000..773bceef8764c2eab3a1f3c89b7576c854801940 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/config.json @@ -0,0 +1,190 @@ +{ + "_commit_hash": null, + "_name_or_path": "./", + "architectures": [ + "InternVLModel" + ], + "auto_map": { + "AutoConfig": "configuration_internvl.InternVLConfig", + "AutoModel": "modeling_internvl.InternVLModel" + }, + "attn_pool_num_heads": 16, + "clip_embed_dim": 768, + "force_image_size": null, + "hidden_size": 4096, + "initializer_range": 0.02, + "label_smoothing": 0.0, + "max_txt_len": 32, + "model_type": "internvl", + "num_query_token": 96, + "qllama_config": { + "_name_or_path": "", + "add_cross_attention": false, + "architectures": [ + "LlamaForCausalLM" + ], + "bad_words_ids": null, + "begin_suppress_tokens": null, + "bos_token_id": 1, + "chunk_size_feed_forward": 0, + "cross_attention_frequency": 2, + "cross_attention_hidden_size": null, + "decoder_start_token_id": null, + "diversity_penalty": 0.0, + "do_sample": false, + "early_stopping": false, + "encoder_no_repeat_ngram_size": 0, + "eos_token_id": 2, + "exponential_decay_length_penalty": null, + "finetuning_task": null, + "forced_bos_token_id": null, + "forced_eos_token_id": null, + "hidden_act": "silu", + "hidden_size": 4096, + "id2label": { + "0": "LABEL_0", + "1": "LABEL_1" + }, + "initializer_range": 0.02, + "intermediate_size": 11008, + "is_decoder": false, + "is_encoder_decoder": false, + "label2id": { + "LABEL_0": 0, + "LABEL_1": 1 + }, + "length_penalty": 1.0, + "max_length": 20, + "max_position_embeddings": 2048, + "max_sequence_length": 2048, + "min_length": 0, + "model_type": "llama", + "no_repeat_ngram_size": 0, + "num_attention_heads": 32, + "num_beam_groups": 1, + "num_beams": 1, + "num_hidden_layers": 32, + "num_key_value_heads": 32, + "num_query_token": 96, + "num_return_sequences": 1, + "output_attentions": false, + "output_hidden_states": false, + "output_scores": false, + "pad_token_id": 0, + "prefix": null, + "pretraining_tp": 1, + "problem_type": null, + "pruned_heads": {}, + "remove_invalid_values": false, + "repetition_penalty": 1.0, + "return_dict": true, + "return_dict_in_generate": false, + "rms_norm_eps": 1e-06, + "rope_scaling": null, + "sep_token_id": null, + "suppress_tokens": null, + "task_specific_params": null, + "temperature": 1.0, + "tf_legacy_loss": false, + "tie_encoder_decoder": false, + "tie_word_embeddings": false, + "tokenizer_class": null, + "top_k": 50, + "top_p": 1.0, + "torch_dtype": "float16", + "torchscript": false, + "transformers_version": "4.32.0", + "typical_p": 1.0, + "use_bfloat16": false, + "use_cache": false, + "vocab_size": 49954 + }, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": null, + "use_backbone_lora": 0, + "use_cache": false, + "use_decoder_only_language_model": true, + "use_qllama_lora": 0, + "vision_config": { + "_name_or_path": "", + "add_cross_attention": false, + "architectures": null, + "attention_dropout": 0.0, + "bad_words_ids": null, + "begin_suppress_tokens": null, + "bos_token_id": null, + "chunk_size_feed_forward": 0, + "cross_attention_hidden_size": null, + "decoder_start_token_id": null, + "diversity_penalty": 0.0, + "do_sample": false, + "drop_path_rate": 0.0, + "dropout": 0.0, + "early_stopping": false, + "encoder_no_repeat_ngram_size": 0, 
+ "eos_token_id": null, + "exponential_decay_length_penalty": null, + "finetuning_task": null, + "forced_bos_token_id": null, + "forced_eos_token_id": null, + "hidden_act": "gelu", + "hidden_size": 3200, + "id2label": { + "0": "LABEL_0", + "1": "LABEL_1" + }, + "image_size": 224, + "initializer_factor": 0.1, + "initializer_range": 1e-10, + "intermediate_size": 12800, + "is_decoder": false, + "is_encoder_decoder": false, + "label2id": { + "LABEL_0": 0, + "LABEL_1": 1 + }, + "layer_norm_eps": 1e-06, + "length_penalty": 1.0, + "max_length": 20, + "min_length": 0, + "model_type": "intern_vit_6b", + "no_repeat_ngram_size": 0, + "num_attention_heads": 25, + "num_beam_groups": 1, + "num_beams": 1, + "num_channels": 3, + "num_hidden_layers": 48, + "num_return_sequences": 1, + "output_attentions": false, + "output_hidden_states": false, + "output_scores": false, + "pad_token_id": null, + "patch_size": 14, + "prefix": null, + "problem_type": null, + "pruned_heads": {}, + "qk_normalization": true, + "qkv_bias": false, + "remove_invalid_values": false, + "repetition_penalty": 1.0, + "return_dict": true, + "return_dict_in_generate": false, + "sep_token_id": null, + "suppress_tokens": null, + "task_specific_params": null, + "temperature": 1.0, + "tf_legacy_loss": false, + "tie_encoder_decoder": false, + "tie_word_embeddings": true, + "tokenizer_class": null, + "top_k": 50, + "top_p": 1.0, + "torch_dtype": null, + "torchscript": false, + "transformers_version": "4.32.0", + "typical_p": 1.0, + "use_bfloat16": false, + "use_flash_attn": true + } +} diff --git a/checkpoints/InternVL-14B-224px/configuration_intern_vit.py b/checkpoints/InternVL-14B-224px/configuration_intern_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..b85920fa23f29725600cff5810020a9a906954d1 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/configuration_intern_vit.py @@ -0,0 +1,117 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2023 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +import os +from typing import Union + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class InternVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to + instantiate a vision encoder according to the specified arguments, defining the model architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + Number of color channels in the input images (e.g., 3 for RGB). + patch_size (`int`, *optional*, defaults to 14): + The size (resolution) of each patch. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + qkv_bias (`bool`, *optional*, defaults to `False`): + Whether to add a bias to the queries and values in the self-attention layers. + hidden_size (`int`, *optional*, defaults to 3200): + Dimensionality of the encoder layers and the pooler layer. + num_attention_heads (`int`, *optional*, defaults to 25): + Number of attention heads for each attention layer in the Transformer encoder. 
+ intermediate_size (`int`, *optional*, defaults to 12800): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + qk_normalization (`bool`, *optional*, defaults to `True`): + Whether to normalize the queries and keys in the self-attention layers. + num_hidden_layers (`int`, *optional*, defaults to 48): + Number of hidden layers in the Transformer encoder. + use_flash_attn (`bool`, *optional*, defaults to `True`): + Whether to use flash attention mechanism. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-6): + The epsilon used by the layer normalization layers. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + drop_path_rate (`float`, *optional*, defaults to 0.0): + Dropout rate for stochastic depth. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 0.1): + A factor for layer scale. + """ + + model_type = 'intern_vit_6b' + + def __init__( + self, + num_channels=3, + patch_size=14, + image_size=224, + qkv_bias=False, + hidden_size=3200, + num_attention_heads=25, + intermediate_size=12800, + qk_normalization=True, + num_hidden_layers=48, + use_flash_attn=True, + hidden_act='gelu', + layer_norm_eps=1e-6, + dropout=0.0, + drop_path_rate=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=0.1, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.drop_path_rate = drop_path_rate + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.qkv_bias = qkv_bias + self.qk_normalization = qk_normalization + self.use_flash_attn = use_flash_attn + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + if 'vision_config' in config_dict: + config_dict = config_dict['vision_config'] + + if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
+ ) + + return cls.from_dict(config_dict, **kwargs) diff --git a/checkpoints/InternVL-14B-224px/configuration_internvl.py b/checkpoints/InternVL-14B-224px/configuration_internvl.py new file mode 100644 index 0000000000000000000000000000000000000000..15f402489c3b51785872a73e6ebc67fde6f1185a --- /dev/null +++ b/checkpoints/InternVL-14B-224px/configuration_internvl.py @@ -0,0 +1,108 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2023 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +import copy + +from transformers import LlamaConfig +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +from .configuration_intern_vit import InternVisionConfig + +logger = logging.get_logger(__name__) + + +class InternVLConfig(PretrainedConfig): + r""" + [`InternVLConfig`] is the configuration class to store the configuration of a + [`InternVLModel`]. It is used to instantiate a InternVLModel according to the specified + arguments, defining the InternViT-6B and QLLaMA configs. Instantiating a configuration with + the defaults will yield a similar configuration to that of the InternVL architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`InternVisionConfig`]. + qllama_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`LLaMAConfig`]. + clip_embed_dim (`int`, *optional*, defaults to 768): + Size of the embeddings from the CLIP model. + attn_pool_num_heads (`int`, *optional*, defaults to 16): + Number of attention heads used in the attention pooling layers. + num_query_token (`int`, *optional*, defaults to 96): + Number of query tokens used in the transformer. + label_smoothing (`float`, *optional*, defaults to 0.0): + The amount of label smoothing to apply. + cross_attention_frequency (`int`, *optional*, defaults to 2): + The frequency of cross-attention layers in the model. + use_backbone_lora (`int`, *optional*, defaults to 0): + If non-zero, indicates the use of LoRA in the backbone of the model. + use_qllama_lora (`int`, *optional*, defaults to 0): + If non-zero, indicates the use of LoRA in the QLLaMA of the model. + force_image_size (`int` or `None`, *optional*): + If not None, forces the model to use this specific image size. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + kwargs (*optional*): + Dictionary of additional keyword arguments. + """ + + model_type = 'internvl' + is_composition = True + + def __init__( + self, + vision_config=None, + qllama_config=None, + clip_embed_dim=768, + attn_pool_num_heads=16, + num_query_token=96, + label_smoothing=0.0, + cross_attention_frequency=2, + use_backbone_lora=0, + use_qllama_lora=0, + force_image_size=None, + initializer_range=0.02, + **kwargs): + super().__init__(**kwargs) + + if vision_config is None: + vision_config = {} + logger.info('vision_config is None. initializing the InternVisionConfig with default values.') + + if qllama_config is None: + qllama_config = {} + logger.info( + 'qllama_config is None. 
Initializing the InternTextConfig config with default values (`LlamaConfig`).') + + self.vision_config = InternVisionConfig(**vision_config) + self.qllama_config = LlamaConfig(**qllama_config) + self.qllama_config.num_query_token = num_query_token + self.qllama_config.cross_attention_frequency = cross_attention_frequency + self.hidden_size = self.qllama_config.hidden_size + + self.clip_embed_dim = clip_embed_dim + self.attn_pool_num_heads = attn_pool_num_heads + self.num_query_token = num_query_token + self.label_smoothing = label_smoothing + self.use_backbone_lora = use_backbone_lora + self.use_qllama_lora = use_qllama_lora + self.force_image_size = force_image_size + self.initializer_range = initializer_range + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. + + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + output['vision_config'] = self.vision_config.to_dict() + output['qllama_config'] = self.qllama_config.to_dict() + output['model_type'] = self.__class__.model_type + return output diff --git a/checkpoints/InternVL-14B-224px/flash_attention.py b/checkpoints/InternVL-14B-224px/flash_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..7cda9bfadd290da35bdd04cccd51725e2d419c2f --- /dev/null +++ b/checkpoints/InternVL-14B-224px/flash_attention.py @@ -0,0 +1,76 @@ +# https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py +import torch +import torch.nn as nn +from einops import rearrange + +try: # v1 + from flash_attn.flash_attn_interface import \ + flash_attn_unpadded_qkvpacked_func +except: # v2 + from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func + +from flash_attn.bert_padding import pad_input, unpad_input + + +class FlashAttention(nn.Module): + """Implement the scaled dot product attention with softmax. + Arguments + --------- + softmax_scale: The temperature to use for the softmax attention. + (default: 1/sqrt(d_keys) where d_keys is computed at + runtime) + attention_dropout: The dropout rate to apply to the attention + (default: 0.0) + """ + + def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None): + super().__init__() + self.softmax_scale = softmax_scale + self.dropout_p = attention_dropout + + def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None, + max_s=None, need_weights=False): + """Implements the multihead softmax attention. + Arguments + --------- + qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None + if unpadded: (nnz, 3, h, d) + key_padding_mask: a bool tensor of shape (B, S) + """ + assert not need_weights + assert qkv.dtype in [torch.float16, torch.bfloat16] + assert qkv.is_cuda + + if cu_seqlens is None: + batch_size = qkv.shape[0] + seqlen = qkv.shape[1] + if key_padding_mask is None: + qkv = rearrange(qkv, 'b s ... -> (b s) ...') + max_s = seqlen + cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, + device=qkv.device) + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(output, '(b s) ... 
-> b s ...', b=batch_size) + else: + nheads = qkv.shape[-2] + x = rearrange(qkv, 'b s three h d -> b s (three h d)') + x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask) + x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads) + output_unpad = flash_attn_unpadded_qkvpacked_func( + x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), + indices, batch_size, seqlen), + 'b s (h d) -> b s h d', h=nheads) + else: + assert max_s is not None + output = flash_attn_unpadded_qkvpacked_func( + qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0, + softmax_scale=self.softmax_scale, causal=causal + ) + + return output, None diff --git a/checkpoints/InternVL-14B-224px/modeling_intern_vit.py b/checkpoints/InternVL-14B-224px/modeling_intern_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..325116244ac252b1fbc5af211f1d74b3920be89f --- /dev/null +++ b/checkpoints/InternVL-14B-224px/modeling_intern_vit.py @@ -0,0 +1,342 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2023 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from einops import rearrange +from timm.models.layers import DropPath +from torch import nn +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutput, + BaseModelOutputWithPooling) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging + +from .configuration_intern_vit import InternVisionConfig + +try: + from .flash_attention import FlashAttention + has_flash_attn = True +except: + print('FlashAttention is not installed.') + has_flash_attn = False + + +logger = logging.get_logger(__name__) + + +class InternRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +try: + from apex.normalization import FusedRMSNorm + + InternRMSNorm = FusedRMSNorm # noqa + + logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm') +except ImportError: + # using the normal InternRMSNorm + pass +except Exception: + logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm') + pass + + +class InternVisionEmbeddings(nn.Module): + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter( + torch.randn(1, 1, self.embed_dim), + ) + + self.patch_embedding = nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + + 
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding.to(target_dtype) + return embeddings + + +class InternAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.use_flash_attn = config.use_flash_attn and has_flash_attn + if config.use_flash_attn and not has_flash_attn: + print('Warning: Flash Attention is not available, use_flash_attn is set to False.') + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:' + f' {self.num_heads}).' + ) + + self.scale = self.head_dim ** -0.5 + self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias) + self.attn_drop = nn.Dropout(config.attention_dropout) + self.proj_drop = nn.Dropout(config.dropout) + + self.qk_normalization = config.qk_normalization + + if self.qk_normalization: + self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + + if self.use_flash_attn: + self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout) + self.proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _naive_attn(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + if self.qk_normalization: + B_, H_, N_, D_ = q.shape + q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) + k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2) + + attn = ((q * self.scale) @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def _flash_attn(self, x, key_padding_mask=None, need_weights=False): + qkv = self.qkv(x) + qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads) + + if self.qk_normalization: + q, k, v = qkv.unbind(2) + q = self.q_norm(q.flatten(-2, -1)).view(q.shape) + k = self.k_norm(k.flatten(-2, -1)).view(k.shape) + qkv = torch.stack([q, k, v], dim=2) + + context, _ = self.inner_attn( + qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False + ) + outs = self.proj(rearrange(context, 'b s h d -> b s (h d)')) + outs = self.proj_drop(outs) + return outs + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states) + return x + + +class InternMLP(nn.Module): + def __init__(self, config: InternVisionConfig): + super().__init__() + 
self.config = config + self.act = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class InternVisionEncoderLayer(nn.Module): + def __init__(self, config: InternVisionConfig, drop_path_rate: float): + super().__init__() + self.embed_dim = config.hidden_size + self.intermediate_size = config.intermediate_size + + self.attn = InternAttention(config) + self.mlp = InternMLP(config) + self.norm1 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + self.norm2 = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps) + + self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) + self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim)) + self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + def forward( + self, + hidden_states: torch.Tensor, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]: + """ + Args: + hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)` + """ + hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1) + + hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2) + + return hidden_states + + +class InternVisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`InternEncoderLayer`]. + + Args: + config (`InternConfig`): + The corresponding vision configuration for the `InternEncoder`. + """ + + def __init__(self, config: InternVisionConfig): + super().__init__() + self.config = config + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] + self.layers = nn.ModuleList([ + InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)]) + self.gradient_checkpointing = True + + def forward( + self, + inputs_embeds, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Embedded representation of the inputs. Should be float, not int tokens. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + hidden_states = inputs_embeds + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = torch.utils.checkpoint.checkpoint( + encoder_layer, + hidden_states) + else: + layer_outputs = encoder_layer( + hidden_states, + ) + hidden_states = layer_outputs + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states + ) + + +class InternVisionModel(PreTrainedModel): + main_input_name = 'pixel_values' + config_class = InternVisionConfig + + def __init__(self, config: InternVisionConfig): + super().__init__(config) + self.config = config + + self.embeddings = InternVisionEmbeddings(config) + self.encoder = InternVisionEncoder(config) + + def resize_pos_embeddings(self, old_size, new_size, patch_size): + pos_emb = self.embeddings.position_embedding + _, num_positions, embed_dim = pos_emb.shape + cls_emb = pos_emb[:, :1, :] + pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2) + pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False) + pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1) + pos_emb = torch.cat([cls_emb, pos_emb], dim=1) + self.embeddings.position_embedding = nn.Parameter(pos_emb) + logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size)) + + def get_input_embeddings(self): + return self.embeddings + + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_embeds: Optional[torch.FloatTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None and pixel_embeds is None: + raise ValueError('You have to specify pixel_values or pixel_embeds') + + if pixel_embeds is not None: + hidden_states = pixel_embeds + else: + if len(pixel_values.shape) == 4: + hidden_states = self.embeddings(pixel_values) + else: + raise ValueError(f'wrong pixel_values size: {pixel_values.shape}') + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs.last_hidden_state + pooled_output = last_hidden_state[:, 0, :] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) diff --git a/checkpoints/InternVL-14B-224px/modeling_internvl.py b/checkpoints/InternVL-14B-224px/modeling_internvl.py new file mode 100644 index 
0000000000000000000000000000000000000000..f8610e7094fd4494a9a0bcc6cd540c4f16aed824 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/modeling_internvl.py @@ -0,0 +1,519 @@ +# -------------------------------------------------------- +# InternVL +# Copyright (c) 2023 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +from functools import partial +from typing import Optional + +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from peft import LoraConfig, get_peft_model +from timm.models.layers import DropPath +from torch import nn +from transformers import GenerationConfig +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging + +from .configuration_internvl import InternVLConfig +from .modeling_intern_vit import (InternVisionEmbeddings, InternVisionEncoder, + InternVisionModel) +from .modeling_qllama import LlamaForCausalLM, _expand_mask, _make_causal_mask + +try: + from .flash_attention import FlashAttention # v1/v2 +except: + print('FlashAttention is not installed.') + +logger = logging.get_logger(__name__) + + +class InternVLPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = InternVLConfig + base_model_prefix = 'internvl' + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_missing = [ + r'position_ids', + ] + _no_split_modules = ['InternAttention', 'LlamaDecoderLayer', 'LlamaForCausalLM'] + _skip_keys_device_placement = 'past_key_values' + _keep_in_fp32_modules = ['wo'] + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_range + if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=factor) + if hasattr(module, 'bias') and module.bias is not None: + module.bias.data.zero_() + if isinstance(module, InternVisionEmbeddings): + if hasattr(self.config, 'vision_config'): + factor = self.config.vision_config.initializer_range + nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) + nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, InternVisionModel): + module.gradient_checkpointing = value + if isinstance(module, InternVisionEncoder): + module.gradient_checkpointing = value + + +class CrossAttention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., attn_head_dim=None, out_dim=None): + super().__init__() + if out_dim is None: + out_dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + assert all_head_dim == dim + + self.q = nn.Linear(dim, all_head_dim, bias=False) + self.k = nn.Linear(dim, all_head_dim, bias=False) + self.v = nn.Linear(dim, all_head_dim, bias=False) + + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.k_bias = nn.Parameter(torch.zeros(all_head_dim)) + 
self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, out_dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, k=None, v=None): + B, N, C = x.shape + N_k = k.shape[1] + N_v = v.shape[1] + + q_bias, k_bias, v_bias = None, None, None + if self.q_bias is not None: + q_bias = self.q_bias + k_bias = self.k_bias + v_bias = self.v_bias + + q = F.linear(input=x, weight=self.q.weight, bias=q_bias) + q = q.reshape(B, N, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0) # (B, N_head, N_q, dim) + + k = F.linear(input=k, weight=self.k.weight, bias=k_bias) + k = k.reshape(B, N_k, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0) + + v = F.linear(input=v, weight=self.v.weight, bias=v_bias) + v = v.reshape(B, N_v, 1, self.num_heads, -1).permute(2, 0, 3, 1, 4).squeeze(0) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) # (B, N_head, N_q, N_k) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class AttentiveBlock(nn.Module): + + def __init__(self, dim, num_heads, qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, attn_head_dim=None, out_dim=None): + super().__init__() + + self.norm1_q = norm_layer(dim) + self.norm1_k = norm_layer(dim) + self.norm1_v = norm_layer(dim) + self.cross_attn = CrossAttention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, + proj_drop=drop, attn_head_dim=attn_head_dim, out_dim=out_dim) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x_q, x_kv, pos_q, pos_k, bool_masked_pos, rel_pos_bias=None): + x_q = self.norm1_q(x_q + pos_q) + x_k = self.norm1_k(x_kv + pos_k) + x_v = self.norm1_v(x_kv) + x = self.cross_attn(x_q, k=x_k, v=x_v) + + return x + + +class AttentionPoolingBlock(AttentiveBlock): + + def forward(self, x): + x_q = x.mean(1, keepdim=True) + x_kv, pos_q, pos_k = x, 0, 0 + x = super().forward(x_q, x_kv, pos_q, pos_k, bool_masked_pos=None, rel_pos_bias=None) + x = x.squeeze(1) + return x + + +class InternVLModel(InternVLPreTrainedModel): + config_class = InternVLConfig + main_input_name = 'pixel_values' + + def __init__(self, config: InternVLConfig): + super().__init__(config) + + text_hidden_size = config.qllama_config.hidden_size + vision_hidden_size = config.vision_config.hidden_size + clip_embed_dim = config.clip_embed_dim + attn_pool_num_heads = config.attn_pool_num_heads + config.qllama_config.num_query_token = config.num_query_token + self.num_query_token = config.num_query_token + self.label_smoothing = config.label_smoothing + + self.vision_model = InternVisionModel(config.vision_config) # frozen + self.qllama = LlamaForCausalLM(config.qllama_config) # frozen + self.query_tokens = nn.Parameter( # trainable + torch.zeros(1, config.num_query_token, text_hidden_size) + ) + + self.text_projection = nn.Parameter(torch.empty(text_hidden_size, clip_embed_dim)) # frozen + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) # trainable + self.clip_projector = AttentionPoolingBlock( # frozen + dim=vision_hidden_size, num_heads=attn_pool_num_heads, qkv_bias=True, qk_scale=None, + drop=0., attn_drop=0., norm_layer=partial(nn.LayerNorm, eps=1e-5), out_dim=clip_embed_dim) + self.clip_projector2 = AttentionPoolingBlock( # 
trainable + dim=text_hidden_size, num_heads=attn_pool_num_heads, qkv_bias=True, qk_scale=None, + drop=0., attn_drop=0., norm_layer=partial(nn.LayerNorm, eps=1e-5), out_dim=clip_embed_dim) + self.itm_head = nn.Linear(text_hidden_size, 2) # trainable + self.gradient_checkpointing = True + + # Initialize weights and apply final processing + # self.post_init() + + if config.use_backbone_lora: + self.wrap_backbone_lora(r=config.use_backbone_lora) + if config.use_qllama_lora: + self.wrap_qllama_lora(r=config.use_qllama_lora) + if config.force_image_size: + self.vision_model.resize_pos_embeddings( + old_size=config.vision_config.image_size, + new_size=config.force_image_size, + patch_size=config.vision_config.patch_size + ) + + def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + lora_config = LoraConfig( + r=r, + target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'], + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + ) + self.vision_model = get_peft_model(self.vision_model, lora_config) + self.vision_model.print_trainable_parameters() + + def wrap_qllama_lora(self, r=128, lora_alpha=256, lora_dropout=0.05): + lora_config = LoraConfig( + r=r, + target_modules=['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj', + 'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj'], + lora_alpha=lora_alpha, + lora_dropout=lora_dropout, + ) + self.qllama = get_peft_model(self.qllama, lora_config) + self.qllama.print_trainable_parameters() + + def get_input_embeddings(self): + return self.qllama.get_input_embeddings() + + def set_input_embeddings(self, value): + self.qllama.set_input_embeddings(value) + + def set_output_embeddings(self, new_embeddings): + self.qllama.set_output_embeddings(new_embeddings) + + def get_output_embeddings(self) -> nn.Module: + return self.qllama.get_output_embeddings() + + @torch.no_grad() + def generate( + self, + pixel_values: torch.FloatTensor, + input_ids: torch.FloatTensor, + attention_mask: torch.LongTensor, + generation_config: Optional[GenerationConfig] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **generate_kwargs, + ) -> torch.LongTensor: + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict) + image_embeds = vision_outputs[0] + + batch_size = image_embeds.shape[0] + input_embeds = self.get_input_embeddings()(input_ids) + query_tokens = self.query_tokens.repeat(batch_size, 1, 1) + input_embeds = torch.cat([query_tokens, input_embeds], dim=1) + image_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) + attention_mask = torch.cat([image_attention_mask, attention_mask], dim=1) + + outputs = self.qllama.generate( + inputs_embeds=input_embeds, + attention_mask=attention_mask, + vision_hidden_states=image_embeds, + generation_config=generation_config, + use_zero_attention_mask=True, + **generate_kwargs, + ) + + return outputs + + def get_text_features( + self, + input_ids: torch.Tensor, + attention_mask: torch.Tensor, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`): + The language model outputs. 
If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that + contains the language model logits, the past key values and the hidden states if + `output_hidden_states=True`. + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + input_embeds = self.get_input_embeddings()(input_ids) + attention_mask = _expand_mask(attention_mask, input_embeds.dtype).to( + input_embeds.device) # [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask += _make_causal_mask( + (attention_mask.shape[0], attention_mask.shape[2]), + input_embeds.dtype, + device=input_embeds.device + ) + if type(self.qllama.model) == LlamaForCausalLM: + outputs = self.qllama.model.model.forward_train( + inputs_embeds=input_embeds, + vision_hidden_states=None, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ).last_hidden_state + else: + outputs = self.qllama.model.forward_train( + inputs_embeds=input_embeds, + vision_hidden_states=None, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ).last_hidden_state + return outputs + + def get_image_features( + self, + pixel_values: torch.FloatTensor, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict) + image_embeds = vision_outputs[0] + backbone_embeds = image_embeds + + batch_size = image_embeds.shape[0] + input_embeds = self.query_tokens.repeat(batch_size, 1, 1) + + attention_mask = torch.ones(input_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) + attention_mask = _expand_mask(attention_mask, input_embeds.dtype).to( + input_embeds.device) # [bsz, 1, tgt_seq_len, src_seq_len] + if type(self.qllama.model) == LlamaForCausalLM: + outputs = self.qllama.model.model.forward_train( + inputs_embeds=input_embeds, + vision_hidden_states=image_embeds, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ).last_hidden_state + else: + outputs = self.qllama.model.forward_train( + inputs_embeds=input_embeds, + vision_hidden_states=image_embeds, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ).last_hidden_state + return backbone_embeds, outputs + + def encode_image(self, image, mode): + if mode == 'InternVL-C': + vision_outputs = self.vision_model( + pixel_values=image, + output_hidden_states=False, + return_dict=True) + image_embeds = vision_outputs[0] + image_embeds = self.clip_projector(image_embeds) + elif mode == 'InternVL-G': + backbone_embeds, image_embeds = 
self.get_image_features( + pixel_values=image, + output_hidden_states=False, + return_dict=True, + ) + backbone_embeds = self.clip_projector(backbone_embeds) + image_embeds = self.clip_projector2(image_embeds) + # ensemble + backbone_embeds = backbone_embeds / backbone_embeds.norm(dim=1, keepdim=True) + image_embeds = image_embeds / image_embeds.norm(dim=1, keepdim=True) + image_embeds = image_embeds + backbone_embeds + else: + raise NotImplementedError + return image_embeds + + def encode_text(self, text): + attention_mask = text > 0 + text_embeds = self.get_text_features( + input_ids=text, + attention_mask=attention_mask, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ) + text_embeds = text_embeds[torch.arange(text_embeds.shape[0]), attention_mask.sum(1) - 1] + text_embeds = text_embeds @ self.text_projection + return text_embeds + + def forward(self, image, text, mode='InternVL-C'): + assert mode in ['InternVL-C', 'InternVL-G'], 'mode must be InternVL-C or InternVL-G' + image_features = self.encode_image(image, mode) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + return logits_per_image, logits_per_text + + +class InternVL_C(InternVLModel): + + def encode_image(self, image): + vision_outputs = self.vision_model( + pixel_values=image, + output_hidden_states=False, + return_dict=True) + image_embeds = vision_outputs[0] + image_embeds = self.clip_projector(image_embeds) + return image_embeds + + def encode_text(self, text): + attention_mask = text > 0 + text_embeds = self.get_text_features( + input_ids=text, + attention_mask=attention_mask, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ) + text_embeds = text_embeds[torch.arange(text_embeds.shape[0]), attention_mask.sum(1) - 1] + text_embeds = text_embeds @ self.text_projection + return text_embeds + + def forward(self, image, text): + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + return logits_per_image, logits_per_text + + +class InternVL_G(InternVLModel): + + def encode_image(self, image): + backbone_embeds, image_embeds = self.get_image_features( + pixel_values=image, + output_hidden_states=False, + return_dict=True, + ) + backbone_embeds = self.clip_projector(backbone_embeds) + image_embeds = self.clip_projector2(image_embeds) + # ensemble + backbone_embeds = backbone_embeds / backbone_embeds.norm(dim=1, keepdim=True) + image_embeds = image_embeds / image_embeds.norm(dim=1, keepdim=True) + image_embeds = image_embeds + backbone_embeds + return image_embeds + + def encode_text(self, text): + attention_mask = text > 0 + text_embeds = self.get_text_features( + input_ids=text, + attention_mask=attention_mask, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ) + text_embeds = 
text_embeds[torch.arange(text_embeds.shape[0]), attention_mask.sum(1) - 1] + text_embeds = text_embeds @ self.text_projection + return text_embeds + + def forward(self, image, text): + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + return logits_per_image, logits_per_text diff --git a/checkpoints/InternVL-14B-224px/modeling_qllama.py b/checkpoints/InternVL-14B-224px/modeling_qllama.py new file mode 100644 index 0000000000000000000000000000000000000000..571863414d4ea0745c849f324e64e52ad1d1fed7 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/modeling_qllama.py @@ -0,0 +1,1073 @@ +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch QLLaMA model.""" +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +from transformers import LlamaConfig +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutputWithPast, + CausalLMOutputWithPast) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import (add_start_docstrings, + add_start_docstrings_to_model_forward, logging, + replace_return_docstrings) + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = 'LlamaConfig' + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +try: + from functools import partial + + from apex.normalization import FusedRMSNorm + + LlamaRMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa + print('Discovered apex.normalization.FusedRMSNorm - will use it instead of LlamaRMSNorm') +except ImportError: + # using the normal LlamaRMSNorm + pass +except Exception: + print('discovered apex but it failed to load, falling back to LlamaRMSNorm') + pass + + +class LlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer('inv_freq', inv_freq) + + # Build here to make `torch.jit.trace` work. + self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', emb.cos()[None, None, :, :], persistent=False) + self.register_buffer('sin_cached', emb.sin()[None, None, :, :], persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
+ if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.register_buffer('cos_cached', emb.cos()[None, None, :, :], persistent=False) + self.register_buffer('sin_cached', emb.sin()[None, None, :, :], persistent=False) + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +class FixedLlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + self.inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', emb.cos()[None, None, :, :], persistent=False) + self.register_buffer('sin_cached', emb.sin()[None, None, :, :], persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +LlamaRotaryEmbedding = FixedLlamaRotaryEmbedding + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2:] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] + gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) + cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + ): + super().__init__() + self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) + self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) + self.act_fn = ACT2FN[hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size 
+ self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' + f' and `num_heads`: {self.num_heads}).' + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + # [bsz, nh, t, hd] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f'Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is' + f' {attn_weights.size()}' + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}' + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not 
output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaCrossAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: LlamaConfig): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.max_position_embeddings = config.max_position_embeddings + self.vision_hidden_size = 3200 + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' + f' and `num_heads`: {self.num_heads}).' + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self.norm1 = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.k_proj = nn.Linear(self.vision_hidden_size, self.num_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.vision_hidden_size, self.num_heads * self.head_dim, bias=False) + self.norm2 = LlamaRMSNorm(self.vision_hidden_size, eps=config.rms_norm_eps) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + vision_hidden_states: torch.Tensor, + repeat_time: int = 1, + attention_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + hidden_states = self.norm1(hidden_states) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + + vision_hidden_states = self.norm2(vision_hidden_states) + + bs_v, kv_len, _ = vision_hidden_states.size() + + key_states = self.k_proj(vision_hidden_states).view( + bs_v, kv_len, self.num_heads, self.head_dim).transpose(1, 2) + value_states = self.v_proj(vision_hidden_states).view( + bs_v, kv_len, self.num_heads, self.head_dim).transpose(1, 2) + + key_states = key_states.repeat(repeat_time, 1, 1, 1) + value_states = value_states.repeat(repeat_time, 1, 1, 1) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f'Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is' + f' {attn_weights.size()}' + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' + ) + attn_weights = attn_weights + attention_mask + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) + + # upcast attention to fp32 + 
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}' + ) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: LlamaConfig, use_cross_attn: bool): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = LlamaAttention(config=config) + self.cross_attn = LlamaCrossAttention(config=config) if use_cross_attn else None + self.mlp = LlamaMLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + self.num_query_token = 96 + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + vision_hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + repeat_time: int = 1, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # when using generate function and cache mode, the size of hidden_states is 1, + # so we should not use cross attention + if self.cross_attn is not None and hidden_states.size(1) >= self.num_query_token \ + and vision_hidden_states is not None: + query_feats = hidden_states[:, :self.num_query_token, :] + text_feats = hidden_states[:, self.num_query_token:, :] + residual = query_feats + query_feats, _, _ = self.cross_attn( + hidden_states=query_feats, + vision_hidden_states=vision_hidden_states, + attention_mask=None, # not use attention mask in cross attention + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + repeat_time=repeat_time, + ) + query_feats = residual + query_feats + hidden_states = torch.cat([query_feats, text_feats], dim=1) + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +LLAMA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`LlamaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +@add_start_docstrings( + 'The bare LLaMA Model outputting raw hidden-states without any specific head on top.', + LLAMA_START_DOCSTRING, +) +class LlamaPreTrainedModel(PreTrainedModel): + config_class = LlamaConfig + base_model_prefix = 'model' + supports_gradient_checkpointing = True + _no_split_modules = ['LlamaDecoderLayer'] + _keys_to_ignore_on_load_unexpected = [r'decoder\.version'] + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LlamaModel): + module.gradient_checkpointing = value + if isinstance(module, LlamaDecoderLayer): + module.gradient_checkpointing = value + + +LLAMA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + 'The bare LLaMA Model outputting raw hidden-states without any specific head on top.', + LLAMA_START_DOCSTRING, +) +class LlamaModel(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: LlamaConfig + """ + + def __init__(self, config: LlamaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.cross_attention_frequency = config.cross_attention_frequency + self.num_query_token = config.num_query_token + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + use_cross_attn = [idx % self.cross_attention_frequency == 0 for idx in range(config.num_hidden_layers)] + self.layers = nn.ModuleList( + [LlamaDecoderLayer(config, use_cross_attn[idx]) for idx in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.gradient_checkpointing = False + # Initialize weights and apply final processing + # self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + vision_hidden_states: Optional[torch.FloatTensor] 
= None, + repeat_time: Optional[int] = 1, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + use_zero_attention_mask: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + if use_zero_attention_mask: + attention_mask[:, :, :self.num_query_token, :self.num_query_token] = 0 + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + layer_outputs = decoder_layer( + hidden_states, + vision_hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + repeat_time=repeat_time, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + def forward_train( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + vision_hidden_states: Optional[torch.FloatTensor] = None, + repeat_time: Optional[int] = 1, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = 
self.embed_tokens(input_ids) + # embed positions + # if attention_mask is None: + # attention_mask = torch.ones( + # (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + # ) + # attention_mask = self._prepare_decoder_attention_mask( + # attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + # ) + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None, repeat_time) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + vision_hidden_states, + attention_mask, + position_ids, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + vision_hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + repeat_time=repeat_time, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class LlamaForCausalLM(LlamaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.model = LlamaModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + # self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + vision_hidden_states: 
Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + use_zero_attention_mask: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you consciours? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + vision_hidden_states=vision_hidden_states, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + use_zero_attention_mask=use_zero_attention_mask, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, + vision_hidden_states=None, use_zero_attention_mask=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get('position_ids', None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + 
position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update( + { + 'position_ids': position_ids, + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + 'vision_hidden_states': vision_hidden_states, + 'use_zero_attention_mask': use_zero_attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/checkpoints/InternVL-14B-224px/preprocessor_config.json b/checkpoints/InternVL-14B-224px/preprocessor_config.json new file mode 100644 index 0000000000000000000000000000000000000000..b1ba81f08f39016c8bbacea558f0d9ed92fd969a --- /dev/null +++ b/checkpoints/InternVL-14B-224px/preprocessor_config.json @@ -0,0 +1,19 @@ +{ + "crop_size": 224, + "do_center_crop": true, + "do_normalize": true, + "do_resize": true, + "feature_extractor_type": "CLIPFeatureExtractor", + "image_mean": [ + 0.485, + 0.456, + 0.406 + ], + "image_std": [ + 0.229, + 0.224, + 0.225 + ], + "resample": 3, + "size": 224 +} diff --git a/checkpoints/InternVL-14B-224px/pytorch_model.bin.index.json b/checkpoints/InternVL-14B-224px/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..70f57ca624bbbae56c311136b13648ba05175318 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/pytorch_model.bin.index.json @@ -0,0 +1,1055 @@ +{ + "metadata": { + "total_size": 27669951494 + }, + "weight_map": { + "clip_projector.cross_attn.k.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.k_bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.proj.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.proj.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.q.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.q_bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.v.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.cross_attn.v_bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_k.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_k.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_q.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_q.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_v.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector.norm1_v.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.k.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.k_bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.proj.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.proj.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.q.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.q_bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.v.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.cross_attn.v_bias": 
"pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_k.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_k.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_q.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_q.weight": "pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_v.bias": "pytorch_model-00003-of-00003.bin", + "clip_projector2.norm1_v.weight": "pytorch_model-00003-of-00003.bin", + "itm_head.bias": "pytorch_model-00003-of-00003.bin", + "itm_head.weight": "pytorch_model-00003-of-00003.bin", + "logit_scale": "pytorch_model-00001-of-00003.bin", + "qllama.lm_head.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.embed_tokens.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.0.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + 
"qllama.model.layers.10.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + 
"qllama.model.layers.14.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.16.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.16.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.16.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.16.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.17.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + 
"qllama.model.layers.17.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.17.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.2.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.mlp.gate_proj.weight": 
"pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.2.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.20.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + 
"qllama.model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + 
"qllama.model.layers.26.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.28.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.mlp.up_proj.weight": 
"pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.29.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.3.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.30.cross_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.cross_attn.norm1.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.cross_attn.norm2.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.cross_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.cross_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.cross_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.30.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin", + "qllama.model.layers.4.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + 
"qllama.model.layers.4.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.4.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.5.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.6.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + 
"qllama.model.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.7.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.norm1.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.norm2.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.cross_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin", + "qllama.model.norm.weight": "pytorch_model-00003-of-00003.bin", + "query_tokens": "pytorch_model-00001-of-00003.bin", + "text_projection": "pytorch_model-00001-of-00003.bin", + "vision_model.embeddings.class_embedding": "pytorch_model-00001-of-00003.bin", + "vision_model.embeddings.patch_embedding.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.embeddings.patch_embedding.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.embeddings.position_embedding": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.ls1": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.0.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.1.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.10.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.11.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.12.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.13.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.14.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.15.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.16.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.17.norm2.weight": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.18.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.19.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.2.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.20.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.20.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.21.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.22.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.23.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.23.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.24.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.25.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.ls2": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.26.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.27.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.28.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.29.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.29.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.3.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.30.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.norm1.weight": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.31.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.32.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.33.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.34.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.35.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.35.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.36.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.37.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + 
"vision_model.encoder.layers.38.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.38.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.39.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.4.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.ls1": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.40.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.40.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.40.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.40.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.40.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.40.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.41.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.42.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + 
"vision_model.encoder.layers.43.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.43.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.44.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.45.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + 
"vision_model.encoder.layers.46.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.46.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.attn.k_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.attn.proj.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.attn.proj.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.attn.q_norm.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.attn.qkv.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.ls1": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.ls2": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.mlp.fc1.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.mlp.fc1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.mlp.fc2.bias": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.mlp.fc2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.norm1.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.47.norm2.weight": "pytorch_model-00002-of-00003.bin", + "vision_model.encoder.layers.5.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.5.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.6.norm2.weight": 
"pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.7.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.8.norm2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.attn.k_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.attn.proj.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.attn.proj.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.attn.q_norm.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.attn.qkv.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.ls1": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.ls2": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.norm1.weight": "pytorch_model-00001-of-00003.bin", + "vision_model.encoder.layers.9.norm2.weight": "pytorch_model-00001-of-00003.bin" + } +} diff --git a/checkpoints/InternVL-14B-224px/special_tokens_map.json b/checkpoints/InternVL-14B-224px/special_tokens_map.json new file mode 100644 index 
0000000000000000000000000000000000000000..3f58a5e115855c6ea3cec98accae196ad927222e --- /dev/null +++ b/checkpoints/InternVL-14B-224px/special_tokens_map.json @@ -0,0 +1,6 @@ +{ + "bos_token": "<s>", + "eos_token": "</s>", + "pad_token": "[PAD]", + "unk_token": "<unk>" +} diff --git a/checkpoints/InternVL-14B-224px/tokenizer.model b/checkpoints/InternVL-14B-224px/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..0873389d502cb46810209ad9de6bd6afbe3eea64 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d967e855b1213a439df6c8ce2791f869c84b4f3b6cfacf22b86440b8192a2f8 +size 757972 diff --git a/checkpoints/InternVL-14B-224px/tokenizer_config.json b/checkpoints/InternVL-14B-224px/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..6a5294cb2595f792b3bf4c72362d35456b728e99 --- /dev/null +++ b/checkpoints/InternVL-14B-224px/tokenizer_config.json @@ -0,0 +1,37 @@ +{ + "add_bos_token": true, + "add_eos_token": true, + "bos_token": { + "__type": "AddedToken", + "content": "<s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "clean_up_tokenization_spaces": false, + "eos_token": { + "__type": "AddedToken", + "content": "</s>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "legacy": null, + "model_max_length": 1000000000000000019884624838656, + "pad_token": null, + "sp_model_kwargs": {}, + "spaces_between_special_tokens": false, + "tokenizer_class": "LlamaTokenizer", + "unk_token": { + "__type": "AddedToken", + "content": "<unk>", + "lstrip": false, + "normalized": true, + "rstrip": false, + "single_word": false + }, + "use_default_system_prompt": true, + "use_fast": false +}
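Usage note (not part of this diff): the long key-to-shard mapping above looks like the standard Hugging Face sharded-checkpoint index for InternVL-14B-224px, i.e. a `weight_map` dict inside `pytorch_model.bin.index.json` that points each parameter name at one of the `pytorch_model-0000X-of-00003.bin` shards. Assuming that layout (the index filename is not visible in this diff and is an assumption, as is the example parameter choice), a minimal sketch of resolving a parameter to its shard and loading only that shard looks like this:

```python
# Minimal sketch, assuming the standard HF shard index layout; the index
# filename and example parameter name are assumptions for illustration.
import json
import os

import torch

ckpt_dir = "checkpoints/InternVL-14B-224px"                 # directory added in this diff
index_path = os.path.join(ckpt_dir, "pytorch_model.bin.index.json")  # assumed filename

with open(index_path) as f:
    index = json.load(f)

weight_map = index["weight_map"]                            # param name -> shard filename

name = "vision_model.encoder.layers.41.attn.qkv.weight"     # example key from the map above
shard_file = weight_map[name]                               # e.g. pytorch_model-00002-of-00003.bin

# Load just that one shard on CPU and pull out the single tensor.
shard = torch.load(os.path.join(ckpt_dir, shard_file), map_location="cpu")
print(name, tuple(shard[name].shape), "->", shard_file)
```

The tokenizer files added here follow the slow SentencePiece `LlamaTokenizer` setup (`"use_fast": false`), so something like `AutoTokenizer.from_pretrained("checkpoints/InternVL-14B-224px", use_fast=False)` should pick up `tokenizer.model`, `tokenizer_config.json`, and `special_tokens_map.json` directly; note that the large files in this diff are Git LFS pointers, so `git lfs pull` is needed before any of them can actually be loaded.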