diff --git a/.gitattributes b/.gitattributes
index e1583355c7ae1185bf4c3220fbaec8b7a97d264c..3022d34633af4e2eff2942657b0835250e534d94 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -730,3 +730,4 @@ mplug_owl2/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/tk8.6/demos/images/teapot.ppm filter=lfs diff=lfs merge=lfs -text
mplug_owl2/x86_64-conda-linux-gnu/bin/ld filter=lfs diff=lfs merge=lfs -text
mplug_owl2/x86_64-conda_cos7-linux-gnu/bin/ld filter=lfs diff=lfs merge=lfs -text
+llava_video/bin/python filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_video/bin/python b/llava_video/bin/python
new file mode 100644
index 0000000000000000000000000000000000000000..74a5baaf2e81fcc0b6d75ff455a28dc3e8a666b0
--- /dev/null
+++ b/llava_video/bin/python
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb1beead1165d4a29ee335f42e91675430ca957b4edc44686d5d7cc21cfa6098
+size 17225608
diff --git a/mplug_owl2/share/terminfo/h/h19-us b/mplug_owl2/share/terminfo/h/h19-us
new file mode 100644
index 0000000000000000000000000000000000000000..cbcbac1de175eedcf88f9a355853bbadca1c430c
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/h19-us differ
diff --git a/mplug_owl2/share/terminfo/h/hazel b/mplug_owl2/share/terminfo/h/hazel
new file mode 100644
index 0000000000000000000000000000000000000000..60d191ec601d1cc7487f68683b167c3a141134a7
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hazel differ
diff --git a/mplug_owl2/share/terminfo/h/hft-old b/mplug_owl2/share/terminfo/h/hft-old
new file mode 100644
index 0000000000000000000000000000000000000000..ccf948d90ae5c0d417194bb19c8352bfd0f200ce
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hft-old differ
diff --git a/mplug_owl2/share/terminfo/h/hp2397 b/mplug_owl2/share/terminfo/h/hp2397
new file mode 100644
index 0000000000000000000000000000000000000000..70ea75de59c7d50e6776d9b677fef950a18046ea
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp2397 differ
diff --git a/mplug_owl2/share/terminfo/h/hp2624b-10p b/mplug_owl2/share/terminfo/h/hp2624b-10p
new file mode 100644
index 0000000000000000000000000000000000000000..194ab137f331374fa0e1f11c870f7958d40cc20e
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp2624b-10p differ
diff --git a/mplug_owl2/share/terminfo/h/hp2624b-4p b/mplug_owl2/share/terminfo/h/hp2624b-4p
new file mode 100644
index 0000000000000000000000000000000000000000..b5a829ab86f0b528d7bab60e486d9287757eb8aa
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp2624b-4p differ
diff --git a/mplug_owl2/share/terminfo/h/hp2627c b/mplug_owl2/share/terminfo/h/hp2627c
new file mode 100644
index 0000000000000000000000000000000000000000..61f9e2c20cf79598efbd460f2aed23b923da83c2
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp2627c differ
diff --git a/mplug_owl2/share/terminfo/h/hp9845 b/mplug_owl2/share/terminfo/h/hp9845
new file mode 100644
index 0000000000000000000000000000000000000000..c813f8b87980badade5fa95712caff825d72d5b3
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp9845 differ
diff --git a/mplug_owl2/share/terminfo/h/hp98550 b/mplug_owl2/share/terminfo/h/hp98550
new file mode 100644
index 0000000000000000000000000000000000000000..05110fcea74067c2fe3ff7f9b5dd9cdd359d9dc5
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hp98550 differ
diff --git a/mplug_owl2/share/terminfo/h/hz1510 b/mplug_owl2/share/terminfo/h/hz1510
new file mode 100644
index 0000000000000000000000000000000000000000..96d7e73678efc042caed6bd46a9c4c42e11b4952
Binary files /dev/null and b/mplug_owl2/share/terminfo/h/hz1510 differ
diff --git a/mplug_owl2/share/terminfo/p/p12 b/mplug_owl2/share/terminfo/p/p12
new file mode 100644
index 0000000000000000000000000000000000000000..12de25677be37fc5fee85d23eda474b8cb642755
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p12 differ
diff --git a/mplug_owl2/share/terminfo/p/p12-m b/mplug_owl2/share/terminfo/p/p12-m
new file mode 100644
index 0000000000000000000000000000000000000000..c4dd32141f0efc92e1692a94f55bacf68d0db902
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p12-m differ
diff --git a/mplug_owl2/share/terminfo/p/p12-w b/mplug_owl2/share/terminfo/p/p12-w
new file mode 100644
index 0000000000000000000000000000000000000000..2fe870dbc535c37e247b6f7213e7d89ec4e3879e
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p12-w differ
diff --git a/mplug_owl2/share/terminfo/p/p14 b/mplug_owl2/share/terminfo/p/p14
new file mode 100644
index 0000000000000000000000000000000000000000..485f1bc722fddca07abc271ac66fa3bc4cb6b0d4
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p14 differ
diff --git a/mplug_owl2/share/terminfo/p/p14-m b/mplug_owl2/share/terminfo/p/p14-m
new file mode 100644
index 0000000000000000000000000000000000000000..39dd310720abf005cd3c3561b386b426fbb3a3a4
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p14-m differ
diff --git a/mplug_owl2/share/terminfo/p/p14-m-w b/mplug_owl2/share/terminfo/p/p14-m-w
new file mode 100644
index 0000000000000000000000000000000000000000..5b6475de6eef814393a5f5a8fc3d368625b276f7
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p14-m-w differ
diff --git a/mplug_owl2/share/terminfo/p/p7 b/mplug_owl2/share/terminfo/p/p7
new file mode 100644
index 0000000000000000000000000000000000000000..539b4ff6570496b03f33a1c661ff661137089364
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p7 differ
diff --git a/mplug_owl2/share/terminfo/p/p8gl b/mplug_owl2/share/terminfo/p/p8gl
new file mode 100644
index 0000000000000000000000000000000000000000..aabd61376d58a5c96fc3950012d8646a36bc17e3
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p8gl differ
diff --git a/mplug_owl2/share/terminfo/p/p9-8 b/mplug_owl2/share/terminfo/p/p9-8
new file mode 100644
index 0000000000000000000000000000000000000000..8191a9bd8dc3adefd3fecabeac8265090521e448
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p9-8 differ
diff --git a/mplug_owl2/share/terminfo/p/p9-w b/mplug_owl2/share/terminfo/p/p9-w
new file mode 100644
index 0000000000000000000000000000000000000000..44029ba7f9d33ae39f2d11a5ddbb8d53e8d6874f
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/p9-w differ
diff --git a/mplug_owl2/share/terminfo/p/pc3 b/mplug_owl2/share/terminfo/p/pc3
new file mode 100644
index 0000000000000000000000000000000000000000..08460b72fd4bf74e3189f8b3d806ee8f5d125ce8
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc3 differ
diff --git a/mplug_owl2/share/terminfo/p/pc6300plus b/mplug_owl2/share/terminfo/p/pc6300plus
new file mode 100644
index 0000000000000000000000000000000000000000..8e95f22bfe8a74cb364fb016d0e9d74251253a30
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc6300plus differ
diff --git a/mplug_owl2/share/terminfo/p/pc7300 b/mplug_owl2/share/terminfo/p/pc7300
new file mode 100644
index 0000000000000000000000000000000000000000..b41843d6a40a7d583c1dab1a8ab7da28442fbab6
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pc7300 differ
diff --git a/mplug_owl2/share/terminfo/p/pcansi-25 b/mplug_owl2/share/terminfo/p/pcansi-25
new file mode 100644
index 0000000000000000000000000000000000000000..50440accd578cfbc03bc7c1979af64ebb15ebee4
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-25 differ
diff --git a/mplug_owl2/share/terminfo/p/pcansi-25-m b/mplug_owl2/share/terminfo/p/pcansi-25-m
new file mode 100644
index 0000000000000000000000000000000000000000..eea464609fa627787de94bef3b401d01f63540ae
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-25-m differ
diff --git a/mplug_owl2/share/terminfo/p/pcansi-33-m b/mplug_owl2/share/terminfo/p/pcansi-33-m
new file mode 100644
index 0000000000000000000000000000000000000000..c9b0dcb6076a56225f92ed4578b9c5d164647c82
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi-33-m differ
diff --git a/mplug_owl2/share/terminfo/p/pcansi33 b/mplug_owl2/share/terminfo/p/pcansi33
new file mode 100644
index 0000000000000000000000000000000000000000..77ec07a919bdc8c2553d752f939c2a65198964b1
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcansi33 differ
diff --git a/mplug_owl2/share/terminfo/p/pccon b/mplug_owl2/share/terminfo/p/pccon
new file mode 100644
index 0000000000000000000000000000000000000000..978b4620771e8b49ef8aeaacf07ba8d47a1c71af
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pccon differ
diff --git a/mplug_owl2/share/terminfo/p/pccon+colors b/mplug_owl2/share/terminfo/p/pccon+colors
new file mode 100644
index 0000000000000000000000000000000000000000..8a884fa8f8df4bc873f678ce85a50027dc796f4b
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pccon+colors differ
diff --git a/mplug_owl2/share/terminfo/p/pccon0 b/mplug_owl2/share/terminfo/p/pccon0
new file mode 100644
index 0000000000000000000000000000000000000000..e3553fbf74d37c40fd3e89f469c8b58e2dee703d
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pccon0 differ
diff --git a/mplug_owl2/share/terminfo/p/pcconsole b/mplug_owl2/share/terminfo/p/pcconsole
new file mode 100644
index 0000000000000000000000000000000000000000..5d7c9457abb34e75d0d0c1f5d58b68eefa697978
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcconsole differ
diff --git a/mplug_owl2/share/terminfo/p/pcix b/mplug_owl2/share/terminfo/p/pcix
new file mode 100644
index 0000000000000000000000000000000000000000..fdfbe5174216ff07831532aaa865dcdd345fb93e
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcix differ
diff --git a/mplug_owl2/share/terminfo/p/pckermit12 b/mplug_owl2/share/terminfo/p/pckermit12
new file mode 100644
index 0000000000000000000000000000000000000000..55ac2ccbb1632c5c88bc7368505abad128df4a79
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pckermit12 differ
diff --git a/mplug_owl2/share/terminfo/p/pckermit120 b/mplug_owl2/share/terminfo/p/pckermit120
new file mode 100644
index 0000000000000000000000000000000000000000..b8682c845f6f1ad9ebe6cb952a6c9638970feefc
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pckermit120 differ
diff --git a/mplug_owl2/share/terminfo/p/pcvt25 b/mplug_owl2/share/terminfo/p/pcvt25
new file mode 100644
index 0000000000000000000000000000000000000000..b3effccec4d5472188c9cfb0bba5f2ee5f74fff5
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt25 differ
diff --git a/mplug_owl2/share/terminfo/p/pcvt25w b/mplug_owl2/share/terminfo/p/pcvt25w
new file mode 100644
index 0000000000000000000000000000000000000000..89688cda466333f85b0a82c3e316b1ec5ec7d752
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt25w differ
diff --git a/mplug_owl2/share/terminfo/p/pcvt35w b/mplug_owl2/share/terminfo/p/pcvt35w
new file mode 100644
index 0000000000000000000000000000000000000000..8aec22cb4d5118258eab54704d610384a78c2fe2
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt35w differ
diff --git a/mplug_owl2/share/terminfo/p/pcvt43w b/mplug_owl2/share/terminfo/p/pcvt43w
new file mode 100644
index 0000000000000000000000000000000000000000..2c4a616be91d1b954c80445ba8f36cfe02673173
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt43w differ
diff --git a/mplug_owl2/share/terminfo/p/pcvt50 b/mplug_owl2/share/terminfo/p/pcvt50
new file mode 100644
index 0000000000000000000000000000000000000000..bca907f73fc7059f7fa0c2d34ea75f2ec4acd87c
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvt50 differ
diff --git a/mplug_owl2/share/terminfo/p/pcvtXX b/mplug_owl2/share/terminfo/p/pcvtXX
new file mode 100644
index 0000000000000000000000000000000000000000..e14f85d530b9f2e4b086968d117cd09da3584364
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pcvtXX differ
diff --git a/mplug_owl2/share/terminfo/p/pe1251 b/mplug_owl2/share/terminfo/p/pe1251
new file mode 100644
index 0000000000000000000000000000000000000000..01acc1b70c66fd6eb748506db7d001cdc8e487a1
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pe1251 differ
diff --git a/mplug_owl2/share/terminfo/p/pe550 b/mplug_owl2/share/terminfo/p/pe550
new file mode 100644
index 0000000000000000000000000000000000000000..2dc4fa3a3cb77217ddcc0d8bd8fcc65577fcc5f2
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pe550 differ
diff --git a/mplug_owl2/share/terminfo/p/pe6300 b/mplug_owl2/share/terminfo/p/pe6300
new file mode 100644
index 0000000000000000000000000000000000000000..01acc1b70c66fd6eb748506db7d001cdc8e487a1
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pe6300 differ
diff --git a/mplug_owl2/share/terminfo/p/prism12-m b/mplug_owl2/share/terminfo/p/prism12-m
new file mode 100644
index 0000000000000000000000000000000000000000..c4dd32141f0efc92e1692a94f55bacf68d0db902
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism12-m differ
diff --git a/mplug_owl2/share/terminfo/p/prism12-m-w b/mplug_owl2/share/terminfo/p/prism12-m-w
new file mode 100644
index 0000000000000000000000000000000000000000..7a52b60953882f736415e566f6ce9e89185adb14
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism12-m-w differ
diff --git a/mplug_owl2/share/terminfo/p/prism14-m b/mplug_owl2/share/terminfo/p/prism14-m
new file mode 100644
index 0000000000000000000000000000000000000000..39dd310720abf005cd3c3561b386b426fbb3a3a4
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism14-m differ
diff --git a/mplug_owl2/share/terminfo/p/prism14-m-w b/mplug_owl2/share/terminfo/p/prism14-m-w
new file mode 100644
index 0000000000000000000000000000000000000000..5b6475de6eef814393a5f5a8fc3d368625b276f7
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism14-m-w differ
diff --git a/mplug_owl2/share/terminfo/p/prism14-w b/mplug_owl2/share/terminfo/p/prism14-w
new file mode 100644
index 0000000000000000000000000000000000000000..1d7df53b710aff295e42eeca63d45879279a70fe
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism14-w differ
diff --git a/mplug_owl2/share/terminfo/p/prism5 b/mplug_owl2/share/terminfo/p/prism5
new file mode 100644
index 0000000000000000000000000000000000000000..f1848d045a59b0bd9a0bffdad259f03cbc407ffa
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism5 differ
diff --git a/mplug_owl2/share/terminfo/p/prism8gl b/mplug_owl2/share/terminfo/p/prism8gl
new file mode 100644
index 0000000000000000000000000000000000000000..aabd61376d58a5c96fc3950012d8646a36bc17e3
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/prism8gl differ
diff --git a/mplug_owl2/share/terminfo/p/ps300 b/mplug_owl2/share/terminfo/p/ps300
new file mode 100644
index 0000000000000000000000000000000000000000..de2ccd7689a6b6d204318bf574a65f4500d3199f
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/ps300 differ
diff --git a/mplug_owl2/share/terminfo/p/psterm-80x24 b/mplug_owl2/share/terminfo/p/psterm-80x24
new file mode 100644
index 0000000000000000000000000000000000000000..786fe23202064ff98d51483cd739439405f41493
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/psterm-80x24 differ
diff --git a/mplug_owl2/share/terminfo/p/psterm-90x28 b/mplug_owl2/share/terminfo/p/psterm-90x28
new file mode 100644
index 0000000000000000000000000000000000000000..6557b3727a3d9725cbf3f0af1285a99973f2d29c
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/psterm-90x28 differ
diff --git a/mplug_owl2/share/terminfo/p/psterm-96x48 b/mplug_owl2/share/terminfo/p/psterm-96x48
new file mode 100644
index 0000000000000000000000000000000000000000..a4ab8884a927fe2261f52c2c8b45bbccd6aa30e9
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/psterm-96x48 differ
diff --git a/mplug_owl2/share/terminfo/p/psx_ansi b/mplug_owl2/share/terminfo/p/psx_ansi
new file mode 100644
index 0000000000000000000000000000000000000000..b6bc8b8f576d8726fc0ec20ecccc67988fe960a6
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/psx_ansi differ
diff --git a/mplug_owl2/share/terminfo/p/pt100 b/mplug_owl2/share/terminfo/p/pt100
new file mode 100644
index 0000000000000000000000000000000000000000..cf224e6f4f4ab49dd2d1b020d58572b8905c8f54
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt100 differ
diff --git a/mplug_owl2/share/terminfo/p/pt250 b/mplug_owl2/share/terminfo/p/pt250
new file mode 100644
index 0000000000000000000000000000000000000000..01b1660d892af3129f0530f10846f8bfb443249e
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt250 differ
diff --git a/mplug_owl2/share/terminfo/p/pt505-22 b/mplug_owl2/share/terminfo/p/pt505-22
new file mode 100644
index 0000000000000000000000000000000000000000..bbf0d9a1a0969f9627684f6a45de7e8880107c7e
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt505-22 differ
diff --git a/mplug_owl2/share/terminfo/p/pt505-24 b/mplug_owl2/share/terminfo/p/pt505-24
new file mode 100644
index 0000000000000000000000000000000000000000..ead33f945ac23d717bf22d18f000b965418d4631
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pt505-24 differ
diff --git a/mplug_owl2/share/terminfo/p/pty b/mplug_owl2/share/terminfo/p/pty
new file mode 100644
index 0000000000000000000000000000000000000000..deb1bf7da46f912f518b102a99523a696d3113e3
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/pty differ
diff --git a/mplug_owl2/share/terminfo/p/putty b/mplug_owl2/share/terminfo/p/putty
new file mode 100644
index 0000000000000000000000000000000000000000..94e2c883aaa72e71489eae2863a380b5408edfa5
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty differ
diff --git a/mplug_owl2/share/terminfo/p/putty+fnkeys+vt400 b/mplug_owl2/share/terminfo/p/putty+fnkeys+vt400
new file mode 100644
index 0000000000000000000000000000000000000000..1d329d383e81793c381ff2f8ab9b5651fd7abfa7
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty+fnkeys+vt400 differ
diff --git a/mplug_owl2/share/terminfo/p/putty-m1 b/mplug_owl2/share/terminfo/p/putty-m1
new file mode 100644
index 0000000000000000000000000000000000000000..6db7ff6f065f45d7cce75531a1f9778705022afb
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-m1 differ
diff --git a/mplug_owl2/share/terminfo/p/putty-m2 b/mplug_owl2/share/terminfo/p/putty-m2
new file mode 100644
index 0000000000000000000000000000000000000000..6d07493f3ba55c91d816c6b84b48950eaefab8fd
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-m2 differ
diff --git a/mplug_owl2/share/terminfo/p/putty-sco b/mplug_owl2/share/terminfo/p/putty-sco
new file mode 100644
index 0000000000000000000000000000000000000000..d615ac650ad983cc887fd5b0594a7d85673cb0eb
Binary files /dev/null and b/mplug_owl2/share/terminfo/p/putty-sco differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2a7a15706f3eda36302d2502cc9aeba18363f41
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a1741fe4cd6e07478af5650aec5f8ab58ba3b9a
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5492f647f3252e4b675c1bf20bf4d8df09e7143
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/download.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10692b4e358db7ef60ea76c1012bec51612bf6d5
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6f6c3729dbc1b754b4a9b1dbd6119c28ef08c32
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/commands/__pycache__/serving.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/data_collator.py b/openflamingo/lib/python3.10/site-packages/transformers/data/data_collator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a68ee3b83d9537622181f41167d825dab7090c05
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/data/data_collator.py
@@ -0,0 +1,1535 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import warnings
+from collections.abc import Mapping
+from dataclasses import dataclass
+from random import randint
+from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
+
+import numpy as np
+
+from ..models.bert import BertTokenizer, BertTokenizerFast
+from ..tokenization_utils_base import PreTrainedTokenizerBase
+from ..utils import PaddingStrategy
+
+
+InputDataClass = NewType("InputDataClass", Any)
+
+"""
+A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
+of PyTorch/TensorFlow tensors or NumPy arrays.
+"""
+DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
+
+
+class DataCollatorMixin:
+ def __call__(self, features, return_tensors=None):
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ if return_tensors == "tf":
+ return self.tf_call(features)
+ elif return_tensors == "pt":
+ return self.torch_call(features)
+ elif return_tensors == "np":
+ return self.numpy_call(features)
+ else:
+ raise ValueError(f"Framework '{return_tensors}' not recognized!")
+
+
+def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
+ """
+    Very simple data collator that collates batches of dict-like objects and performs special handling for
+ potential keys named:
+
+ - `label`: handles a single value (int or float) per object
+ - `label_ids`: handles a list of values per object
+
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
+    to the model. See glue and ner for examples of how it's useful.
+ """
+
+ # In this function we'll make the assumption that all `features` in the batch
+ # have the same attributes.
+ # So we will look at the first element as a proxy for what attributes exist
+ # on the whole batch.
+
+ if return_tensors == "pt":
+ return torch_default_data_collator(features)
+ elif return_tensors == "tf":
+ return tf_default_data_collator(features)
+ elif return_tensors == "np":
+ return numpy_default_data_collator(features)
+
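+# Editor's usage sketch (hypothetical, not part of the upstream module): a
+# tiny hand-made batch showing what default_data_collator returns.
+def _demo_default_data_collator():  # pragma: no cover
+    features = [
+        {"input_ids": [101, 2023, 102], "label": 0},
+        {"input_ids": [101, 2062, 102], "label": 1},
+    ]
+    batch = default_data_collator(features)
+    # batch["labels"] -> LongTensor([0, 1]); batch["input_ids"] -> shape (2, 3).
+    return batch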
+
+@dataclass
+class DefaultDataCollator(DataCollatorMixin):
+ """
+    Very simple data collator that collates batches of dict-like objects and performs special handling for
+ potential keys named:
+
+ - `label`: handles a single value (int or float) per object
+ - `label_ids`: handles a list of values per object
+
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
+    to the model. See glue and ner for examples of how it's useful.
+
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
+ helpful if you need to set a return_tensors value at initialization.
+
+ Args:
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ return_tensors: str = "pt"
+
+ def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ return default_data_collator(features, return_tensors)
+
+
+def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ import torch
+
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
+ dtype = torch.long if isinstance(label, int) else torch.float
+ batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
+ elif "label_ids" in first and first["label_ids"] is not None:
+ if isinstance(first["label_ids"], torch.Tensor):
+ batch["labels"] = torch.stack([f["label_ids"] for f in features])
+ else:
+ dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
+ batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
+
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
+ if isinstance(v, torch.Tensor):
+ batch[k] = torch.stack([f[k] for f in features])
+ elif isinstance(v, np.ndarray):
+ batch[k] = torch.tensor(np.stack([f[k] for f in features]))
+ else:
+ batch[k] = torch.tensor([f[k] for f in features])
+
+ return batch
+
+
+def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label_col_name = "label"
+ elif "label_ids" in first and first["label_ids"] is not None:
+ label_col_name = "label_ids"
+ elif "labels" in first and first["labels"] is not None:
+ label_col_name = "labels"
+ else:
+ label_col_name = None
+ if label_col_name is not None:
+ if isinstance(first[label_col_name], tf.Tensor):
+ dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
+ elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
+ dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
+ elif isinstance(first[label_col_name], (tuple, list)):
+ dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
+ else:
+ dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
+ batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
+ if isinstance(v, (tf.Tensor, np.ndarray)):
+ batch[k] = tf.stack([f[k] for f in features])
+ else:
+ batch[k] = tf.convert_to_tensor([f[k] for f in features])
+
+ return batch
+
+
+def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
+ dtype = np.int64 if isinstance(label, int) else np.float32
+ batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
+ elif "label_ids" in first and first["label_ids"] is not None:
+ if isinstance(first["label_ids"], np.ndarray):
+ batch["labels"] = np.stack([f["label_ids"] for f in features])
+ else:
+ dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32
+ batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
+
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
+ if isinstance(v, np.ndarray):
+ batch[k] = np.stack([f[k] for f in features])
+ else:
+ batch[k] = np.array([f[k] for f in features])
+
+ return batch
+
+
+@dataclass
+class DataCollatorWithPadding:
+ """
+ Data collator that will dynamically pad the inputs received.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ return_tensors: str = "pt"
+
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
+ batch = self.tokenizer.pad(
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors=self.return_tensors,
+ )
+ if "label" in batch:
+ batch["labels"] = batch["label"]
+ del batch["label"]
+ if "label_ids" in batch:
+ batch["labels"] = batch["label_ids"]
+ del batch["label_ids"]
+ return batch
+
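+# Editor's usage sketch (hypothetical, not part of the upstream module); the
+# checkpoint name "bert-base-uncased" is an assumption, any tokenizer works.
+def _demo_data_collator_with_padding():  # pragma: no cover
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
+    features = [tokenizer("a short text"), tokenizer("a noticeably longer input text")]
+    batch = collator(features)
+    # input_ids / attention_mask are padded to a shared length (a multiple of 8).
+    return batch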
+
+@dataclass
+class DataCollatorForTokenClassification(DataCollatorMixin):
+ """
+ Data collator that will dynamically pad the inputs received, as well as the labels.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ label_pad_token_id (`int`, *optional*, defaults to -100):
+            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ label_pad_token_id: int = -100
+ return_tensors: str = "pt"
+
+ def torch_call(self, features):
+ import torch
+
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+
+ no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
+
+ batch = self.tokenizer.pad(
+ no_labels_features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors="pt",
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = batch["input_ids"].shape[1]
+ padding_side = self.tokenizer.padding_side
+
+ def to_list(tensor_or_iterable):
+ if isinstance(tensor_or_iterable, torch.Tensor):
+ return tensor_or_iterable.tolist()
+ return list(tensor_or_iterable)
+
+ if padding_side == "right":
+ batch[label_name] = [
+ to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch[label_name] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
+ ]
+
+ batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
+ return batch
+
+ def tf_call(self, features):
+ import tensorflow as tf
+
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+ batch = self.tokenizer.pad(
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
+ return_tensors="tf" if labels is None else None,
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
+ padding_side = self.tokenizer.padding_side
+ if padding_side == "right":
+ batch["labels"] = [
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch["labels"] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
+ ]
+
+ batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
+ return batch
+
+ def numpy_call(self, features):
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+ batch = self.tokenizer.pad(
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
+ return_tensors="np" if labels is None else None,
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = np.array(batch["input_ids"]).shape[1]
+ padding_side = self.tokenizer.padding_side
+ if padding_side == "right":
+ batch["labels"] = [
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch["labels"] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
+ ]
+
+ batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
+ return batch
+
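+# Editor's usage sketch (hypothetical, not part of the upstream module); the
+# token ids and label ids below are made up for illustration.
+def _demo_token_classification_collator():  # pragma: no cover
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    collator = DataCollatorForTokenClassification(tokenizer)
+    features = [
+        {"input_ids": [101, 7592, 102], "labels": [0, 1, 0]},
+        {"input_ids": [101, 7592, 2088, 999, 102], "labels": [0, 1, 2, 1, 0]},
+    ]
+    batch = collator(features)
+    # Shorter label rows are padded with -100 so loss functions skip those positions.
+    return batch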
+
+def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+ import torch
+
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple, np.ndarray)):
+ examples = [torch.tensor(e, dtype=torch.long) for e in examples]
+
+ length_of_first = examples[0].size(0)
+
+    # Check if padding is necessary.
+    are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return torch.stack(examples, dim=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(x.size(0) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
+ for i, example in enumerate(examples):
+ if tokenizer.padding_side == "right":
+ result[i, : example.shape[0]] = example
+ else:
+ result[i, -example.shape[0] :] = example
+ return result
+
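+# Editor's note, a worked example of the rounding above (not upstream code):
+# with max_length == 13 and pad_to_multiple_of == 8, ((13 // 8) + 1) * 8 == 16,
+# so every example in the batch is padded out to length 16.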
+
+def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+    import tensorflow as tf
+
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple)):
+ examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
+
+ # Check if padding is necessary.
+ length_of_first = len(examples[0])
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return tf.stack(examples, axis=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(len(x) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ result = []
+ rank = tf.rank(examples[0])
+ paddings = np.zeros((rank, 2), dtype=np.int32)
+ for example in examples:
+ if tokenizer.padding_side == "right":
+ paddings[0, 1] = max_length - len(example)
+ else:
+ paddings[0, 0] = max_length - len(example)
+ result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
+ return tf.stack(result, axis=0)
+
+
+def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple)):
+ examples = [np.array(e, dtype=np.int64) for e in examples]
+
+ # Check if padding is necessary.
+ length_of_first = len(examples[0])
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return np.stack(examples, axis=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(len(x) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
+ for i, example in enumerate(examples):
+ if tokenizer.padding_side == "right":
+ result[i, : example.shape[0]] = example
+ else:
+ result[i, -example.shape[0] :] = example
+ return result
+
+
+def tolist(x):
+ if isinstance(x, list):
+ return x
+ elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
+ x = x.numpy()
+ return x.tolist()
+
+
+@dataclass
+class DataCollatorForSeq2Seq:
+ """
+ Data collator that will dynamically pad the inputs received, as well as the labels.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ model ([`PreTrainedModel`]):
+            The model that is being trained. If set and the model has a *prepare_decoder_input_ids_from_labels*
+            method, it is used to prepare the *decoder_input_ids*.
+
+ This is useful when using *label_smoothing* to avoid calculating loss twice.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ label_pad_token_id (`int`, *optional*, defaults to -100):
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ model: Optional[Any] = None
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ label_pad_token_id: int = -100
+ return_tensors: str = "pt"
+
+ def __call__(self, features, return_tensors=None):
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
+ # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
+ # same length to return tensors.
+ if labels is not None:
+ max_label_length = max(len(l) for l in labels)
+ if self.pad_to_multiple_of is not None:
+ max_label_length = (
+ (max_label_length + self.pad_to_multiple_of - 1)
+ // self.pad_to_multiple_of
+ * self.pad_to_multiple_of
+ )
+
+ padding_side = self.tokenizer.padding_side
+ for feature in features:
+ remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
+ if isinstance(feature["labels"], list):
+ feature["labels"] = (
+ feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
+ )
+ elif padding_side == "right":
+ feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
+ else:
+ feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
+
+ features = self.tokenizer.pad(
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors=return_tensors,
+ )
+
+ # prepare decoder_input_ids
+ if (
+ labels is not None
+ and self.model is not None
+ and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
+ ):
+ decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
+ features["decoder_input_ids"] = decoder_input_ids
+
+ return features
+
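+# Editor's usage sketch (hypothetical, not part of the upstream module); with
+# model=None only the -100 label padding is exercised, and the ids are made up.
+def _demo_seq2seq_collator():  # pragma: no cover
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("t5-small")
+    collator = DataCollatorForSeq2Seq(tokenizer, model=None)
+    features = [
+        {"input_ids": [37, 423, 1], "labels": [37, 1]},
+        {"input_ids": [37, 1], "labels": [37, 423, 9, 1]},
+    ]
+    batch = collator(features)
+    # Labels are padded with -100 up front so tokenizer.pad can tensorize them.
+    return batch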
+
+@dataclass
+class DataCollatorForLanguageModeling(DataCollatorMixin):
+ """
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
+ are not all of the same length.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ mlm (`bool`, *optional*, defaults to `True`):
+ Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
+ with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
+ tokens and the value to predict for the masked token.
+ mlm_probability (`float`, *optional*, defaults to 0.15):
+ The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
+ BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
+ [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
+
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ mlm: bool = True
+ mlm_probability: float = 0.15
+ pad_to_multiple_of: Optional[int] = None
+ tf_experimental_compile: bool = False
+ return_tensors: str = "pt"
+
+ def __post_init__(self):
+ if self.mlm and self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
+ "You should pass `mlm=False` to train on causal language modeling instead."
+ )
+ if self.tf_experimental_compile:
+ import tensorflow as tf
+
+ self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
+
+ @staticmethod
+ def tf_bernoulli(shape, probability):
+ import tensorflow as tf
+
+ prob_matrix = tf.fill(shape, probability)
+ return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
+
+ def tf_mask_tokens(
+ self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
+ ) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ import tensorflow as tf
+
+ mask_token_id = tf.cast(mask_token_id, inputs.dtype)
+
+ input_shape = tf.shape(inputs)
+ # 1 for a special token, 0 for a normal token in the special tokens mask
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
+ labels = tf.where(masked_indices, inputs, -100)
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
+
+ inputs = tf.where(indices_replaced, mask_token_id, inputs)
+
+        # 10% of the time, we replace masked input tokens with random word
+        # (0.5 of the ~20% not already replaced == ~10% overall, matching the torch/numpy paths)
+        indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
+ random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
+
+ inputs = tf.where(indices_random, random_words, inputs)
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = self.tokenizer.pad(examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of)
+ else:
+ batch = {
+ "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
+ for val in batch["input_ids"].numpy().tolist()
+ ]
+ # Cannot directly create as bool
+ special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
+ else:
+ special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
+ batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
+ tf.cast(batch["input_ids"], tf.int64),
+ special_tokens_mask=special_tokens_mask,
+ mask_token_id=self.tokenizer.mask_token_id,
+ vocab_size=len(self.tokenizer),
+ )
+ else:
+ labels = batch["input_ids"]
+ if self.tokenizer.pad_token_id is not None:
+ # Replace self.tokenizer.pad_token_id with -100
+ labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
+ else:
+ labels = tf.identity(labels) # Makes a copy, just in case
+ batch["labels"] = labels
+ return batch
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
+ else:
+ batch = {
+ "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
+ )
+ else:
+ labels = batch["input_ids"].clone()
+ if self.tokenizer.pad_token_id is not None:
+ labels[labels == self.tokenizer.pad_token_id] = -100
+ batch["labels"] = labels
+ return batch
+
+ def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ import torch
+
+ labels = inputs.clone()
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
+ else:
+ special_tokens_mask = special_tokens_mask.bool()
+
+ probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
+ masked_indices = torch.bernoulli(probability_matrix).bool()
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
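+
+    # Editor's note (worked arithmetic, not upstream code): of the positions drawn
+    # with mlm_probability, bernoulli(0.8) masks ~80%; of the remaining ~20%,
+    # bernoulli(0.5) replaces half with random ids (~10% overall), and the last
+    # ~10% are left unchanged, matching the 80/10/10 split in the docstring.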
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = self.tokenizer.pad(examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of)
+ else:
+ batch = {
+ "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
+ )
+ else:
+ labels = np.copy(batch["input_ids"])
+ if self.tokenizer.pad_token_id is not None:
+ labels[labels == self.tokenizer.pad_token_id] = -100
+ batch["labels"] = labels
+ return batch
+
+ def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ labels = np.copy(inputs)
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ probability_matrix = np.full(labels.shape, self.mlm_probability)
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
+ else:
+ special_tokens_mask = special_tokens_mask.astype(bool)
+
+ probability_matrix[special_tokens_mask] = 0
+ # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
+ masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
+ inputs[indices_replaced] = self.tokenizer.mask_token_id
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = (
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
+ )
+ random_words = np.random.randint(
+ low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
+ )
+ inputs[indices_random] = random_words
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
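+# Editor's usage sketch (hypothetical, not part of the upstream module); passing
+# return_special_tokens_mask=True at tokenization time avoids a slow recompute.
+def _demo_mlm_collator():  # pragma: no cover
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+    collator = DataCollatorForLanguageModeling(tokenizer, mlm=True, mlm_probability=0.15)
+    features = [
+        tokenizer("the cat sat", return_special_tokens_mask=True),
+        tokenizer("a much longer sentence about cats", return_special_tokens_mask=True),
+    ]
+    batch = collator(features)
+    # ~15% of non-special tokens are selected; labels keep originals there, -100 elsewhere.
+    return batch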
+
+@dataclass
+class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
+ """
+ Data collator used for language modeling that masks entire words.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for masked language modeling
+
+ This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
+ that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
+    produce an output that is roughly equivalent to [`DataCollatorForLanguageModeling`].
+
+ """
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(id)
+ ref_tokens.append(token)
+
+            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for token_id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(token_id)
+ ref_tokens.append(token)
+
+ # For Chinese tokens, we need extra info to mark the sub-word, e.g. [喜, 欢] -> [喜, ##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for token_id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(token_id)
+ ref_tokens.append(token)
+
+ # For Chinese tokens, we need extra info to mark the sub-word, e.g. [喜, 欢] -> [喜, ##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
+ """
+ Get 0/1 labels for masked tokens with whole word mask proxy
+ """
+ if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
+ warnings.warn(
+ "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
+ "Please refer to the documentation for more information."
+ )
+
+ cand_indexes = []
+ for i, token in enumerate(input_tokens):
+ if token == "[CLS]" or token == "[SEP]":
+ continue
+
+ if len(cand_indexes) >= 1 and token.startswith("##"):
+ cand_indexes[-1].append(i)
+ else:
+ cand_indexes.append([i])
+
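+ # For intuition: input_tokens ["[CLS]", "hu", "##gging", "face", "[SEP]"] yields
+ # cand_indexes [[1, 2], [3]], so the sub-word pieces of one word are masked (or kept) together.
+ # With the default mlm_probability of 0.15 and 5 tokens, num_to_predict below is 1, the
+ # two-token candidate is skipped, and mask_labels comes out [0, 0, 0, 1, 0].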
+ random.shuffle(cand_indexes)
+ num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
+ masked_lms = []
+ covered_indexes = set()
+ for index_set in cand_indexes:
+ if len(masked_lms) >= num_to_predict:
+ break
+ # If adding a whole-word mask would exceed the maximum number of
+ # predictions, then just skip this candidate.
+ if len(masked_lms) + len(index_set) > num_to_predict:
+ continue
+ is_any_index_covered = False
+ for index in index_set:
+ if index in covered_indexes:
+ is_any_index_covered = True
+ break
+ if is_any_index_covered:
+ continue
+ for index in index_set:
+ covered_indexes.add(index)
+ masked_lms.append(index)
+
+ if len(covered_indexes) != len(masked_lms):
+ raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
+ mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
+ return mask_labels
+
+ def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
+ 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = inputs.clone()
+ # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, 0.15 by default as in BERT/RoBERTa)
+
+ probability_matrix = mask_labels
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
+
+ masked_indices = probability_matrix.bool()
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with a random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
+ 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.
+ """
+ import tensorflow as tf
+
+ input_shape = tf.shape(inputs)
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = tf.identity(inputs)
+ # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, 0.15 by default as in BERT/RoBERTa)
+
+ masked_indices = tf.cast(mask_labels, tf.bool)
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
+ ]
+ masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = inputs == self.tokenizer.pad_token_id
+ masked_indices = masked_indices & ~padding_mask
+
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
+ labels = tf.where(masked_indices, inputs, -100)
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
+
+ inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
+
+ # 10% of the time, we replace masked input tokens with a random word
+ indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
+ random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
+ inputs = tf.where(indices_random, random_words, inputs)
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
+ 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref.
+ """
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = np.copy(inputs)
+ # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, 0.15 by default as in BERT/RoBERTa)
+
+ masked_indices = mask_labels.astype(bool)
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices[padding_mask] = 0
+
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with a random word
+ indices_random = (
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
+ )
+ random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+
+@dataclass
+class DataCollatorForSOP(DataCollatorForLanguageModeling):
+ """
+ Data collator used for sentence order prediction task.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for both masked language modeling and sentence order prediction
+ """
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
+ "DataCollatorForLanguageModeling instead.",
+ FutureWarning,
+ )
+ # Forward to the dataclass-generated parent __init__ so that `tokenizer` and the other fields are actually set
+ super().__init__(*args, **kwargs)
+
+ def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
+ import torch
+ from torch.nn.utils.rnn import pad_sequence
+
+ input_ids = [example["input_ids"] for example in examples]
+ input_ids = _torch_collate_batch(input_ids, self.tokenizer)
+ input_ids, labels, attention_mask = self.mask_tokens(input_ids)
+
+ token_type_ids = [example["token_type_ids"] for example in examples]
+ # The length of token_type_ids (segment ids) varies due to randomness; pad at the end, as in the original implementation
+ token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
+
+ sop_label_list = [example["sentence_order_label"] for example in examples]
+ sentence_order_label = torch.stack(sop_label_list)
+
+ return {
+ "input_ids": input_ids,
+ "labels": labels,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ "sentence_order_label": sentence_order_label,
+ }
+
+ def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
+ """
+ Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
+ original. N-gram not applied yet.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+
+ labels = inputs.clone()
+ # We sample a few tokens in each sequence for masked-LM training (with probability `self.mlm_probability`, 0.15 by default as in BERT/RoBERTa)
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
+ masked_indices = torch.bernoulli(probability_matrix).bool()
+ # The sampled probability is `1` for masked positions; in the ALBERT model, however, an attention-mask value of `0` means masked, so invert the values
+ attention_mask = (~masked_indices).float()
+ if self.tokenizer._pad_token is not None:
+ attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ attention_mask.masked_fill_(attention_padding_mask, value=1.0)
+ labels[~masked_indices] = -100  # We only compute loss on masked tokens; -100 is the default ignore_index for cross-entropy
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with a random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels, attention_mask
+
+
+@dataclass
+class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
+ """
+ Data collator used for permutation language modeling.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for permutation language modeling with procedures specific to XLNet
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ plm_probability: float = 1 / 6
+ max_span_length: int = 5 # maximum length of a span of masked tokens
+ return_tensors: str = "pt"
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _torch_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _tf_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _numpy_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if inputs.size(1) % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = inputs.clone()
+ # Creating the mask and target_mapping tensors
+ masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
+ target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
+
+ for i in range(labels.size(0)):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = labels.size(1)
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+ start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+ # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = torch.eye(labels.size(1))
+
+ special_tokens_mask = torch.tensor(
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
+ dtype=torch.bool,
+ )
+ masked_indices.masked_fill_(special_tokens_mask, value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ masked_indices.masked_fill_(padding_mask, value=0.0)
+ else:
+ # No pad token: nothing to exclude here, but `padding_mask` is still needed for `non_func_mask` below
+ padding_mask = torch.zeros_like(labels, dtype=torch.bool)
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs[masked_indices] = self.tokenizer.mask_token_id
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
+
+ for i in range(labels.size(0)):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ perm_index = torch.arange(labels.size(1))
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
+ # Permute the two halves such that they do not cross over
+ perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = torch.flatten(perm_index.transpose(0, 1))
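+ # e.g. for a length-6 sequence: arange -> [[0, 3], [1, 4], [2, 5]] after the reshape/transpose;
+ # shuffling the rows, transposing back and flattening might give [1, 0, 2, 4, 3, 5], i.e. the
+ # two halves are permuted in lockstep and never cross over.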
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
+ perm_mask[i] = (
+ perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
+ ) & masked_indices[i]
+
+ return inputs.long(), perm_mask, target_mapping, labels.long()
+
+ def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ import tensorflow as tf
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if tf.shape(inputs)[1] % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = tf.identity(inputs)
+ # Creating the mask and target_mapping tensors
+ masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
+ labels_shape = tf.shape(labels)
+ target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
+
+ for i in range(len(labels)):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = tf.shape(labels)[1]
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ span_length = randint(1, self.max_span_length)  # random.randint is inclusive on both ends
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+ start_index = cur_len + randint(0, context_length - span_length)  # inclusive upper bound keeps the span inside the context
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+ # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = np.eye(labels_shape[1])
+ masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
+ target_mapping = tf.convert_to_tensor(target_mapping)
+ special_tokens_mask = tf.convert_to_tensor(
+ [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
+ for val in labels.numpy().tolist()
+ ],
+ )
+ special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
+ masked_indices = masked_indices & ~special_tokens_mask
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices = masked_indices & ~padding_mask
+ else:
+ # No pad token: nothing to exclude here, but `padding_mask` is still needed for `non_func_mask` below
+ padding_mask = tf.zeros_like(labels, dtype=tf.bool)
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
+ labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
+
+ perm_mask = []
+
+ for i in range(len(labels)):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ # tf.range is the equivalent of torch.arange
+ perm_index = tf.range(labels_shape[1])
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
+ # Permute the two halves such that they do not cross over
+ perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
+ perm_mask.append(
+ (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
+ & masked_indices[i]
+ )
+ perm_mask = tf.stack(perm_mask, axis=0)
+
+ return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
+
+ def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if inputs.shape[1] % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = np.copy(inputs)
+ # Creating the mask and target_mapping tensors
+ masked_indices = np.full(labels.shape, 0, dtype=bool)
+ target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
+
+ for i in range(labels.shape[0]):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = labels.shape[1]
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ span_length = randint(1, self.max_span_length)  # random.randint is inclusive on both ends
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+ start_index = cur_len + randint(0, context_length - span_length)  # inclusive upper bound keeps the span inside the context
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+ # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = np.eye(labels.shape[1])
+
+ special_tokens_mask = np.array(
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
+ dtype=bool,
+ )
+ masked_indices[special_tokens_mask] = 0
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices[padding_mask] = 0
+ else:
+ # No pad token: nothing to exclude here, but `padding_mask` is still needed for `non_func_mask` below
+ padding_mask = np.zeros_like(labels, dtype=bool)
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs[masked_indices] = self.tokenizer.mask_token_id
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
+
+ for i in range(labels.shape[0]):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ perm_index = np.arange(labels.shape[1])
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
+ # Permute the two halves such that they do not cross over
+ np.random.shuffle(perm_index)
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = perm_index.T.flatten()
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index[~masked_indices[i] & non_func_mask[i]] = -1
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
+ perm_mask[i] = (
+ perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
+ ) & masked_indices[i]
+
+ return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/utils.py b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..936f5a51e9fcf4c4189eb444e567d761e8fa0865
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/utils.py
@@ -0,0 +1,349 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+import dataclasses
+import json
+from dataclasses import dataclass
+from typing import List, Optional, Union
+
+from ...utils import is_tf_available, is_torch_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class InputExample:
+ """
+ A single training/test example for simple sequence classification.
+
+ Args:
+ guid: Unique id for the example.
+ text_a: string. The untokenized text of the first sequence. For single
+ sequence tasks, only this sequence must be specified.
+ text_b: (Optional) string. The untokenized text of the second sequence.
+ Must be specified only for sequence pair tasks.
+ label: (Optional) string. The label of the example. This should be
+ specified for train and dev examples, but not for test examples.
+ """
+
+ guid: str
+ text_a: str
+ text_b: Optional[str] = None
+ label: Optional[str] = None
+
+ def to_json_string(self):
+ """Serializes this instance to a JSON string."""
+ return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
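+
+ # A quick sketch of serializing an example (hypothetical values):
+ #
+ #     ex = InputExample(guid="train-0", text_a="premise", text_b="hypothesis", label="entailment")
+ #     ex.to_json_string()  # pretty-printed JSON of the four fields, ending in a newline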
+
+
+@dataclass(frozen=True)
+class InputFeatures:
+ """
+ A single set of features of data. Property names are the same names as the corresponding inputs to a model.
+
+ Args:
+ input_ids: Indices of input sequence tokens in the vocabulary.
+ attention_mask: Mask to avoid performing attention on padding token indices.
+ Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
+ tokens.
+ token_type_ids: (Optional) Segment token indices to indicate first and second
+ portions of the inputs. Only some models use them.
+ label: (Optional) Label corresponding to the input. Int for classification problems,
+ float for regression problems.
+ """
+
+ input_ids: List[int]
+ attention_mask: Optional[List[int]] = None
+ token_type_ids: Optional[List[int]] = None
+ label: Optional[Union[int, float]] = None
+
+ def to_json_string(self):
+ """Serializes this instance to a JSON string."""
+ return json.dumps(dataclasses.asdict(self)) + "\n"
+
+
+class DataProcessor:
+ """Base class for data converters for sequence classification data sets."""
+
+ def get_example_from_tensor_dict(self, tensor_dict):
+ """
+ Gets an example from a dict with tensorflow tensors.
+
+ Args:
+ tensor_dict: Keys and values should match the corresponding Glue
+ tensorflow_dataset examples.
+ """
+ raise NotImplementedError()
+
+ def get_train_examples(self, data_dir):
+ """Gets a collection of [`InputExample`] for the train set."""
+ raise NotImplementedError()
+
+ def get_dev_examples(self, data_dir):
+ """Gets a collection of [`InputExample`] for the dev set."""
+ raise NotImplementedError()
+
+ def get_test_examples(self, data_dir):
+ """Gets a collection of [`InputExample`] for the test set."""
+ raise NotImplementedError()
+
+ def get_labels(self):
+ """Gets the list of labels for this data set."""
+ raise NotImplementedError()
+
+ def tfds_map(self, example):
+ """
+ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
+ examples to the correct format.
+ """
+ if len(self.get_labels()) > 1:
+ example.label = self.get_labels()[int(example.label)]
+ return example
+
+ @classmethod
+ def _read_tsv(cls, input_file, quotechar=None):
+ """Reads a tab separated value file."""
+ with open(input_file, "r", encoding="utf-8-sig") as f:
+ return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
+
+
+class SingleSentenceClassificationProcessor(DataProcessor):
+ """Generic processor for a single sentence classification data set."""
+
+ def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
+ self.labels = [] if labels is None else labels
+ self.examples = [] if examples is None else examples
+ self.mode = mode
+ self.verbose = verbose
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, idx):
+ if isinstance(idx, slice):
+ return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
+ return self.examples[idx]
+
+ @classmethod
+ def create_from_csv(
+ cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
+ ):
+ processor = cls(**kwargs)
+ processor.add_examples_from_csv(
+ file_name,
+ split_name=split_name,
+ column_label=column_label,
+ column_text=column_text,
+ column_id=column_id,
+ skip_first_row=skip_first_row,
+ overwrite_labels=True,
+ overwrite_examples=True,
+ )
+ return processor
+
+ @classmethod
+ def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
+ processor = cls(**kwargs)
+ processor.add_examples(texts_or_text_and_labels, labels=labels)
+ return processor
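+
+ # A minimal sketch of the factory above, using in-memory (text, label) pairs:
+ #
+ #     processor = SingleSentenceClassificationProcessor.create_from_examples(
+ #         [("great movie", "pos"), ("terrible plot", "neg")]
+ #     )
+ #     assert len(processor) == 2 and set(processor.labels) == {"pos", "neg"}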
+
+ def add_examples_from_csv(
+ self,
+ file_name,
+ split_name="",
+ column_label=0,
+ column_text=1,
+ column_id=None,
+ skip_first_row=False,
+ overwrite_labels=False,
+ overwrite_examples=False,
+ ):
+ lines = self._read_tsv(file_name)
+ if skip_first_row:
+ lines = lines[1:]
+ texts = []
+ labels = []
+ ids = []
+ for i, line in enumerate(lines):
+ texts.append(line[column_text])
+ labels.append(line[column_label])
+ if column_id is not None:
+ ids.append(line[column_id])
+ else:
+ guid = f"{split_name}-{i}" if split_name else str(i)
+ ids.append(guid)
+
+ return self.add_examples(
+ texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
+ )
+
+ def add_examples(
+ self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
+ ):
+ if labels is not None and len(texts_or_text_and_labels) != len(labels):
+ raise ValueError(
+ f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
+ )
+ if ids is not None and len(texts_or_text_and_labels) != len(ids):
+ raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
+ if ids is None:
+ ids = [None] * len(texts_or_text_and_labels)
+ if labels is None:
+ labels = [None] * len(texts_or_text_and_labels)
+ examples = []
+ added_labels = set()
+ for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
+ if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
+ text, label = text_or_text_and_label
+ else:
+ text = text_or_text_and_label
+ added_labels.add(label)
+ examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
+
+ # Update examples
+ if overwrite_examples:
+ self.examples = examples
+ else:
+ self.examples.extend(examples)
+
+ # Update labels
+ if overwrite_labels:
+ self.labels = list(added_labels)
+ else:
+ self.labels = list(set(self.labels).union(added_labels))
+
+ return self.examples
+
+ def get_features(
+ self,
+ tokenizer,
+ max_length=None,
+ pad_on_left=False,
+ pad_token=0,
+ mask_padding_with_zero=True,
+ return_tensors=None,
+ ):
+ """
+ Convert examples in a list of `InputFeatures`
+
+ Args:
+ tokenizer: Instance of a tokenizer that will tokenize the examples
+ max_length: Maximum example length
+ pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
+ pad_token: Padding token
+ mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
+ and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
+ values)
+
+ Returns:
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
+ task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
+ `InputFeatures` which can be fed to the model.
+
+ """
+ if max_length is None:
+ max_length = tokenizer.model_max_length  # `tokenizer.max_len` was removed in transformers v4; `model_max_length` is the current attribute
+
+ label_map = {label: i for i, label in enumerate(self.labels)}
+
+ all_input_ids = []
+ for ex_index, example in enumerate(self.examples):
+ if ex_index % 10000 == 0:
+ logger.info(f"Tokenizing example {ex_index}")
+
+ input_ids = tokenizer.encode(
+ example.text_a,
+ add_special_tokens=True,
+ max_length=min(max_length, tokenizer.model_max_length),
+ )
+ all_input_ids.append(input_ids)
+
+ batch_length = max(len(input_ids) for input_ids in all_input_ids)
+
+ features = []
+ for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
+ if ex_index % 10000 == 0:
+ logger.info(f"Writing example {ex_index}/{len(self.examples)}")
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
+ # tokens are attended to.
+ attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
+
+ # Zero-pad up to the sequence length.
+ padding_length = batch_length - len(input_ids)
+ if pad_on_left:
+ input_ids = ([pad_token] * padding_length) + input_ids
+ attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
+ else:
+ input_ids = input_ids + ([pad_token] * padding_length)
+ attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
+
+ if len(input_ids) != batch_length:
+ raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
+ if len(attention_mask) != batch_length:
+ raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
+
+ if self.mode == "classification":
+ label = label_map[example.label]
+ elif self.mode == "regression":
+ label = float(example.label)
+ else:
+ raise ValueError(self.mode)
+
+ if ex_index < 5 and self.verbose:
+ logger.info("*** Example ***")
+ logger.info(f"guid: {example.guid}")
+ logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
+ logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
+ logger.info(f"label: {example.label} (id = {label})")
+
+ features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
+
+ if return_tensors is None:
+ return features
+ elif return_tensors == "tf":
+ if not is_tf_available():
+ raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
+ import tensorflow as tf
+
+ def gen():
+ for ex in features:
+ yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
+
+ dataset = tf.data.Dataset.from_generator(
+ gen,
+ ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
+ ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
+ )
+ return dataset
+ elif return_tensors == "pt":
+ if not is_torch_available():
+ raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
+ import torch
+ from torch.utils.data import TensorDataset
+
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
+ all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
+ if self.mode == "classification":
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
+ elif self.mode == "regression":
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
+
+ dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
+ return dataset
+ else:
+ raise ValueError("return_tensors should be one of 'tf' or 'pt'")
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/data/processors/xnli.py b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/xnli.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f1a11fcd6b4ef167fc77fb1cc6d9acbbadaccf0
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/data/processors/xnli.py
@@ -0,0 +1,97 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XNLI utils (dataset loading and evaluation)"""
+
+
+import os
+
+from ...utils import logging
+from .utils import DataProcessor, InputExample
+
+
+logger = logging.get_logger(__name__)
+
+
+class XnliProcessor(DataProcessor):
+ """
+ Processor for the XNLI dataset. Adapted from
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
+ """
+
+ def __init__(self, language, train_language=None):
+ self.language = language
+ self.train_language = train_language
+
+ def get_train_examples(self, data_dir):
+ """See base class."""
+ lg = self.language if self.train_language is None else self.train_language
+ lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
+ examples = []
+ for i, line in enumerate(lines):
+ if i == 0:
+ continue
+ guid = f"train-{i}"
+ text_a = line[0]
+ text_b = line[1]
+ label = "contradiction" if line[2] == "contradictory" else line[2]
+ if not isinstance(text_a, str):
+ raise ValueError(f"Training input {text_a} is not a string")
+ if not isinstance(text_b, str):
+ raise ValueError(f"Training input {text_b} is not a string")
+ if not isinstance(label, str):
+ raise ValueError(f"Training label {label} is not a string")
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
+ return examples
+
+ def get_test_examples(self, data_dir):
+ """See base class."""
+ lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
+ examples = []
+ for i, line in enumerate(lines):
+ if i == 0:
+ continue
+ language = line[0]
+ if language != self.language:
+ continue
+ guid = f"test-{i}"
+ text_a = line[6]
+ text_b = line[7]
+ label = line[1]
+ if not isinstance(text_a, str):
+ raise ValueError(f"Test input {text_a} is not a string")
+ if not isinstance(text_b, str):
+ raise ValueError(f"Test input {text_b} is not a string")
+ if not isinstance(label, str):
+ raise ValueError(f"Test label {label} is not a string")
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
+ return examples
+
+ def get_labels(self):
+ """See base class."""
+ return ["contradiction", "entailment", "neutral"]
+
+
+xnli_processors = {
+ "xnli": XnliProcessor,
+}
+
+xnli_output_modes = {
+ "xnli": "classification",
+}
+
+xnli_tasks_num_labels = {
+ "xnli": 3,
+}
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/feature_extraction_whisper.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/feature_extraction_whisper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3abb348f03aad2d754f6db021a0fc5445a1b05d8
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/feature_extraction_whisper.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/tokenization_whisper.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/tokenization_whisper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48b5c49fa8b1e4862ae6668087ca44646e6a91a4
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/whisper/__pycache__/tokenization_whisper.cpython-310.pyc differ