diff --git a/.gitattributes b/.gitattributes
index 808df9913cfd283a0919ef9e44cac2e042b46619..bffd50884cbb0a2634d3090b11c096d7d6a46bcd 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -727,3 +727,4 @@ mplug_owl2/lib/python3.10/lib-dynload/_hashlib.cpython-310-x86_64-linux-gnu.so f
mplug_owl2/lib/itcl4.2.4/libitcl4.2.4.so filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/sqlite3.44.2/libsqlite3.44.2.so filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/tk8.6/demos/images/teapot.ppm filter=lfs diff=lfs merge=lfs -text
diff --git a/mplug_owl2/lib/pkgconfig/libcrypto.pc b/mplug_owl2/lib/pkgconfig/libcrypto.pc
new file mode 100644
index 0000000000000000000000000000000000000000..423a8ec3321515f09f78bde5fde62722d648bf6c
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/libcrypto.pc
@@ -0,0 +1,13 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+enginesdir=${libdir}/engines-3
+modulesdir=${libdir}/ossl-modules
+
+Name: OpenSSL-libcrypto
+Description: OpenSSL cryptography library
+Version: 3.0.16
+Libs: -L${libdir} -lcrypto
+Libs.private: -ldl -pthread
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/libffi.pc b/mplug_owl2/lib/pkgconfig/libffi.pc
new file mode 100644
index 0000000000000000000000000000000000000000..edc9afaac400348c1e97bc41dcf9d6ad26d0b073
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/libffi.pc
@@ -0,0 +1,11 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+toolexeclibdir=${libdir}
+includedir=/root/envs/mplug_owl2/include
+
+Name: libffi
+Description: Library supporting Foreign Function Interfaces
+Version: 3.4.4
+Libs: -L${toolexeclibdir} -lffi
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/liblzma.pc b/mplug_owl2/lib/pkgconfig/liblzma.pc
new file mode 100644
index 0000000000000000000000000000000000000000..504060e4d7c1946f314e22a2c746632cd1412961
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/liblzma.pc
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: 0BSD
+# Author: Lasse Collin
+
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: liblzma
+Description: General purpose data compression library
+URL: https://tukaani.org/xz/
+Version: 5.6.4
+Cflags: -I${includedir}
+Cflags.private: -DLZMA_API_STATIC
+Libs: -L${libdir} -llzma
+Libs.private: -pthread
diff --git a/mplug_owl2/lib/pkgconfig/libssl.pc b/mplug_owl2/lib/pkgconfig/libssl.pc
new file mode 100644
index 0000000000000000000000000000000000000000..86cba2379ceb25d20a2ccf13ed282fd9d140af40
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/libssl.pc
@@ -0,0 +1,11 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: OpenSSL-libssl
+Description: Secure Sockets Layer and cryptography libraries
+Version: 3.0.16
+Requires.private: libcrypto
+Libs: -L${libdir} -lssl
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/menuw.pc b/mplug_owl2/lib/pkgconfig/menuw.pc
new file mode 100644
index 0000000000000000000000000000000000000000..4903074034e4538aa3eeacd1e5efd690f8b71d76
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/menuw.pc
@@ -0,0 +1,19 @@
+# pkg-config file generated by gen-pkgconfig
+# vile:makemode
+
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include/ncursesw
+abi_version=6
+major_version=6
+version=6.4.20221231
+
+Name: menuw
+Description: ncurses 6.4 add-on library
+Version: ${version}
+URL: https://invisible-island.net/ncurses
+Requires.private: ncursesw
+Libs: -L/root/envs/mplug_owl2/lib -Wl,-O2 -Wl,--sort-common -Wl,--disable-new-dtags -Wl,--gc-sections -Wl,-rpath,/root/envs/mplug_owl2/lib -Wl,-rpath-link,/root/envs/mplug_owl2/lib -lmenuw
+Libs.private:
+Cflags: -D_GNU_SOURCE -DNCURSES_WIDECHAR -I${includedir} -I/root/envs/mplug_owl2/include
diff --git a/mplug_owl2/lib/pkgconfig/python-3.10-embed.pc b/mplug_owl2/lib/pkgconfig/python-3.10-embed.pc
new file mode 100644
index 0000000000000000000000000000000000000000..25617700890271b32d10285ef223a24514a2397b
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/python-3.10-embed.pc
@@ -0,0 +1,13 @@
+# See: man pkg-config
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: Python
+Description: Embed Python into an application
+Requires:
+Version: 3.10
+Libs.private: -lcrypt -lpthread -ldl -lutil -lm
+Libs: -L${libdir} -lpython3.10
+Cflags: -I${includedir}/python3.10
diff --git a/mplug_owl2/lib/pkgconfig/python3.pc b/mplug_owl2/lib/pkgconfig/python3.pc
new file mode 100644
index 0000000000000000000000000000000000000000..f9b17512c647cd2816432a76351a762019cc628c
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/python3.pc
@@ -0,0 +1,13 @@
+# See: man pkg-config
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: Python
+Description: Build a C extension for Python
+Requires:
+Version: 3.10
+Libs.private: -lcrypt -lpthread -ldl -lutil -lm
+Libs:
+Cflags: -I${includedir}/python3.10
diff --git a/mplug_owl2/lib/pkgconfig/readline.pc b/mplug_owl2/lib/pkgconfig/readline.pc
new file mode 100644
index 0000000000000000000000000000000000000000..4e30c0081b003cb1ee085821e99192c566f768fc
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/readline.pc
@@ -0,0 +1,12 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: Readline
+Description: Gnu Readline library for command line editing
+URL: http://tiswww.cwru.edu/php/chet/readline/rltop.html
+Version: 8.2
+Requires.private: tinfo
+Libs: -L${libdir} -lreadline
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/sqlite3.pc b/mplug_owl2/lib/pkgconfig/sqlite3.pc
new file mode 100644
index 0000000000000000000000000000000000000000..a14e1d07b255b5b73fde36292c577480480a063c
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/sqlite3.pc
@@ -0,0 +1,13 @@
+# Package Information for pkg-config
+
+prefix=/root/envs/mplug_owl2
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: SQLite
+Description: SQL database engine
+Version: 3.45.3
+Libs: -L${libdir} -lsqlite3
+Libs.private: -lz -lm -ldl -lpthread
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/tk.pc b/mplug_owl2/lib/pkgconfig/tk.pc
new file mode 100644
index 0000000000000000000000000000000000000000..9600639e8125bdb0835a39fda8f9881809b58fcd
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/tk.pc
@@ -0,0 +1,15 @@
+# tk pkg-config source file
+
+prefix=/root/envs/mplug_owl2
+exec_prefix=/root/envs/mplug_owl2
+libdir=/root/envs/mplug_owl2/lib
+includedir=${prefix}/include
+
+Name: The Tk Toolkit
+Description: Tk is a cross-platform graphical user interface toolkit, the standard GUI not only for Tcl, but for many other dynamic languages as well.
+URL: https://www.tcl-lang.org/
+Version: 8.6.14
+Requires: tcl >= 8.6
+Libs: -L${libdir} -ltk8.6 -ltkstub8.6
+Libs.private: -lX11
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/pkgconfig/uuid.pc b/mplug_owl2/lib/pkgconfig/uuid.pc
new file mode 100644
index 0000000000000000000000000000000000000000..3bdd16f7eb63c27738179ad4b833adacfa97f82d
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/uuid.pc
@@ -0,0 +1,11 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=/root/envs/mplug_owl2
+libdir=/root/envs/mplug_owl2/lib
+includedir=/root/envs/mplug_owl2/include
+
+Name: uuid
+Description: Universally unique id library
+Version: 2.32.1
+Requires:
+Cflags: -I${includedir}/uuid
+Libs: -L${libdir} -luuid
diff --git a/mplug_owl2/lib/pkgconfig/zlib.pc b/mplug_owl2/lib/pkgconfig/zlib.pc
new file mode 100644
index 0000000000000000000000000000000000000000..426b4fb536a90cb1ceb90096756a866d2591ea5b
--- /dev/null
+++ b/mplug_owl2/lib/pkgconfig/zlib.pc
@@ -0,0 +1,13 @@
+prefix=/root/envs/mplug_owl2
+exec_prefix=/root/envs/mplug_owl2
+libdir=/root/envs/mplug_owl2/lib
+sharedlibdir=/root/envs/mplug_owl2/lib
+includedir=/root/envs/mplug_owl2/include
+
+Name: zlib
+Description: zlib compression library
+Version: 1.2.13
+
+Requires:
+Libs: -L${libdir} -L${sharedlibdir} -lz
+Cflags: -I${includedir}
diff --git a/mplug_owl2/lib/tk8.6/demos/images/teapot.ppm b/mplug_owl2/lib/tk8.6/demos/images/teapot.ppm
new file mode 100644
index 0000000000000000000000000000000000000000..59307eb96ee451f7682034add2d2737673ef04c2
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/demos/images/teapot.ppm
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:786f29b88771e439187dd2e86ad4d255dd185e0c1ea3f8c37d21770fd1df253a
+size 196623
diff --git a/mplug_owl2/lib/tk8.6/images/README b/mplug_owl2/lib/tk8.6/images/README
new file mode 100644
index 0000000000000000000000000000000000000000..7b61d5a0bc0eb901b8e3b6e329f89ff8531adce2
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/images/README
@@ -0,0 +1,7 @@
+README - images directory
+
+This directory includes images for the Tcl Logo and the Tcl Powered
+Logo. Please feel free to use the Tcl Powered Logo on any of your
+products that employ the use of Tcl or Tk. The Tcl logo may also be
+used to promote Tcl in your product documentation, web site or other
+places you so desire.
diff --git a/mplug_owl2/lib/tk8.6/images/logo.eps b/mplug_owl2/lib/tk8.6/images/logo.eps
new file mode 100644
index 0000000000000000000000000000000000000000..0d05d3404bd092d449ac89245dc26d8b3e4f92b2
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/images/logo.eps
@@ -0,0 +1,2091 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: Adobe Illustrator(TM) 5.5
+%%For: (Bud Northern) (Mark Anderson Design)
+%%Title: (TCL/TK LOGO.ILLUS)
+%%CreationDate: (8/1/96) (4:58 PM)
+%%BoundingBox: 251 331 371 512
+%%HiResBoundingBox: 251.3386 331.5616 370.5213 511.775
+%%DocumentProcessColors: Cyan Magenta Yellow
+%%DocumentSuppliedResources: procset Adobe_level2_AI5 1.0 0
+%%+ procset Adobe_IllustratorA_AI5 1.0 0
+%AI5_FileFormat 1.2
+%AI3_ColorUsage: Color
+%%DocumentCustomColors: (TCL RED)
+%%CMYKCustomColor: 0 0.45 1 0 (Orange)
+%%+ 0 0.25 1 0 (Orange Yellow)
+%%+ 0 0.79 0.91 0 (TCL RED)
+%AI3_TemplateBox: 306 396 306 396
+%AI3_TileBox: 12 12 600 780
+%AI3_DocumentPreview: Macintosh_ColorPic
+%AI5_ArtSize: 612 792
+%AI5_RulerUnits: 0
+%AI5_ArtFlags: 1 0 0 1 0 0 1 1 0
+%AI5_TargetResolution: 800
+%AI5_NumLayers: 1
+%AI5_OpenToView: 90 576 2 938 673 18 1 1 2 40
+%AI5_OpenViewLayers: 7
+%%EndComments
+%%BeginProlog
+%%BeginResource: procset Adobe_level2_AI5 1.0 0
+%%Title: (Adobe Illustrator (R) Version 5.0 Level 2 Emulation)
+%%Version: 1.0
+%%CreationDate: (04/10/93) ()
+%%Copyright: ((C) 1987-1993 Adobe Systems Incorporated All Rights Reserved)
+userdict /Adobe_level2_AI5 21 dict dup begin
+ put
+ /packedarray where not
+ {
+ userdict begin
+ /packedarray
+ {
+ array astore readonly
+ } bind def
+ /setpacking /pop load def
+ /currentpacking false def
+ end
+ 0
+ } if
+ pop
+ userdict /defaultpacking currentpacking put true setpacking
+ /initialize
+ {
+ Adobe_level2_AI5 begin
+ } bind def
+ /terminate
+ {
+ currentdict Adobe_level2_AI5 eq
+ {
+ end
+ } if
+ } bind def
+ mark
+ /setcustomcolor where not
+ {
+ /findcmykcustomcolor
+ {
+ 5 packedarray
+ } bind def
+ /setcustomcolor
+ {
+ exch aload pop pop
+ 4
+ {
+ 4 index mul 4 1 roll
+ } repeat
+ 5 -1 roll pop
+ setcmykcolor
+ }
+ def
+ } if
+
+ /gt38? mark {version cvx exec} stopped {cleartomark true} {38 gt exch pop} ifelse def
+ userdict /deviceDPI 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt put
+ userdict /level2?
+ systemdict /languagelevel known dup
+ {
+ pop systemdict /languagelevel get 2 ge
+ } if
+ put
+ level2? not
+ {
+ /setcmykcolor where not
+ {
+ /setcmykcolor
+ {
+ exch .11 mul add exch .59 mul add exch .3 mul add
+ 1 exch sub setgray
+ } def
+ } if
+ /currentcmykcolor where not
+ {
+ /currentcmykcolor
+ {
+ 0 0 0 1 currentgray sub
+ } def
+ } if
+ /setoverprint where not
+ {
+ /setoverprint /pop load def
+ } if
+ /selectfont where not
+ {
+ /selectfont
+ {
+ exch findfont exch
+ dup type /arraytype eq
+ {
+ makefont
+ }
+ {
+ scalefont
+ } ifelse
+ setfont
+ } bind def
+ } if
+ /cshow where not
+ {
+ /cshow
+ {
+ [
+ 0 0 5 -1 roll aload pop
+ ] cvx bind forall
+ } bind def
+ } if
+ } if
+ cleartomark
+ /anyColor?
+ {
+ add add add 0 ne
+ } bind def
+ /testColor
+ {
+ gsave
+ setcmykcolor currentcmykcolor
+ grestore
+ } bind def
+ /testCMYKColorThrough
+ {
+ testColor anyColor?
+ } bind def
+ userdict /composite?
+ level2?
+ {
+ gsave 1 1 1 1 setcmykcolor currentcmykcolor grestore
+ add add add 4 eq
+ }
+ {
+ 1 0 0 0 testCMYKColorThrough
+ 0 1 0 0 testCMYKColorThrough
+ 0 0 1 0 testCMYKColorThrough
+ 0 0 0 1 testCMYKColorThrough
+ and and and
+ } ifelse
+ put
+ composite? not
+ {
+ userdict begin
+ gsave
+ /cyan? 1 0 0 0 testCMYKColorThrough def
+ /magenta? 0 1 0 0 testCMYKColorThrough def
+ /yellow? 0 0 1 0 testCMYKColorThrough def
+ /black? 0 0 0 1 testCMYKColorThrough def
+ grestore
+ /isCMYKSep? cyan? magenta? yellow? black? or or or def
+ /customColor? isCMYKSep? not def
+ end
+ } if
+ end defaultpacking setpacking
+%%EndResource
+%%BeginResource: procset Adobe_IllustratorA_AI5 1.1 0
+%%Title: (Adobe Illustrator (R) Version 5.0 Abbreviated Prolog)
+%%Version: 1.1
+%%CreationDate: (3/7/1994) ()
+%%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
+currentpacking true setpacking
+userdict /Adobe_IllustratorA_AI5_vars 70 dict dup begin
+put
+/_lp /none def
+/_pf
+{
+} def
+/_ps
+{
+} def
+/_psf
+{
+} def
+/_pss
+{
+} def
+/_pjsf
+{
+} def
+/_pjss
+{
+} def
+/_pola 0 def
+/_doClip 0 def
+/cf currentflat def
+/_tm matrix def
+/_renderStart
+[
+/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0
+] def
+/_renderEnd
+[
+null null null null /i1 /i1 /i1 /i1
+] def
+/_render -1 def
+/_rise 0 def
+/_ax 0 def
+/_ay 0 def
+/_cx 0 def
+/_cy 0 def
+/_leading
+[
+0 0
+] def
+/_ctm matrix def
+/_mtx matrix def
+/_sp 16#020 def
+/_hyphen (-) def
+/_fScl 0 def
+/_cnt 0 def
+/_hs 1 def
+/_nativeEncoding 0 def
+/_useNativeEncoding 0 def
+/_tempEncode 0 def
+/_pntr 0 def
+/_tDict 2 dict def
+/_wv 0 def
+/Tx
+{
+} def
+/Tj
+{
+} def
+/CRender
+{
+} def
+/_AI3_savepage
+{
+} def
+/_gf null def
+/_cf 4 array def
+/_if null def
+/_of false def
+/_fc
+{
+} def
+/_gs null def
+/_cs 4 array def
+/_is null def
+/_os false def
+/_sc
+{
+} def
+/discardSave null def
+/buffer 256 string def
+/beginString null def
+/endString null def
+/endStringLength null def
+/layerCnt 1 def
+/layerCount 1 def
+/perCent (%) 0 get def
+/perCentSeen? false def
+/newBuff null def
+/newBuffButFirst null def
+/newBuffLast null def
+/clipForward? false def
+end
+userdict /Adobe_IllustratorA_AI5 74 dict dup begin
+put
+/initialize
+{
+ Adobe_IllustratorA_AI5 dup begin
+ Adobe_IllustratorA_AI5_vars begin
+ discardDict
+ {
+ bind pop pop
+ } forall
+ dup /nc get begin
+ {
+ dup xcheck 1 index type /operatortype ne and
+ {
+ bind
+ } if
+ pop pop
+ } forall
+ end
+ newpath
+} def
+/terminate
+{
+ end
+ end
+} def
+/_
+null def
+/ddef
+{
+ Adobe_IllustratorA_AI5_vars 3 1 roll put
+} def
+/xput
+{
+ dup load dup length exch maxlength eq
+ {
+ dup dup load dup
+ length 2 mul dict copy def
+ } if
+ load begin
+ def
+ end
+} def
+/npop
+{
+ {
+ pop
+ } repeat
+} def
+/sw
+{
+ dup length exch stringwidth
+ exch 5 -1 roll 3 index mul add
+ 4 1 roll 3 1 roll mul add
+} def
+/swj
+{
+ dup 4 1 roll
+ dup length exch stringwidth
+ exch 5 -1 roll 3 index mul add
+ 4 1 roll 3 1 roll mul add
+ 6 2 roll /_cnt 0 ddef
+ {
+ 1 index eq
+ {
+ /_cnt _cnt 1 add ddef
+ } if
+ } forall
+ pop
+ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
+} def
+/ss
+{
+ 4 1 roll
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put pop
+ gsave
+ false charpath currentpoint
+ 4 index setmatrix
+ stroke
+ grestore
+ moveto
+ 2 copy rmoveto
+ } exch cshow
+ 3 npop
+} def
+/jss
+{
+ 4 1 roll
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put
+ gsave
+ _sp eq
+ {
+ exch 6 index 6 index 6 index 5 -1 roll widthshow
+ currentpoint
+ }
+ {
+ false charpath currentpoint
+ 4 index setmatrix stroke
+ } ifelse
+ grestore
+ moveto
+ 2 copy rmoveto
+ } exch cshow
+ 6 npop
+} def
+/sp
+{
+ {
+ 2 npop (0) exch
+ 2 copy 0 exch put pop
+ false charpath
+ 2 copy rmoveto
+ } exch cshow
+ 2 npop
+} def
+/jsp
+{
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put
+ _sp eq
+ {
+ exch 5 index 5 index 5 index 5 -1 roll widthshow
+ }
+ {
+ false charpath
+ } ifelse
+ 2 copy rmoveto
+ } exch cshow
+ 5 npop
+} def
+/pl
+{
+ transform
+ 0.25 sub round 0.25 add exch
+ 0.25 sub round 0.25 add exch
+ itransform
+} def
+/setstrokeadjust where
+{
+ pop true setstrokeadjust
+ /c
+ {
+ curveto
+ } def
+ /C
+ /c load def
+ /v
+ {
+ currentpoint 6 2 roll curveto
+ } def
+ /V
+ /v load def
+ /y
+ {
+ 2 copy curveto
+ } def
+ /Y
+ /y load def
+ /l
+ {
+ lineto
+ } def
+ /L
+ /l load def
+ /m
+ {
+ moveto
+ } def
+}
+{
+ /c
+ {
+ pl curveto
+ } def
+ /C
+ /c load def
+ /v
+ {
+ currentpoint 6 2 roll pl curveto
+ } def
+ /V
+ /v load def
+ /y
+ {
+ pl 2 copy curveto
+ } def
+ /Y
+ /y load def
+ /l
+ {
+ pl lineto
+ } def
+ /L
+ /l load def
+ /m
+ {
+ pl moveto
+ } def
+} ifelse
+/d
+{
+ setdash
+} def
+/cf
+{
+} def
+/i
+{
+ dup 0 eq
+ {
+ pop cf
+ } if
+ setflat
+} def
+/j
+{
+ setlinejoin
+} def
+/J
+{
+ setlinecap
+} def
+/M
+{
+ setmiterlimit
+} def
+/w
+{
+ setlinewidth
+} def
+/H
+{
+} def
+/h
+{
+ closepath
+} def
+/N
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ clip /_doClip 0 ddef
+ } if
+ newpath
+ }
+ {
+ /CRender
+ {
+ N
+ } ddef
+ } ifelse
+} def
+/n
+{
+ N
+} def
+/F
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ gsave _pf grestore clip newpath /_lp /none ddef _fc
+ /_doClip 0 ddef
+ }
+ {
+ _pf
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ F
+ } ddef
+ } ifelse
+} def
+/f
+{
+ closepath
+ F
+} def
+/S
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ gsave _ps grestore clip newpath /_lp /none ddef _sc
+ /_doClip 0 ddef
+ }
+ {
+ _ps
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ S
+ } ddef
+ } ifelse
+} def
+/s
+{
+ closepath
+ S
+} def
+/B
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ gsave F grestore
+ {
+ gsave S grestore clip newpath /_lp /none ddef _sc
+ /_doClip 0 ddef
+ }
+ {
+ S
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ B
+ } ddef
+ } ifelse
+} def
+/b
+{
+ closepath
+ B
+} def
+/W
+{
+ /_doClip 1 ddef
+} def
+/*
+{
+ count 0 ne
+ {
+ dup type /stringtype eq
+ {
+ pop
+ } if
+ } if
+ newpath
+} def
+/u
+{
+} def
+/U
+{
+} def
+/q
+{
+ _pola 0 eq
+ {
+ gsave
+ } if
+} def
+/Q
+{
+ _pola 0 eq
+ {
+ grestore
+ } if
+} def
+/*u
+{
+ _pola 1 add /_pola exch ddef
+} def
+/*U
+{
+ _pola 1 sub /_pola exch ddef
+ _pola 0 eq
+ {
+ CRender
+ } if
+} def
+/D
+{
+ pop
+} def
+/*w
+{
+} def
+/*W
+{
+} def
+/`
+{
+ /_i save ddef
+ clipForward?
+ {
+ nulldevice
+ } if
+ 6 1 roll 4 npop
+ concat pop
+ userdict begin
+ /showpage
+ {
+ } def
+ 0 setgray
+ 0 setlinecap
+ 1 setlinewidth
+ 0 setlinejoin
+ 10 setmiterlimit
+ [] 0 setdash
+ /setstrokeadjust where {pop false setstrokeadjust} if
+ newpath
+ 0 setgray
+ false setoverprint
+} def
+/~
+{
+ end
+ _i restore
+} def
+/O
+{
+ 0 ne
+ /_of exch ddef
+ /_lp /none ddef
+} def
+/R
+{
+ 0 ne
+ /_os exch ddef
+ /_lp /none ddef
+} def
+/g
+{
+ /_gf exch ddef
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _gf setgray
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/G
+{
+ /_gs exch ddef
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _gs setgray
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/k
+{
+ _cf astore pop
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _cf aload pop setcmykcolor
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/K
+{
+ _cs astore pop
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _cs aload pop setcmykcolor
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/x
+{
+ /_gf exch ddef
+ findcmykcustomcolor
+ /_if exch ddef
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _if _gf 1 exch sub setcustomcolor
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/X
+{
+ /_gs exch ddef
+ findcmykcustomcolor
+ /_is exch ddef
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _is _gs 1 exch sub setcustomcolor
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/A
+{
+ pop
+} def
+/annotatepage
+{
+userdict /annotatepage 2 copy known {get exec} {pop pop} ifelse
+} def
+/discard
+{
+ save /discardSave exch store
+ discardDict begin
+ /endString exch store
+ gt38?
+ {
+ 2 add
+ } if
+ load
+ stopped
+ pop
+ end
+ discardSave restore
+} bind def
+userdict /discardDict 7 dict dup begin
+put
+/pre38Initialize
+{
+ /endStringLength endString length store
+ /newBuff buffer 0 endStringLength getinterval store
+ /newBuffButFirst newBuff 1 endStringLength 1 sub getinterval store
+ /newBuffLast newBuff endStringLength 1 sub 1 getinterval store
+} def
+/shiftBuffer
+{
+ newBuff 0 newBuffButFirst putinterval
+ newBuffLast 0
+ currentfile read not
+ {
+ stop
+ } if
+ put
+} def
+0
+{
+ pre38Initialize
+ mark
+ currentfile newBuff readstring exch pop
+ {
+ {
+ newBuff endString eq
+ {
+ cleartomark stop
+ } if
+ shiftBuffer
+ } loop
+ }
+ {
+ stop
+ } ifelse
+} def
+1
+{
+ pre38Initialize
+ /beginString exch store
+ mark
+ currentfile newBuff readstring exch pop
+ {
+ {
+ newBuff beginString eq
+ {
+ /layerCount dup load 1 add store
+ }
+ {
+ newBuff endString eq
+ {
+ /layerCount dup load 1 sub store
+ layerCount 0 eq
+ {
+ cleartomark stop
+ } if
+ } if
+ } ifelse
+ shiftBuffer
+ } loop
+ }
+ {
+ stop
+ } ifelse
+} def
+2
+{
+ mark
+ {
+ currentfile buffer readline not
+ {
+ stop
+ } if
+ endString eq
+ {
+ cleartomark stop
+ } if
+ } loop
+} def
+3
+{
+ /beginString exch store
+ /layerCnt 1 store
+ mark
+ {
+ currentfile buffer readline not
+ {
+ stop
+ } if
+ dup beginString eq
+ {
+ pop /layerCnt dup load 1 add store
+ }
+ {
+ endString eq
+ {
+ layerCnt 1 eq
+ {
+ cleartomark stop
+ }
+ {
+ /layerCnt dup load 1 sub store
+ } ifelse
+ } if
+ } ifelse
+ } loop
+} def
+end
+userdict /clipRenderOff 15 dict dup begin
+put
+{
+ /n /N /s /S /f /F /b /B
+}
+{
+ {
+ _doClip 1 eq
+ {
+ /_doClip 0 ddef clip
+ } if
+ newpath
+ } def
+} forall
+/Tr /pop load def
+/Bb {} def
+/BB /pop load def
+/Bg {12 npop} def
+/Bm {6 npop} def
+/Bc /Bm load def
+/Bh {4 npop} def
+end
+/Lb
+{
+ 4 npop
+ 6 1 roll
+ pop
+ 4 1 roll
+ pop pop pop
+ 0 eq
+ {
+ 0 eq
+ {
+ (%AI5_BeginLayer) 1 (%AI5_EndLayer--) discard
+ }
+ {
+ /clipForward? true def
+
+ /Tx /pop load def
+ /Tj /pop load def
+ currentdict end clipRenderOff begin begin
+ } ifelse
+ }
+ {
+ 0 eq
+ {
+ save /discardSave exch store
+ } if
+ } ifelse
+} bind def
+/LB
+{
+ discardSave dup null ne
+ {
+ restore
+ }
+ {
+ pop
+ clipForward?
+ {
+ currentdict
+ end
+ end
+ begin
+
+ /clipForward? false ddef
+ } if
+ } ifelse
+} bind def
+/Pb
+{
+ pop pop
+ 0 (%AI5_EndPalette) discard
+} bind def
+/Np
+{
+ 0 (%AI5_End_NonPrinting--) discard
+} bind def
+/Ln /pop load def
+/Ap
+/pop load def
+/Ar
+{
+ 72 exch div
+ 0 dtransform dup mul exch dup mul add sqrt
+ dup 1 lt
+ {
+ pop 1
+ } if
+ setflat
+} def
+/Mb
+{
+ q
+} def
+/Md
+{
+} def
+/MB
+{
+ Q
+} def
+/nc 3 dict def
+nc begin
+/setgray
+{
+ pop
+} bind def
+/setcmykcolor
+{
+ 4 npop
+} bind def
+/setcustomcolor
+{
+ 2 npop
+} bind def
+currentdict readonly pop
+end
+currentdict readonly pop
+end
+setpacking
+%%EndResource
+%%EndProlog
+%%BeginSetup
+Adobe_level2_AI5 /initialize get exec
+Adobe_IllustratorA_AI5 /initialize get exec
+%AI5_Begin_NonPrinting
+Np
+%AI3_BeginPattern: (Yellow Stripe)
+(Yellow Stripe) 8.4499 4.6 80.4499 76.6 [
+%AI3_Tile
+(0 O 0 R 0 0.4 1 0 k 0 0.4 1 0 K) @
+(
+800 Ar
+0 J 0 j 3.6 w 4 M []0 d
+%AI3_Note:
+0 D
+8.1999 8.1999 m
+80.6999 8.1999 L
+S
+8.1999 22.6 m
+80.6999 22.6 L
+S
+8.1999 37.0001 m
+80.6999 37.0001 L
+S
+8.1999 51.3999 m
+80.6999 51.3999 L
+S
+8.1999 65.8 m
+80.6999 65.8 L
+S
+8.1999 15.3999 m
+80.6999 15.3999 L
+S
+8.1999 29.8 m
+80.6999 29.8 L
+S
+8.1999 44.1999 m
+80.6999 44.1999 L
+S
+8.1999 58.6 m
+80.6999 58.6 L
+S
+8.1999 73.0001 m
+80.6999 73.0001 L
+S
+) &
+] E
+%AI3_EndPattern
+%AI5_End_NonPrinting--
+%AI5_Begin_NonPrinting
+Np
+3 Bn
+%AI5_BeginGradient: (Black & White)
+(Black & White) 0 2 Bd
+[
+<
+FFFEFDFCFBFAF9F8F7F6F5F4F3F2F1F0EFEEEDECEBEAE9E8E7E6E5E4E3E2E1E0DFDEDDDCDBDAD9D8
+D7D6D5D4D3D2D1D0CFCECDCCCBCAC9C8C7C6C5C4C3C2C1C0BFBEBDBCBBBAB9B8B7B6B5B4B3B2B1B0
+AFAEADACABAAA9A8A7A6A5A4A3A2A1A09F9E9D9C9B9A999897969594939291908F8E8D8C8B8A8988
+87868584838281807F7E7D7C7B7A797877767574737271706F6E6D6C6B6A69686766656463626160
+5F5E5D5C5B5A595857565554535251504F4E4D4C4B4A494847464544434241403F3E3D3C3B3A3938
+37363534333231302F2E2D2C2B2A292827262524232221201F1E1D1C1B1A19181716151413121110
+0F0E0D0C0B0A09080706050403020100
+>
+0 %_Br
+[
+0 0 50 100 %_Bs
+1 0 50 0 %_Bs
+BD
+%AI5_EndGradient
+%AI5_BeginGradient: (Red & Yellow)
+(Red & Yellow) 0 2 Bd
+[
+0
+<
+000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627
+28292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F
+505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F7071727374757677
+78797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9F
+A0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7
+C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF
+F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF
+>
+<
+FFFFFEFEFDFDFDFCFCFBFBFBFAFAF9F9F9F8F8F7F7F7F6F6F5F5F5F4F4F3F3F3F2F2F1F1F1F0F0EF
+EFEFEEEEEDEDEDECECEBEBEBEAEAE9E9E9E8E8E7E7E7E6E6E5E5E5E4E4E3E3E3E2E2E1E1E1E0E0DF
+DFDFDEDEDDDDDDDCDCDBDBDBDADAD9D9D9D8D8D7D7D7D6D6D5D5D5D4D4D3D3D3D2D2D1D1D1D0D0CF
+CFCFCECECDCDCDCCCCCBCBCBCACAC9C9C9C8C8C7C7C7C6C6C5C5C5C4C4C3C3C3C2C2C1C1C1C0C0BF
+BFBFBEBEBDBDBDBCBCBBBBBBBABAB9B9B9B8B8B7B7B7B6B6B5B5B5B4B4B3B3B3B2B2B1B1B1B0B0AF
+AFAFAEAEADADADACACABABABAAAAA9A9A9A8A8A7A7A7A6A6A5A5A5A4A4A3A3A3A2A2A1A1A1A0A09F
+9F9F9E9E9D9D9D9C9C9B9B9B9A9A9999
+>
+0
+1 %_Br
+[
+0 1 0.6 0 1 50 100 %_Bs
+0 0 1 0 1 50 0 %_Bs
+BD
+%AI5_EndGradient
+%AI5_BeginGradient: (Yellow & Blue Radial)
+(Yellow & Blue Radial) 1 2 Bd
+[
+<
+000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627
+28292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F
+505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F7071727374757677
+78797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9F
+A0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7
+C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF
+F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF
+>
+<
+1415161718191A1B1C1D1E1F1F202122232425262728292A2A2B2C2D2E2F30313233343536363738
+393A3B3C3D3E3F40414142434445464748494A4B4C4D4D4E4F50515253545556575858595A5B5C5D
+5E5F60616263646465666768696A6B6C6D6E6F6F707172737475767778797A7B7B7C7D7E7F808182
+83848586868788898A8B8C8D8E8F90919292939495969798999A9B9C9D9D9E9FA0A1A2A3A4A5A6A7
+A8A9A9AAABACADAEAFB0B1B2B3B4B4B5B6B7B8B9BABBBCBDBEBFC0C0C1C2C3C4C5C6C7C8C9CACBCB
+CCCDCECFD0D1D2D3D4D5D6D7D7D8D9DADBDCDDDEDFE0E1E2E2E3E4E5E6E7E8E9EAEBECEDEEEEEFF0
+F1F2F3F4F5F6F7F8F9F9FAFBFCFDFEFF
+>
+<
+ABAAAAA9A8A7A7A6A5A5A4A3A3A2A1A1A09F9F9E9D9D9C9B9B9A9999989797969595949393929191
+908F8F8E8D8D8C8B8B8A8989888787868585848383828181807F7F7E7D7D7C7B7B7A797978777776
+7575747373727171706F6F6E6D6D6C6B6B6A6969686767666565646362626160605F5E5E5D5C5C5B
+5A5A5958585756565554545352525150504F4E4E4D4C4C4B4A4A4948484746464544444342424140
+403F3E3E3D3C3C3B3A3A3938383736363534343332323130302F2E2E2D2C2C2B2A2A292828272626
+25242423222121201F1F1E1D1D1C1B1B1A1919181717161515141313121111100F0F0E0D0D0C0B0B
+0A090908070706050504030302010100
+>
+0
+1 %_Br
+[
+0 0.08 0.67 0 1 50 14 %_Bs
+1 1 0 0 1 50 100 %_Bs
+BD
+%AI5_EndGradient
+%AI5_End_NonPrinting--
+%AI5_BeginPalette
+144 170 Pb
+Pn
+Pc
+1 g
+Pc
+0 g
+Pc
+0 0 0 0 k
+Pc
+0.75 g
+Pc
+0.5 g
+Pc
+0.25 g
+Pc
+0 g
+Pc
+Bb
+2 (Black & White) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0.25 0 0 0 k
+Pc
+0.5 0 0 0 k
+Pc
+0.75 0 0 0 k
+Pc
+1 0 0 0 k
+Pc
+0.25 0.25 0 0 k
+Pc
+0.5 0.5 0 0 k
+Pc
+0.75 0.75 0 0 k
+Pc
+1 1 0 0 k
+Pc
+Bb
+2 (Red & Yellow) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0 0.25 0 0 k
+Pc
+0 0.5 0 0 k
+Pc
+0 0.75 0 0 k
+Pc
+0 1 0 0 k
+Pc
+0 0.25 0.25 0 k
+Pc
+0 0.5 0.5 0 k
+Pc
+0 0.75 0.75 0 k
+Pc
+0 1 1 0 k
+Pc
+Bb
+0 0 0 0 Bh
+2 (Yellow & Blue Radial) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0 0 0.25 0 k
+Pc
+0 0 0.5 0 k
+Pc
+0 0 0.75 0 k
+Pc
+0 0 1 0 k
+Pc
+0.25 0 0.25 0 k
+Pc
+0.5 0 0.5 0 k
+Pc
+0.75 0 0.75 0 k
+Pc
+1 0 1 0 k
+Pc
+(Yellow Stripe) 0 0 1 1 0 0 0 0 0 [1 0 0 1 0 0] p
+Pc
+0.25 0.125 0 0 k
+Pc
+0.5 0.25 0 0 k
+Pc
+0.75 0.375 0 0 k
+Pc
+1 0.5 0 0 k
+Pc
+0.125 0.25 0 0 k
+Pc
+0.25 0.5 0 0 k
+Pc
+0.375 0.75 0 0 k
+Pc
+0.5 1 0 0 k
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0 0.25 0.125 0 k
+Pc
+0 0.5 0.25 0 k
+Pc
+0 0.75 0.375 0 k
+Pc
+0 1 0.5 0 k
+Pc
+0 0.125 0.25 0 k
+Pc
+0 0.25 0.5 0 k
+Pc
+0 0.375 0.75 0 k
+Pc
+0 0.5 1 0 k
+Pc
+0 0.79 0.91 0 (TCL RED) 0 x
+Pc
+0.125 0 0.25 0 k
+Pc
+0.25 0 0.5 0 k
+Pc
+0.375 0 0.75 0 k
+Pc
+0.5 0 1 0 k
+Pc
+0.25 0 0.125 0 k
+Pc
+0.5 0 0.25 0 k
+Pc
+0.75 0 0.375 0 k
+Pc
+1 0 0.5 0 k
+Pc
+0.5 1 0 0 k
+Pc
+0.25 0.125 0.125 0 k
+Pc
+0.5 0.25 0.25 0 k
+Pc
+0.75 0.375 0.375 0 k
+Pc
+1 0.5 0.5 0 k
+Pc
+0.25 0.25 0.125 0 k
+Pc
+0.5 0.5 0.25 0 k
+Pc
+0.75 0.75 0.375 0 k
+Pc
+1 1 0.5 0 k
+Pc
+0 1 0.5 0 k
+Pc
+0.125 0.25 0.125 0 k
+Pc
+0.25 0.5 0.25 0 k
+Pc
+0.375 0.75 0.375 0 k
+Pc
+0.5 1 0.5 0 k
+Pc
+0.125 0.25 0.25 0 k
+Pc
+0.25 0.5 0.5 0 k
+Pc
+0.375 0.75 0.75 0 k
+Pc
+0.5 1 1 0 k
+Pc
+0.75 0.75 0.375 0 k
+Pc
+0.125 0.125 0.25 0 k
+Pc
+0.25 0.25 0.5 0 k
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0.5 0.5 1 0 k
+Pc
+0.25 0.125 0.25 0 k
+Pc
+0.5 0.25 0.5 0 k
+Pc
+0.75 0.375 0.75 0 k
+Pc
+1 0.5 1 0 k
+Pc
+0 0.79 0.91 0 (TCL RED) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0.5 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.25 1 0 (Orange Yellow) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 1 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.45 1 0 (Orange) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.79 0.91 0 (TCL RED) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0.65 0 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0 1 0 k
+Pc
+PB
+%AI5_EndPalette
+%%EndSetup
+%AI5_BeginLayer
+1 1 1 1 0 0 0 79 128 255 Lb
+(Layer 1) Ln
+0 A
+u
+1 Ap
+0 O
+0 0.79 0.91 0 (TCL RED) 0 x
+800 Ar
+0 J 0 j 1.25 w 4 M []0 d
+%AI3_Note:
+0 D
+294.5207 335.3041 m
+368.2181 333.001 L
+363.6121 423.9713 L
+370.5213 507.1689 L
+336.5513 505.4417 L
+320.7179 511.775 L
+251.3386 508.0325 L
+254.7931 425.9866 L
+251.3386 331.5616 L
+294.5207 335.3041 L
+f
+u
+0 Ap
+1 0.65 0 0 k
+1 w
+318.1366 400.9627 m
+311.8663 399.2526 l
+315.2864 407.5177 l
+318.7064 430.6032 l
+314.4314 431.4581 l
+319.5616 438.5832 l
+325.9526 462.6014 l
+314.7164 460.2436 l
+320.6412 471.0911 326.9284 478.1557 v
+318.7064 484.469 l
+292.2183 472.8011 299.3434 434.8954 v
+293.8679 435.8542 l
+299.1189 396.1175 l
+294.6797 394.9775 l
+299.2277 385.6974 305.5963 381.2973 v
+306.1744 380.8979 297.6162 412.3629 306.7363 443.7133 c
+307.5914 441.7183 l
+300.3238 408.3015 307.5914 381.2973 v
+307.9261 380.656 311.5598 381.0836 v
+318.1366 393.4813 318.1366 400.9627 v
+f
+u
+*u
+1 g
+271.4311 372.5074 m
+272.7184 372.5074 L
+272.7184 375.1913 L
+273.2858 375.1913 273.8313 375.1913 274.3768 375.2786 c
+274.3768 372.5074 L
+276.2969 372.5074 L
+276.2969 372.0056 L
+274.3768 372.0056 L
+274.3768 365.3286 L
+274.3768 364.9359 274.3768 364.3467 275.2059 364.3467 c
+275.7733 364.3467 276.0787 364.7395 276.4279 365.1541 c
+276.777 364.9141 L
+276.3624 364.0849 275.2932 363.583 274.4204 363.583 c
+272.8494 363.583 272.6748 364.434 272.6748 365.4814 c
+272.6748 372.0056 L
+271.4311 372.0056 L
+271.4311 372.5074 l
+f
+*U
+*u
+290.5617 366.5724 m
+290.0598 365.0232 289.187 363.6703 286.9178 363.583 c
+283.5356 363.583 282.5101 366.3978 282.5101 367.9034 c
+282.5101 371.7874 285.6304 372.7256 286.8741 372.7256 c
+288.2924 372.7256 290.2999 372.071 290.2999 370.3909 c
+290.2999 369.8018 289.9289 369.2344 289.318 369.2344 c
+288.7288 369.2344 288.2924 369.6272 288.2924 370.26 c
+288.2924 371.111 288.9907 371.2201 288.9907 371.4601 c
+288.9907 372.0492 287.616 372.2892 287.136 372.2892 c
+285.0412 372.2892 284.4957 370.7618 284.4957 367.9034 c
+284.4957 366.5942 284.823 365.5905 284.9539 365.285 c
+285.2812 364.5649 285.9577 364.1067 287.0923 364.0413 c
+288.3579 363.9758 289.5798 365.0013 290.1035 366.5724 C
+290.5617 366.5724 l
+f
+*U
+*u
+296.6 363.8667 m
+296.6 364.3686 L
+298.2802 364.3686 L
+298.2802 378.3989 L
+296.6 378.3989 L
+296.6 378.9007 L
+297.5383 378.9007 L
+298.3457 378.9007 299.1966 378.9444 299.9822 379.0971 c
+299.9822 364.3686 L
+301.6623 364.3686 L
+301.6623 363.8667 L
+296.6 363.8667 l
+f
+*U
+*u
+317.4527 372.5074 m
+318.7401 372.5074 L
+318.7401 375.1913 L
+319.3074 375.1913 319.8529 375.1913 320.3984 375.2786 c
+320.3984 372.5074 L
+322.3186 372.5074 L
+322.3186 372.0056 L
+320.3984 372.0056 L
+320.3984 365.3286 L
+320.3984 364.9359 320.3984 364.3467 321.2276 364.3467 c
+321.7949 364.3467 322.1004 364.7395 322.4495 365.1541 c
+322.7986 364.9141 L
+322.384 364.0849 321.3148 363.583 320.442 363.583 c
+318.871 363.583 318.6964 364.434 318.6964 365.4814 c
+318.6964 372.0056 L
+317.4527 372.0056 L
+317.4527 372.5074 l
+f
+*U
+*u
+333.7467 372.0056 m
+333.7467 372.5074 L
+337.3252 372.5074 L
+337.3252 372.0056 L
+335.9942 372.0056 L
+332.983 369.3872 L
+337.1288 364.3686 L
+338.0453 364.3686 L
+338.0453 363.8667 L
+333.8995 363.8667 L
+333.8995 364.3686 L
+334.9905 364.3686 L
+331.3465 368.798 L
+335.0341 371.9401 L
+335.0341 372.0056 L
+333.7467 372.0056 l
+f
+328.4881 363.8667 m
+328.4881 364.3686 L
+329.6227 364.3686 L
+329.6227 378.3989 L
+328.4881 378.3989 L
+328.4881 378.9007 L
+328.8809 378.9007 L
+329.6882 378.9007 330.5392 378.9444 331.3247 379.0971 c
+331.3247 364.3686 L
+332.6339 364.3686 L
+332.6339 363.8667 L
+328.4881 363.8667 l
+f
+*U
+u
+309.5341 446.5364 m
+305.6878 429.3874 306.7947 401.5837 v
+307.1266 393.2441 308.0387 385.5779 309.1527 378.9301 C
+309.1587 378.9297 L
+309.8832 373.0923 310.3679 370.9791 312.2568 363.9454 C
+312.1466 359.4091 L
+297.0216 407.7015 309.5341 446.5364 V
+f
+318.8187 461.4058 m
+322.2203 463.1 327.0966 463.7165 v
+332.427 453.9463 319.3087 437.2655 v
+327.1346 454.735 325.2889 460.2079 v
+323.225 461.4903 318.8187 461.4058 v
+f
+317.2065 432.0795 m
+320.2613 431.3723 321.7279 432.5601 v
+318.8383 421.2839 319.5958 415.0813 v
+320.3533 408.8787 314.8881 404.9079 y
+319.5435 410.7982 318.0802 415.5959 v
+317.0657 418.9214 318.2006 427.4326 319.4809 430.1349 c
+318.2853 430.3025 317.2065 432.0795 v
+f
+314.1861 402.3703 m
+319.2343 402.9744 319.7646 405.5244 v
+320.3824 390.2725 313.3689 383.9873 v
+318.7204 392.3347 317.8807 400.9697 v
+314.1861 402.3703 l
+f
+299.9864 396.0219 m
+298.3586 394.1986 293.4739 398.2203 v
+295.0301 387.9694 304.6978 383.2767 v
+298.0444 388.2897 296.2519 393.7045 v
+298.6029 394.3966 299.9864 396.0219 v
+f
+298.4281 399.9096 m
+291.8229 416.6749 293.2382 439.3286 v
+294.7808 435.2261 299.738 433.7875 v
+297.4026 433.3101 296.0372 433.517 v
+292.5816 423.9535 298.4281 399.9096 v
+f
+326.1736 477.812 m
+323.6983 496.0028 308.2122 477.6066 v
+295.8813 462.9582 297.3508 450.5217 298.1072 443.5831 c
+298.3007 441.8079 295.8131 462.1138 309.3231 475.4768 c
+322.8328 488.8398 325.8846 478.5879 326.1736 477.812 c
+f
+U
+0 0 1 0 k
+303.3623 493.3274 m
+291.211 496.7978 287.3437 456.5222 v
+284.3599 468.9535 292.0777 486.5353 v
+299.7955 504.1172 303.3623 493.3274 y
+f
+288.2873 496.2718 m
+282.0897 486.9502 283.4958 477.0213 v
+278.7953 495.712 288.2873 496.2718 v
+f
+333.8987 470.1328 m
+341.2276 472.8361 330.7334 445.5571 v
+336.1654 453.5292 339.5844 466.0531 v
+341.7789 474.0903 333.8987 470.1328 y
+f
+345.752 472.2583 m
+350.9334 467.5681 347.2615 461.3636 v
+356.4779 471.0481 345.752 472.2583 v
+f
+U
+*u
+273.1765 354.3318 m
+273.1765 353.7507 273.1305 353.2908 272.5159 353.2908 c
+271.8846 353.2908 271.8554 353.7674 271.8554 354.3318 c
+271.8554 356.485 L
+272.148 356.485 L
+272.148 354.3486 L
+272.148 353.8259 272.1773 353.5751 272.5159 353.5751 c
+272.8504 353.5751 272.8839 353.8259 272.8839 354.3486 c
+272.8839 356.485 L
+273.1765 356.485 L
+273.1765 354.3318 l
+f
+*U
+*u
+277.1612 356.485 m
+276.9062 356.485 L
+276.9062 354.3862 l
+276.9062 354.2482 276.9271 354.1061 276.9355 353.9681 C
+276.9229 353.9681 l
+276.8937 354.0768 276.8644 354.1855 276.8268 354.2942 C
+276.1035 356.485 L
+275.8484 356.485 L
+275.8484 353.3326 L
+276.1035 353.3326 L
+276.1035 355.2474 l
+276.1035 355.4523 276.0826 355.653 276.07 355.8579 C
+276.0867 355.8579 l
+276.1244 355.7241 276.1495 355.5819 276.1954 355.4523 C
+276.9062 353.3326 L
+277.1612 353.3326 l
+277.1612 356.485 L
+f
+*U
+*u
+280.1421 353.3326 m
+279.8494 353.3326 L
+279.8494 356.485 L
+280.1421 356.485 L
+280.1421 353.3326 l
+f
+*U
+*u
+283.5141 353.3326 m
+283.2549 353.3326 L
+282.6194 356.485 L
+282.9205 356.485 L
+283.3344 354.1897 L
+283.3511 354.1102 283.3678 353.9054 283.3845 353.7632 c
+283.4013 353.7632 L
+283.4138 353.9054 283.4305 354.1144 283.4431 354.1897 c
+283.8528 356.485 L
+284.1496 356.485 L
+283.5141 353.3326 l
+f
+*U
+*u
+287.6238 356.2174 m
+286.9256 356.2174 L
+286.9256 355.1053 L
+287.6029 355.1053 L
+287.6029 354.8377 L
+286.9256 354.8377 L
+286.9256 353.6002 L
+287.6238 353.6002 L
+287.6238 353.3326 L
+286.6329 353.3326 L
+286.6329 356.485 L
+287.6238 356.485 L
+287.6238 356.2174 l
+f
+*U
+*u
+290.2278 353.3326 m
+290.2278 356.485 L
+290.5414 356.485 L
+290.9804 356.485 291.4026 356.4515 291.4026 355.6823 c
+291.4026 355.2809 291.3148 354.8879 290.8089 354.8712 c
+291.5072 353.3326 L
+291.1978 353.3326 L
+290.5288 354.8753 L
+290.5205 354.8753 L
+290.5205 353.3326 L
+290.2278 353.3326 l
+f
+290.5205 355.1137 m
+290.625 355.1137 L
+291.0347 355.1137 291.1016 355.2558 291.1016 355.6697 c
+291.1016 356.1672 290.9511 356.2174 290.579 356.2174 c
+290.5205 356.2174 L
+290.5205 355.1137 l
+f
+*U
+*u
+295.0981 355.9875 m
+294.9727 356.1296 294.8347 356.2425 294.634 356.2425 c
+294.3414 356.2425 294.1783 356 294.1783 355.7324 c
+294.1783 355.3645 294.4459 355.1931 294.7176 355.0091 c
+294.9852 354.821 295.2528 354.6203 295.2528 354.1855 c
+295.2528 353.7256 294.9559 353.2908 294.4626 353.2908 c
+294.287 353.2908 294.1072 353.341 293.9651 353.4497 c
+293.9651 353.8301 L
+294.0989 353.688 294.2745 353.5751 294.4751 353.5751 c
+294.7845 353.5751 294.9559 353.8468 294.9518 354.1311 c
+294.9559 354.4991 294.6842 354.6621 294.4166 354.8503 c
+294.149 355.0342 293.8773 355.2391 293.8773 355.6906 c
+293.8773 356.1129 294.1365 356.5268 294.6006 356.5268 c
+294.7887 356.5268 294.9476 356.4641 295.0981 356.3596 C
+295.0981 355.9875 l
+f
+*U
+*u
+299.0865 353.3326 m
+298.773 353.3326 L
+298.6559 353.9806 L
+297.9869 353.9806 L
+297.8741 353.3326 L
+297.5605 353.3326 L
+298.1793 356.485 L
+298.4552 356.485 L
+299.0865 353.3326 l
+f
+298.6099 354.2357 m
+298.4009 355.444 L
+298.3632 355.6572 298.3465 355.8746 298.3214 356.0878 c
+298.3047 356.0878 L
+298.2754 355.8746 298.2545 355.6572 298.2211 355.444 c
+298.0371 354.2357 L
+298.6099 354.2357 l
+f
+*U
+*u
+301.8124 353.6002 m
+302.4981 353.6002 L
+302.4981 353.3326 L
+301.5198 353.3326 L
+301.5198 356.485 L
+301.8124 356.485 L
+301.8124 353.6002 l
+f
+*U
+*u
+309.0754 355.9875 m
+308.95 356.1296 308.812 356.2425 308.6114 356.2425 c
+308.3187 356.2425 308.1556 356 308.1556 355.7324 c
+308.1556 355.3645 308.4232 355.1931 308.695 355.0091 c
+308.9626 354.821 309.2301 354.6203 309.2301 354.1855 c
+309.2301 353.7256 308.9333 353.2908 308.4399 353.2908 c
+308.2643 353.2908 308.0846 353.341 307.9424 353.4497 c
+307.9424 353.8301 L
+308.0762 353.688 308.2518 353.5751 308.4525 353.5751 c
+308.7619 353.5751 308.9333 353.8468 308.9291 354.1311 c
+308.9333 354.4991 308.6615 354.6621 308.3939 354.8503 c
+308.1264 355.0342 307.8546 355.2391 307.8546 355.6906 c
+307.8546 356.1129 308.1138 356.5268 308.5779 356.5268 c
+308.766 356.5268 308.9249 356.4641 309.0754 356.3596 C
+309.0754 355.9875 l
+f
+*U
+*u
+312.9468 353.7172 m
+312.8339 353.6378 312.7001 353.5751 312.558 353.5751 c
+311.9977 353.5751 311.9977 354.5492 311.9977 354.9172 c
+311.9977 355.5025 312.0688 356.2425 312.5789 356.2425 c
+312.7252 356.2425 312.8297 356.184 312.9468 356.1045 C
+312.9468 356.4265 l
+312.8506 356.4975 312.6918 356.5268 312.5747 356.5268 c
+311.7134 356.5268 311.6967 355.306 311.6967 354.7959 c
+311.6967 354.2566 311.8054 353.2908 312.5454 353.2908 c
+312.6834 353.2908 312.8381 353.3451 312.9468 353.4204 c
+312.9468 353.7172 L
+f
+*U
+*u
+315.5053 353.3326 m
+315.5053 356.485 L
+315.8188 356.485 L
+316.2578 356.485 316.6801 356.4515 316.6801 355.6823 c
+316.6801 355.2809 316.5923 354.8879 316.0864 354.8712 c
+316.7846 353.3326 L
+316.4752 353.3326 L
+315.8063 354.8753 L
+315.7979 354.8753 L
+315.7979 353.3326 L
+315.5053 353.3326 l
+f
+315.7979 355.1137 m
+315.9025 355.1137 L
+316.3122 355.1137 316.3791 355.2558 316.3791 355.6697 c
+316.3791 356.1672 316.2286 356.2174 315.8565 356.2174 c
+315.7979 356.2174 L
+315.7979 355.1137 l
+f
+*U
+*u
+319.5728 353.3326 m
+319.2802 353.3326 L
+319.2802 356.485 L
+319.5728 356.485 L
+319.5728 353.3326 l
+f
+*U
+*u
+322.2551 353.3326 m
+322.2551 356.485 L
+322.5812 356.485 L
+323.0327 356.485 323.4341 356.4432 323.4341 355.6655 c
+323.4341 355.0551 323.2209 354.8419 322.623 354.8419 c
+322.5477 354.8419 L
+322.5477 353.3326 L
+322.2551 353.3326 l
+f
+322.5477 355.1095 m
+322.6606 355.1095 L
+323.0703 355.1095 323.1205 355.26 323.1331 355.6655 c
+323.1331 356.1004 323.016 356.2174 322.6063 356.2174 c
+322.5477 356.2174 L
+322.5477 355.1095 l
+f
+*U
+*u
+326.9539 356.485 m
+325.7164 356.485 L
+325.7164 356.2174 L
+326.1888 356.2174 L
+326.1888 353.3326 L
+326.4815 353.3326 L
+326.4815 356.2174 L
+326.9539 356.2174 l
+326.9539 356.485 L
+f
+*U
+*u
+329.7077 353.3326 m
+329.4151 353.3326 L
+329.4151 356.485 L
+329.7077 356.485 L
+329.7077 353.3326 l
+f
+*U
+*u
+333.7028 353.3326 m
+333.4477 353.3326 L
+332.737 355.4523 L
+332.691 355.5819 332.6659 355.7241 332.6283 355.8579 c
+332.6116 355.8579 L
+332.6241 355.653 332.645 355.4523 332.645 355.2474 c
+332.645 353.3326 L
+332.39 353.3326 L
+332.39 356.485 L
+332.645 356.485 L
+333.3683 354.2942 L
+333.4059 354.1855 333.4352 354.0768 333.4645 353.9681 c
+333.477 353.9681 L
+333.4686 354.1061 333.4477 354.2482 333.4477 354.3862 c
+333.4477 356.485 L
+333.7028 356.485 L
+333.7028 353.3326 l
+f
+*U
+*u
+336.9846 354.9966 m
+337.7037 354.9966 L
+337.7037 354.4154 L
+337.7037 353.9179 337.6787 353.2908 337.0264 353.2908 c
+336.3617 353.2908 336.299 353.989 336.299 354.9841 c
+336.299 355.7283 336.3868 356.5268 337.0557 356.5268 c
+337.432 356.5268 337.6201 356.276 337.6996 355.9331 c
+337.4111 355.8202 L
+337.3776 356.0084 337.2982 356.2425 337.0682 356.2425 c
+336.6334 356.2383 336.6 355.5652 336.6 355.0091 c
+336.6 353.8427 336.7463 353.5751 337.0515 353.5751 c
+337.3818 353.5751 337.4111 353.8176 337.4111 354.4907 c
+337.4111 354.729 L
+336.9846 354.729 L
+336.9846 354.9966 l
+f
+*U
+U
+U
+337.6667 -3924 m
+(N) *
+337.6667 4716 m
+(N) *
+LB
+%AI5_EndLayer--
+%%PageTrailer
+gsave annotatepage grestore showpage
+%%Trailer
+Adobe_IllustratorA_AI5 /terminate get exec
+Adobe_level2_AI5 /terminate get exec
+%%EOF
diff --git a/mplug_owl2/lib/tk8.6/images/logo64.gif b/mplug_owl2/lib/tk8.6/images/logo64.gif
new file mode 100644
index 0000000000000000000000000000000000000000..1401554ec80451603fbf50973d2ed612e415cc4f
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/images/logo64.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:138c240382304f350383b02ed56c69103a9431c0544eb1ec5dcd7dec7a555dd9
+size 1670
diff --git a/mplug_owl2/lib/tk8.6/images/pwrdLogo.eps b/mplug_owl2/lib/tk8.6/images/pwrdLogo.eps
new file mode 100644
index 0000000000000000000000000000000000000000..e11d9e96451bbbd3fd6ddc0e40ed9896ea96f25f
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/images/pwrdLogo.eps
@@ -0,0 +1,1897 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: Adobe Illustrator(TM) 5.5
+%%For: (Bud Northern) (Mark Anderson Design)
+%%Title: (TCL PWRD LOGO.ILLUS)
+%%CreationDate: (8/1/96) (4:59 PM)
+%%BoundingBox: 242 302 377 513
+%%HiResBoundingBox: 242.0523 302.5199 376.3322 512.5323
+%%DocumentProcessColors: Cyan Magenta Yellow
+%%DocumentSuppliedResources: procset Adobe_level2_AI5 1.0 0
+%%+ procset Adobe_IllustratorA_AI5 1.0 0
+%AI5_FileFormat 1.2
+%AI3_ColorUsage: Color
+%%CMYKCustomColor: 0 0.45 1 0 (Orange)
+%%+ 0 0.25 1 0 (Orange Yellow)
+%%+ 0 0.79 0.91 0 (PANTONE Warm Red CV)
+%%+ 0 0.79 0.91 0 (TCL RED)
+%AI3_TemplateBox: 306 396 306 396
+%AI3_TileBox: 12 12 600 780
+%AI3_DocumentPreview: Macintosh_ColorPic
+%AI5_ArtSize: 612 792
+%AI5_RulerUnits: 0
+%AI5_ArtFlags: 1 0 0 1 0 0 1 1 0
+%AI5_TargetResolution: 800
+%AI5_NumLayers: 1
+%AI5_OpenToView: 102 564 2 938 673 18 1 1 2 40
+%AI5_OpenViewLayers: 7
+%%EndComments
+%%BeginProlog
+%%BeginResource: procset Adobe_level2_AI5 1.0 0
+%%Title: (Adobe Illustrator (R) Version 5.0 Level 2 Emulation)
+%%Version: 1.0
+%%CreationDate: (04/10/93) ()
+%%Copyright: ((C) 1987-1993 Adobe Systems Incorporated All Rights Reserved)
+userdict /Adobe_level2_AI5 21 dict dup begin
+ put
+ /packedarray where not
+ {
+ userdict begin
+ /packedarray
+ {
+ array astore readonly
+ } bind def
+ /setpacking /pop load def
+ /currentpacking false def
+ end
+ 0
+ } if
+ pop
+ userdict /defaultpacking currentpacking put true setpacking
+ /initialize
+ {
+ Adobe_level2_AI5 begin
+ } bind def
+ /terminate
+ {
+ currentdict Adobe_level2_AI5 eq
+ {
+ end
+ } if
+ } bind def
+ mark
+ /setcustomcolor where not
+ {
+ /findcmykcustomcolor
+ {
+ 5 packedarray
+ } bind def
+ /setcustomcolor
+ {
+ exch aload pop pop
+ 4
+ {
+ 4 index mul 4 1 roll
+ } repeat
+ 5 -1 roll pop
+ setcmykcolor
+ }
+ def
+ } if
+
+ /gt38? mark {version cvx exec} stopped {cleartomark true} {38 gt exch pop} ifelse def
+ userdict /deviceDPI 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt put
+ userdict /level2?
+ systemdict /languagelevel known dup
+ {
+ pop systemdict /languagelevel get 2 ge
+ } if
+ put
+ level2? not
+ {
+ /setcmykcolor where not
+ {
+ /setcmykcolor
+ {
+ exch .11 mul add exch .59 mul add exch .3 mul add
+ 1 exch sub setgray
+ } def
+ } if
+ /currentcmykcolor where not
+ {
+ /currentcmykcolor
+ {
+ 0 0 0 1 currentgray sub
+ } def
+ } if
+ /setoverprint where not
+ {
+ /setoverprint /pop load def
+ } if
+ /selectfont where not
+ {
+ /selectfont
+ {
+ exch findfont exch
+ dup type /arraytype eq
+ {
+ makefont
+ }
+ {
+ scalefont
+ } ifelse
+ setfont
+ } bind def
+ } if
+ /cshow where not
+ {
+ /cshow
+ {
+ [
+ 0 0 5 -1 roll aload pop
+ ] cvx bind forall
+ } bind def
+ } if
+ } if
+ cleartomark
+ /anyColor?
+ {
+ add add add 0 ne
+ } bind def
+ /testColor
+ {
+ gsave
+ setcmykcolor currentcmykcolor
+ grestore
+ } bind def
+ /testCMYKColorThrough
+ {
+ testColor anyColor?
+ } bind def
+ userdict /composite?
+ level2?
+ {
+ gsave 1 1 1 1 setcmykcolor currentcmykcolor grestore
+ add add add 4 eq
+ }
+ {
+ 1 0 0 0 testCMYKColorThrough
+ 0 1 0 0 testCMYKColorThrough
+ 0 0 1 0 testCMYKColorThrough
+ 0 0 0 1 testCMYKColorThrough
+ and and and
+ } ifelse
+ put
+ composite? not
+ {
+ userdict begin
+ gsave
+ /cyan? 1 0 0 0 testCMYKColorThrough def
+ /magenta? 0 1 0 0 testCMYKColorThrough def
+ /yellow? 0 0 1 0 testCMYKColorThrough def
+ /black? 0 0 0 1 testCMYKColorThrough def
+ grestore
+ /isCMYKSep? cyan? magenta? yellow? black? or or or def
+ /customColor? isCMYKSep? not def
+ end
+ } if
+ end defaultpacking setpacking
+%%EndResource
+%%BeginResource: procset Adobe_IllustratorA_AI5 1.1 0
+%%Title: (Adobe Illustrator (R) Version 5.0 Abbreviated Prolog)
+%%Version: 1.1
+%%CreationDate: (3/7/1994) ()
+%%Copyright: ((C) 1987-1994 Adobe Systems Incorporated All Rights Reserved)
+currentpacking true setpacking
+userdict /Adobe_IllustratorA_AI5_vars 70 dict dup begin
+put
+/_lp /none def
+/_pf
+{
+} def
+/_ps
+{
+} def
+/_psf
+{
+} def
+/_pss
+{
+} def
+/_pjsf
+{
+} def
+/_pjss
+{
+} def
+/_pola 0 def
+/_doClip 0 def
+/cf currentflat def
+/_tm matrix def
+/_renderStart
+[
+/e0 /r0 /a0 /o0 /e1 /r1 /a1 /i0
+] def
+/_renderEnd
+[
+null null null null /i1 /i1 /i1 /i1
+] def
+/_render -1 def
+/_rise 0 def
+/_ax 0 def
+/_ay 0 def
+/_cx 0 def
+/_cy 0 def
+/_leading
+[
+0 0
+] def
+/_ctm matrix def
+/_mtx matrix def
+/_sp 16#020 def
+/_hyphen (-) def
+/_fScl 0 def
+/_cnt 0 def
+/_hs 1 def
+/_nativeEncoding 0 def
+/_useNativeEncoding 0 def
+/_tempEncode 0 def
+/_pntr 0 def
+/_tDict 2 dict def
+/_wv 0 def
+/Tx
+{
+} def
+/Tj
+{
+} def
+/CRender
+{
+} def
+/_AI3_savepage
+{
+} def
+/_gf null def
+/_cf 4 array def
+/_if null def
+/_of false def
+/_fc
+{
+} def
+/_gs null def
+/_cs 4 array def
+/_is null def
+/_os false def
+/_sc
+{
+} def
+/discardSave null def
+/buffer 256 string def
+/beginString null def
+/endString null def
+/endStringLength null def
+/layerCnt 1 def
+/layerCount 1 def
+/perCent (%) 0 get def
+/perCentSeen? false def
+/newBuff null def
+/newBuffButFirst null def
+/newBuffLast null def
+/clipForward? false def
+end
+userdict /Adobe_IllustratorA_AI5 74 dict dup begin
+put
+/initialize
+{
+ Adobe_IllustratorA_AI5 dup begin
+ Adobe_IllustratorA_AI5_vars begin
+ discardDict
+ {
+ bind pop pop
+ } forall
+ dup /nc get begin
+ {
+ dup xcheck 1 index type /operatortype ne and
+ {
+ bind
+ } if
+ pop pop
+ } forall
+ end
+ newpath
+} def
+/terminate
+{
+ end
+ end
+} def
+/_
+null def
+/ddef
+{
+ Adobe_IllustratorA_AI5_vars 3 1 roll put
+} def
+/xput
+{
+ dup load dup length exch maxlength eq
+ {
+ dup dup load dup
+ length 2 mul dict copy def
+ } if
+ load begin
+ def
+ end
+} def
+/npop
+{
+ {
+ pop
+ } repeat
+} def
+/sw
+{
+ dup length exch stringwidth
+ exch 5 -1 roll 3 index mul add
+ 4 1 roll 3 1 roll mul add
+} def
+/swj
+{
+ dup 4 1 roll
+ dup length exch stringwidth
+ exch 5 -1 roll 3 index mul add
+ 4 1 roll 3 1 roll mul add
+ 6 2 roll /_cnt 0 ddef
+ {
+ 1 index eq
+ {
+ /_cnt _cnt 1 add ddef
+ } if
+ } forall
+ pop
+ exch _cnt mul exch _cnt mul 2 index add 4 1 roll 2 index add 4 1 roll pop pop
+} def
+/ss
+{
+ 4 1 roll
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put pop
+ gsave
+ false charpath currentpoint
+ 4 index setmatrix
+ stroke
+ grestore
+ moveto
+ 2 copy rmoveto
+ } exch cshow
+ 3 npop
+} def
+/jss
+{
+ 4 1 roll
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put
+ gsave
+ _sp eq
+ {
+ exch 6 index 6 index 6 index 5 -1 roll widthshow
+ currentpoint
+ }
+ {
+ false charpath currentpoint
+ 4 index setmatrix stroke
+ } ifelse
+ grestore
+ moveto
+ 2 copy rmoveto
+ } exch cshow
+ 6 npop
+} def
+/sp
+{
+ {
+ 2 npop (0) exch
+ 2 copy 0 exch put pop
+ false charpath
+ 2 copy rmoveto
+ } exch cshow
+ 2 npop
+} def
+/jsp
+{
+ {
+ 2 npop
+ (0) exch 2 copy 0 exch put
+ _sp eq
+ {
+ exch 5 index 5 index 5 index 5 -1 roll widthshow
+ }
+ {
+ false charpath
+ } ifelse
+ 2 copy rmoveto
+ } exch cshow
+ 5 npop
+} def
+/pl
+{
+ transform
+ 0.25 sub round 0.25 add exch
+ 0.25 sub round 0.25 add exch
+ itransform
+} def
+/setstrokeadjust where
+{
+ pop true setstrokeadjust
+ /c
+ {
+ curveto
+ } def
+ /C
+ /c load def
+ /v
+ {
+ currentpoint 6 2 roll curveto
+ } def
+ /V
+ /v load def
+ /y
+ {
+ 2 copy curveto
+ } def
+ /Y
+ /y load def
+ /l
+ {
+ lineto
+ } def
+ /L
+ /l load def
+ /m
+ {
+ moveto
+ } def
+}
+{
+ /c
+ {
+ pl curveto
+ } def
+ /C
+ /c load def
+ /v
+ {
+ currentpoint 6 2 roll pl curveto
+ } def
+ /V
+ /v load def
+ /y
+ {
+ pl 2 copy curveto
+ } def
+ /Y
+ /y load def
+ /l
+ {
+ pl lineto
+ } def
+ /L
+ /l load def
+ /m
+ {
+ pl moveto
+ } def
+} ifelse
+/d
+{
+ setdash
+} def
+/cf
+{
+} def
+/i
+{
+ dup 0 eq
+ {
+ pop cf
+ } if
+ setflat
+} def
+/j
+{
+ setlinejoin
+} def
+/J
+{
+ setlinecap
+} def
+/M
+{
+ setmiterlimit
+} def
+/w
+{
+ setlinewidth
+} def
+/H
+{
+} def
+/h
+{
+ closepath
+} def
+/N
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ clip /_doClip 0 ddef
+ } if
+ newpath
+ }
+ {
+ /CRender
+ {
+ N
+ } ddef
+ } ifelse
+} def
+/n
+{
+ N
+} def
+/F
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ gsave _pf grestore clip newpath /_lp /none ddef _fc
+ /_doClip 0 ddef
+ }
+ {
+ _pf
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ F
+ } ddef
+ } ifelse
+} def
+/f
+{
+ closepath
+ F
+} def
+/S
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ {
+ gsave _ps grestore clip newpath /_lp /none ddef _sc
+ /_doClip 0 ddef
+ }
+ {
+ _ps
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ S
+ } ddef
+ } ifelse
+} def
+/s
+{
+ closepath
+ S
+} def
+/B
+{
+ _pola 0 eq
+ {
+ _doClip 1 eq
+ gsave F grestore
+ {
+ gsave S grestore clip newpath /_lp /none ddef _sc
+ /_doClip 0 ddef
+ }
+ {
+ S
+ } ifelse
+ }
+ {
+ /CRender
+ {
+ B
+ } ddef
+ } ifelse
+} def
+/b
+{
+ closepath
+ B
+} def
+/W
+{
+ /_doClip 1 ddef
+} def
+/*
+{
+ count 0 ne
+ {
+ dup type /stringtype eq
+ {
+ pop
+ } if
+ } if
+ newpath
+} def
+/u
+{
+} def
+/U
+{
+} def
+/q
+{
+ _pola 0 eq
+ {
+ gsave
+ } if
+} def
+/Q
+{
+ _pola 0 eq
+ {
+ grestore
+ } if
+} def
+/*u
+{
+ _pola 1 add /_pola exch ddef
+} def
+/*U
+{
+ _pola 1 sub /_pola exch ddef
+ _pola 0 eq
+ {
+ CRender
+ } if
+} def
+/D
+{
+ pop
+} def
+/*w
+{
+} def
+/*W
+{
+} def
+/`
+{
+ /_i save ddef
+ clipForward?
+ {
+ nulldevice
+ } if
+ 6 1 roll 4 npop
+ concat pop
+ userdict begin
+ /showpage
+ {
+ } def
+ 0 setgray
+ 0 setlinecap
+ 1 setlinewidth
+ 0 setlinejoin
+ 10 setmiterlimit
+ [] 0 setdash
+ /setstrokeadjust where {pop false setstrokeadjust} if
+ newpath
+ 0 setgray
+ false setoverprint
+} def
+/~
+{
+ end
+ _i restore
+} def
+/O
+{
+ 0 ne
+ /_of exch ddef
+ /_lp /none ddef
+} def
+/R
+{
+ 0 ne
+ /_os exch ddef
+ /_lp /none ddef
+} def
+/g
+{
+ /_gf exch ddef
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _gf setgray
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/G
+{
+ /_gs exch ddef
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _gs setgray
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/k
+{
+ _cf astore pop
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _cf aload pop setcmykcolor
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/K
+{
+ _cs astore pop
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _cs aload pop setcmykcolor
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/x
+{
+ /_gf exch ddef
+ findcmykcustomcolor
+ /_if exch ddef
+ /_fc
+ {
+ _lp /fill ne
+ {
+ _of setoverprint
+ _if _gf 1 exch sub setcustomcolor
+ /_lp /fill ddef
+ } if
+ } ddef
+ /_pf
+ {
+ _fc
+ fill
+ } ddef
+ /_psf
+ {
+ _fc
+ ashow
+ } ddef
+ /_pjsf
+ {
+ _fc
+ awidthshow
+ } ddef
+ /_lp /none ddef
+} def
+/X
+{
+ /_gs exch ddef
+ findcmykcustomcolor
+ /_is exch ddef
+ /_sc
+ {
+ _lp /stroke ne
+ {
+ _os setoverprint
+ _is _gs 1 exch sub setcustomcolor
+ /_lp /stroke ddef
+ } if
+ } ddef
+ /_ps
+ {
+ _sc
+ stroke
+ } ddef
+ /_pss
+ {
+ _sc
+ ss
+ } ddef
+ /_pjss
+ {
+ _sc
+ jss
+ } ddef
+ /_lp /none ddef
+} def
+/A
+{
+ pop
+} def
+/annotatepage
+{
+userdict /annotatepage 2 copy known {get exec} {pop pop} ifelse
+} def
+/discard
+{
+ save /discardSave exch store
+ discardDict begin
+ /endString exch store
+ gt38?
+ {
+ 2 add
+ } if
+ load
+ stopped
+ pop
+ end
+ discardSave restore
+} bind def
+userdict /discardDict 7 dict dup begin
+put
+/pre38Initialize
+{
+ /endStringLength endString length store
+ /newBuff buffer 0 endStringLength getinterval store
+ /newBuffButFirst newBuff 1 endStringLength 1 sub getinterval store
+ /newBuffLast newBuff endStringLength 1 sub 1 getinterval store
+} def
+/shiftBuffer
+{
+ newBuff 0 newBuffButFirst putinterval
+ newBuffLast 0
+ currentfile read not
+ {
+ stop
+ } if
+ put
+} def
+0
+{
+ pre38Initialize
+ mark
+ currentfile newBuff readstring exch pop
+ {
+ {
+ newBuff endString eq
+ {
+ cleartomark stop
+ } if
+ shiftBuffer
+ } loop
+ }
+ {
+ stop
+ } ifelse
+} def
+1
+{
+ pre38Initialize
+ /beginString exch store
+ mark
+ currentfile newBuff readstring exch pop
+ {
+ {
+ newBuff beginString eq
+ {
+ /layerCount dup load 1 add store
+ }
+ {
+ newBuff endString eq
+ {
+ /layerCount dup load 1 sub store
+ layerCount 0 eq
+ {
+ cleartomark stop
+ } if
+ } if
+ } ifelse
+ shiftBuffer
+ } loop
+ }
+ {
+ stop
+ } ifelse
+} def
+2
+{
+ mark
+ {
+ currentfile buffer readline not
+ {
+ stop
+ } if
+ endString eq
+ {
+ cleartomark stop
+ } if
+ } loop
+} def
+3
+{
+ /beginString exch store
+ /layerCnt 1 store
+ mark
+ {
+ currentfile buffer readline not
+ {
+ stop
+ } if
+ dup beginString eq
+ {
+ pop /layerCnt dup load 1 add store
+ }
+ {
+ endString eq
+ {
+ layerCnt 1 eq
+ {
+ cleartomark stop
+ }
+ {
+ /layerCnt dup load 1 sub store
+ } ifelse
+ } if
+ } ifelse
+ } loop
+} def
+end
+userdict /clipRenderOff 15 dict dup begin
+put
+{
+ /n /N /s /S /f /F /b /B
+}
+{
+ {
+ _doClip 1 eq
+ {
+ /_doClip 0 ddef clip
+ } if
+ newpath
+ } def
+} forall
+/Tr /pop load def
+/Bb {} def
+/BB /pop load def
+/Bg {12 npop} def
+/Bm {6 npop} def
+/Bc /Bm load def
+/Bh {4 npop} def
+end
+/Lb
+{
+ 4 npop
+ 6 1 roll
+ pop
+ 4 1 roll
+ pop pop pop
+ 0 eq
+ {
+ 0 eq
+ {
+ (%AI5_BeginLayer) 1 (%AI5_EndLayer--) discard
+ }
+ {
+ /clipForward? true def
+
+ /Tx /pop load def
+ /Tj /pop load def
+ currentdict end clipRenderOff begin begin
+ } ifelse
+ }
+ {
+ 0 eq
+ {
+ save /discardSave exch store
+ } if
+ } ifelse
+} bind def
+/LB
+{
+ discardSave dup null ne
+ {
+ restore
+ }
+ {
+ pop
+ clipForward?
+ {
+ currentdict
+ end
+ end
+ begin
+
+ /clipForward? false ddef
+ } if
+ } ifelse
+} bind def
+/Pb
+{
+ pop pop
+ 0 (%AI5_EndPalette) discard
+} bind def
+/Np
+{
+ 0 (%AI5_End_NonPrinting--) discard
+} bind def
+/Ln /pop load def
+/Ap
+/pop load def
+/Ar
+{
+ 72 exch div
+ 0 dtransform dup mul exch dup mul add sqrt
+ dup 1 lt
+ {
+ pop 1
+ } if
+ setflat
+} def
+/Mb
+{
+ q
+} def
+/Md
+{
+} def
+/MB
+{
+ Q
+} def
+/nc 3 dict def
+nc begin
+/setgray
+{
+ pop
+} bind def
+/setcmykcolor
+{
+ 4 npop
+} bind def
+/setcustomcolor
+{
+ 2 npop
+} bind def
+currentdict readonly pop
+end
+currentdict readonly pop
+end
+setpacking
+%%EndResource
+%%EndProlog
+%%BeginSetup
+Adobe_level2_AI5 /initialize get exec
+Adobe_IllustratorA_AI5 /initialize get exec
+%AI5_Begin_NonPrinting
+Np
+%AI3_BeginPattern: (Yellow Stripe)
+(Yellow Stripe) 8.4499 4.6 80.4499 76.6 [
+%AI3_Tile
+(0 O 0 R 0 0.4 1 0 k 0 0.4 1 0 K) @
+(
+800 Ar
+0 J 0 j 3.6 w 4 M []0 d
+%AI3_Note:
+0 D
+8.1999 8.1999 m
+80.6999 8.1999 L
+S
+8.1999 22.6 m
+80.6999 22.6 L
+S
+8.1999 37.0001 m
+80.6999 37.0001 L
+S
+8.1999 51.3999 m
+80.6999 51.3999 L
+S
+8.1999 65.8 m
+80.6999 65.8 L
+S
+8.1999 15.3999 m
+80.6999 15.3999 L
+S
+8.1999 29.8 m
+80.6999 29.8 L
+S
+8.1999 44.1999 m
+80.6999 44.1999 L
+S
+8.1999 58.6 m
+80.6999 58.6 L
+S
+8.1999 73.0001 m
+80.6999 73.0001 L
+S
+) &
+] E
+%AI3_EndPattern
+%AI5_End_NonPrinting--
+%AI5_Begin_NonPrinting
+Np
+3 Bn
+%AI5_BeginGradient: (Black & White)
+(Black & White) 0 2 Bd
+[
+<
+FFFEFDFCFBFAF9F8F7F6F5F4F3F2F1F0EFEEEDECEBEAE9E8E7E6E5E4E3E2E1E0DFDEDDDCDBDAD9D8
+D7D6D5D4D3D2D1D0CFCECDCCCBCAC9C8C7C6C5C4C3C2C1C0BFBEBDBCBBBAB9B8B7B6B5B4B3B2B1B0
+AFAEADACABAAA9A8A7A6A5A4A3A2A1A09F9E9D9C9B9A999897969594939291908F8E8D8C8B8A8988
+87868584838281807F7E7D7C7B7A797877767574737271706F6E6D6C6B6A69686766656463626160
+5F5E5D5C5B5A595857565554535251504F4E4D4C4B4A494847464544434241403F3E3D3C3B3A3938
+37363534333231302F2E2D2C2B2A292827262524232221201F1E1D1C1B1A19181716151413121110
+0F0E0D0C0B0A09080706050403020100
+>
+0 %_Br
+[
+0 0 50 100 %_Bs
+1 0 50 0 %_Bs
+BD
+%AI5_EndGradient
+%AI5_BeginGradient: (Red & Yellow)
+(Red & Yellow) 0 2 Bd
+[
+0
+<
+000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627
+28292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F
+505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F7071727374757677
+78797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9F
+A0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7
+C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF
+F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF
+>
+<
+FFFFFEFEFDFDFDFCFCFBFBFBFAFAF9F9F9F8F8F7F7F7F6F6F5F5F5F4F4F3F3F3F2F2F1F1F1F0F0EF
+EFEFEEEEEDEDEDECECEBEBEBEAEAE9E9E9E8E8E7E7E7E6E6E5E5E5E4E4E3E3E3E2E2E1E1E1E0E0DF
+DFDFDEDEDDDDDDDCDCDBDBDBDADAD9D9D9D8D8D7D7D7D6D6D5D5D5D4D4D3D3D3D2D2D1D1D1D0D0CF
+CFCFCECECDCDCDCCCCCBCBCBCACAC9C9C9C8C8C7C7C7C6C6C5C5C5C4C4C3C3C3C2C2C1C1C1C0C0BF
+BFBFBEBEBDBDBDBCBCBBBBBBBABAB9B9B9B8B8B7B7B7B6B6B5B5B5B4B4B3B3B3B2B2B1B1B1B0B0AF
+AFAFAEAEADADADACACABABABAAAAA9A9A9A8A8A7A7A7A6A6A5A5A5A4A4A3A3A3A2A2A1A1A1A0A09F
+9F9F9E9E9D9D9D9C9C9B9B9B9A9A9999
+>
+0
+1 %_Br
+[
+0 1 0.6 0 1 50 100 %_Bs
+0 0 1 0 1 50 0 %_Bs
+BD
+%AI5_EndGradient
+%AI5_BeginGradient: (Yellow & Blue Radial)
+(Yellow & Blue Radial) 1 2 Bd
+[
+<
+000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627
+28292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F
+505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F7071727374757677
+78797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9F
+A0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7
+C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF
+F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF
+>
+<
+1415161718191A1B1C1D1E1F1F202122232425262728292A2A2B2C2D2E2F30313233343536363738
+393A3B3C3D3E3F40414142434445464748494A4B4C4D4D4E4F50515253545556575858595A5B5C5D
+5E5F60616263646465666768696A6B6C6D6E6F6F707172737475767778797A7B7B7C7D7E7F808182
+83848586868788898A8B8C8D8E8F90919292939495969798999A9B9C9D9D9E9FA0A1A2A3A4A5A6A7
+A8A9A9AAABACADAEAFB0B1B2B3B4B4B5B6B7B8B9BABBBCBDBEBFC0C0C1C2C3C4C5C6C7C8C9CACBCB
+CCCDCECFD0D1D2D3D4D5D6D7D7D8D9DADBDCDDDEDFE0E1E2E2E3E4E5E6E7E8E9EAEBECEDEEEEEFF0
+F1F2F3F4F5F6F7F8F9F9FAFBFCFDFEFF
+>
+<
+ABAAAAA9A8A7A7A6A5A5A4A3A3A2A1A1A09F9F9E9D9D9C9B9B9A9999989797969595949393929191
+908F8F8E8D8D8C8B8B8A8989888787868585848383828181807F7F7E7D7D7C7B7B7A797978777776
+7575747373727171706F6F6E6D6D6C6B6B6A6969686767666565646362626160605F5E5E5D5C5C5B
+5A5A5958585756565554545352525150504F4E4E4D4C4C4B4A4A4948484746464544444342424140
+403F3E3E3D3C3C3B3A3A3938383736363534343332323130302F2E2E2D2C2C2B2A2A292828272626
+25242423222121201F1F1E1D1D1C1B1B1A1919181717161515141313121111100F0F0E0D0D0C0B0B
+0A090908070706050504030302010100
+>
+0
+1 %_Br
+[
+0 0.08 0.67 0 1 50 14 %_Bs
+1 1 0 0 1 50 100 %_Bs
+BD
+%AI5_EndGradient
+%AI5_End_NonPrinting--
+%AI5_BeginPalette
+144 161 Pb
+Pn
+Pc
+1 g
+Pc
+0 g
+Pc
+0 0 0 0 k
+Pc
+0.75 g
+Pc
+0.5 g
+Pc
+0.25 g
+Pc
+0 g
+Pc
+Bb
+2 (Black & White) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0.25 0 0 0 k
+Pc
+0.5 0 0 0 k
+Pc
+0.75 0 0 0 k
+Pc
+1 0 0 0 k
+Pc
+0.25 0.25 0 0 k
+Pc
+0.5 0.5 0 0 k
+Pc
+0.75 0.75 0 0 k
+Pc
+1 1 0 0 k
+Pc
+Bb
+2 (Red & Yellow) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0 0.25 0 0 k
+Pc
+0 0.5 0 0 k
+Pc
+0 0.75 0 0 k
+Pc
+0 1 0 0 k
+Pc
+0 0.25 0.25 0 k
+Pc
+0 0.5 0.5 0 k
+Pc
+0 0.75 0.75 0 k
+Pc
+0 1 1 0 k
+Pc
+Bb
+0 0 0 0 Bh
+2 (Yellow & Blue Radial) -4014 4716 0 0 1 0 0 1 0 0 Bg
+0 BB
+Pc
+0 0 0.25 0 k
+Pc
+0 0 0.5 0 k
+Pc
+0 0 0.75 0 k
+Pc
+0 0 1 0 k
+Pc
+0.25 0 0.25 0 k
+Pc
+0.5 0 0.5 0 k
+Pc
+0.75 0 0.75 0 k
+Pc
+1 0 1 0 k
+Pc
+(Yellow Stripe) 0 0 1 1 0 0 0 0 0 [1 0 0 1 0 0] p
+Pc
+0.25 0.125 0 0 k
+Pc
+0.5 0.25 0 0 k
+Pc
+0.75 0.375 0 0 k
+Pc
+1 0.5 0 0 k
+Pc
+0.125 0.25 0 0 k
+Pc
+0.25 0.5 0 0 k
+Pc
+0.375 0.75 0 0 k
+Pc
+0.5 1 0 0 k
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0 0.25 0.125 0 k
+Pc
+0 0.5 0.25 0 k
+Pc
+0 0.75 0.375 0 k
+Pc
+0 1 0.5 0 k
+Pc
+0 0.125 0.25 0 k
+Pc
+0 0.25 0.5 0 k
+Pc
+0 0.375 0.75 0 k
+Pc
+0 0.5 1 0 k
+Pc
+0 0.79 0.91 0 (PANTONE Warm Red CV) 0 x
+Pc
+0.125 0 0.25 0 k
+Pc
+0.25 0 0.5 0 k
+Pc
+0.375 0 0.75 0 k
+Pc
+0.5 0 1 0 k
+Pc
+0.25 0 0.125 0 k
+Pc
+0.5 0 0.25 0 k
+Pc
+0.75 0 0.375 0 k
+Pc
+1 0 0.5 0 k
+Pc
+0.5 1 0 0 k
+Pc
+0.25 0.125 0.125 0 k
+Pc
+0.5 0.25 0.25 0 k
+Pc
+0.75 0.375 0.375 0 k
+Pc
+1 0.5 0.5 0 k
+Pc
+0.25 0.25 0.125 0 k
+Pc
+0.5 0.5 0.25 0 k
+Pc
+0.75 0.75 0.375 0 k
+Pc
+1 1 0.5 0 k
+Pc
+0 1 0.5 0 k
+Pc
+0.125 0.25 0.125 0 k
+Pc
+0.25 0.5 0.25 0 k
+Pc
+0.375 0.75 0.375 0 k
+Pc
+0.5 1 0.5 0 k
+Pc
+0.125 0.25 0.25 0 k
+Pc
+0.25 0.5 0.5 0 k
+Pc
+0.375 0.75 0.75 0 k
+Pc
+0.5 1 1 0 k
+Pc
+0.75 0.75 0.375 0 k
+Pc
+0.125 0.125 0.25 0 k
+Pc
+0.25 0.25 0.5 0 k
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0.5 0.5 1 0 k
+Pc
+0.25 0.125 0.25 0 k
+Pc
+0.5 0.25 0.5 0 k
+Pc
+0.75 0.375 0.75 0 k
+Pc
+1 0.5 1 0 k
+Pc
+0 0.79 0.91 0 (PANTONE Warm Red CV) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0.5 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.25 1 0 (Orange Yellow) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 1 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0 0.5 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.45 1 0 (Orange) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0.375 0.375 0.75 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0.79 0.91 0 (PANTONE Warm Red CV) 0 x
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+1 0.65 0 0 k
+Pc
+0 0 0 0 k
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+Pc
+0 0 1 0 k
+Pc
+PB
+%AI5_EndPalette
+%%EndSetup
+%AI5_BeginLayer
+1 1 1 1 0 0 0 79 128 255 Lb
+(Layer 1) Ln
+0 A
+1 Ap
+0 O
+1 0.65 0 0 k
+800 Ar
+0 J 0 j 1 w 4 M []0 d
+%AI3_Note:
+0 D
+285.0121 311.7976 m
+357.5043 302.5199 L
+361.6071 392.7105 L
+376.3322 474.1377 L
+342.6527 475.6628 L
+327.6333 483.4165 L
+258.8269 486.3189 L
+254.4361 405.0427 L
+242.0523 312.2099 L
+285.0121 311.7976 L
+f
+0 0.79 0.91 0 k
+1.25 w
+295.4466 337.6172 m
+368.4943 335.3343 L
+363.9288 425.5026 L
+370.7771 507.9667 L
+337.1066 506.2547 L
+321.4128 512.5323 L
+252.6452 508.8228 L
+256.0692 427.5002 L
+252.6452 333.9077 L
+295.4466 337.6172 L
+f
+u
+0 Ap
+1 0.65 0 0 k
+1 w
+320.532 390.6149 m
+312.9017 388.534 l
+317.0637 398.5921 l
+321.2256 426.6854 l
+316.0232 427.7258 l
+322.2662 436.3965 l
+330.0436 465.6249 l
+316.3701 462.7557 l
+323.5798 475.9563 331.2311 484.5534 v
+321.2256 492.2363 l
+288.9913 478.0373 297.6622 431.9088 v
+290.9988 433.0755 l
+297.3888 384.7188 l
+291.9867 383.3315 l
+297.5214 372.0383 305.2714 366.6837 v
+305.9749 366.1976 295.5601 404.4882 306.6587 442.6395 c
+307.6992 440.2117 l
+298.855 399.5459 307.6992 366.6837 v
+308.1064 365.9033 312.5286 366.4235 v
+320.532 381.5106 320.532 390.6149 v
+f
+u
+*u
+1 g
+263.6948 355.9856 m
+265.2612 355.9856 L
+265.2612 359.2513 L
+265.9515 359.2513 266.6153 359.2513 267.2791 359.3575 c
+267.2791 355.9856 L
+269.6155 355.9856 L
+269.6155 355.3749 L
+267.2791 355.3749 L
+267.2791 347.2505 L
+267.2791 346.7726 267.2791 346.0558 268.288 346.0558 c
+268.9783 346.0558 269.35 346.5337 269.7748 347.0381 c
+270.1996 346.7461 L
+269.6951 345.7372 268.3942 345.1265 267.3322 345.1265 c
+265.4205 345.1265 265.2081 346.162 265.2081 347.4364 c
+265.2081 355.3749 L
+263.6948 355.3749 L
+263.6948 355.9856 l
+f
+*U
+*u
+285.7796 348.7639 m
+285.1689 346.8788 284.1069 345.2327 281.3457 345.1265 c
+277.2304 345.1265 275.9825 348.5515 275.9825 350.3835 c
+275.9825 355.1094 279.7792 356.2511 281.2926 356.2511 c
+283.0184 356.2511 285.461 355.4546 285.461 353.4102 c
+285.461 352.6934 285.0096 352.003 284.2662 352.003 c
+283.5494 352.003 283.0184 352.481 283.0184 353.2509 c
+283.0184 354.2864 283.868 354.4191 283.868 354.7112 c
+283.868 355.428 282.1953 355.7201 281.6112 355.7201 c
+279.0624 355.7201 278.3986 353.8616 278.3986 350.3835 c
+278.3986 348.7905 278.7969 347.5691 278.9562 347.1974 c
+279.3544 346.3213 280.1775 345.7637 281.5581 345.6841 c
+283.098 345.6044 284.5848 346.8523 285.222 348.7639 C
+285.7796 348.7639 l
+f
+*U
+*u
+291.9344 345.4717 m
+291.9344 346.0823 L
+293.9788 346.0823 L
+293.9788 363.1542 L
+291.9344 363.1542 L
+291.9344 363.7648 L
+293.0761 363.7648 L
+294.0585 363.7648 295.0939 363.8179 296.0497 364.0038 c
+296.0497 346.0823 L
+298.0941 346.0823 L
+298.0941 345.4717 L
+291.9344 345.4717 l
+f
+*U
+u
+310.0634 446.075 m
+305.3828 425.2059 306.7298 391.3708 v
+307.1338 381.222 308.2436 371.8929 309.5993 363.8029 C
+309.6066 363.8025 L
+310.4883 356.6987 311.0781 354.1272 313.3768 345.5676 C
+313.2426 340.0473 L
+294.8367 398.8155 310.0634 446.075 V
+f
+321.3622 464.1699 m
+325.5016 466.2317 331.4359 466.9819 v
+337.9224 455.0924 321.9584 434.793 v
+331.4821 456.0522 329.2358 462.7122 v
+326.7243 464.2727 321.3622 464.1699 v
+f
+319.4002 428.4819 m
+323.1177 427.6214 324.9024 429.0668 v
+321.386 415.3445 322.3077 407.7964 v
+323.2297 400.2483 316.5788 395.4159 y
+322.2441 402.584 320.4635 408.4226 v
+319.2289 412.4694 320.6101 422.8271 322.1681 426.1155 c
+320.7131 426.3196 319.4002 428.4819 v
+f
+315.7246 392.3281 m
+321.8677 393.0631 322.5131 396.1662 v
+323.265 377.6058 314.7299 369.9571 v
+321.2425 380.1152 320.2206 390.6235 v
+315.7246 392.3281 l
+f
+298.4445 384.6023 m
+296.4635 382.3836 290.5192 387.2778 v
+292.4131 374.803 304.1781 369.0924 v
+296.0814 375.1928 293.9 381.7824 v
+296.7611 382.6245 298.4445 384.6023 v
+f
+296.5483 389.3335 m
+288.5102 409.7356 290.2325 437.3036 v
+292.1098 432.3112 298.1424 430.5604 v
+295.3003 429.9794 293.6387 430.2313 v
+289.4335 418.5932 296.5483 389.3335 v
+f
+330.3126 484.1353 m
+327.3003 506.2722 308.4549 483.8853 v
+293.4491 466.0592 295.2373 450.9247 296.1578 442.4811 c
+296.3932 440.3206 293.366 465.0316 309.8067 481.2933 c
+326.2471 497.5553 329.9609 485.0794 330.3126 484.1353 c
+f
+U
+0 0 1 0 k
+302.5528 503.0164 m
+287.7656 507.2395 283.0593 458.227 v
+279.4282 473.3549 288.8204 494.7509 v
+298.2122 516.1468 302.5528 503.0164 y
+f
+284.2076 506.5994 m
+276.6655 495.2557 278.3767 483.1729 v
+272.6565 505.9183 284.2076 506.5994 v
+f
+339.7135 474.7902 m
+348.6321 478.0799 335.8615 444.8834 v
+342.4718 454.5848 346.6326 469.8253 v
+349.303 479.6062 339.7135 474.7902 y
+f
+354.1382 477.3767 m
+360.4435 471.669 355.9752 464.1187 v
+367.1908 475.904 354.1382 477.3767 v
+f
+U
+U
+*u
+1 g
+258.2029 317.4593 m
+256.6821 317.4593 L
+256.6821 325.2598 L
+258.7512 325.2598 L
+260.3858 325.2598 261.4514 324.608 261.4514 322.839 c
+261.4514 321.1837 260.5513 320.3767 258.9581 320.3767 c
+258.2029 320.3767 L
+258.2029 317.4593 l
+f
+1 D
+258.2029 321.6389 m
+258.5132 321.6389 L
+259.4133 321.6389 259.8995 321.8354 259.8995 322.8493 c
+259.8995 323.8528 259.3202 323.9976 258.4719 323.9976 c
+258.2029 323.9976 L
+258.2029 321.6389 l
+f
+*U
+*u
+0 D
+269.0694 321.3699 m
+269.0694 323.5528 270.6523 325.4667 272.9283 325.4667 c
+275.2043 325.4667 276.7871 323.5528 276.7871 321.3699 c
+276.7871 319.1353 275.2043 317.2524 272.9283 317.2524 c
+270.6523 317.2524 269.0694 319.1353 269.0694 321.3699 c
+f
+1 D
+270.6419 321.432 m
+270.6419 320.2526 271.6351 318.7525 272.9283 318.7525 c
+274.2215 318.7525 275.2146 320.2526 275.2146 321.432 c
+275.2146 322.6941 274.2628 323.9666 272.9283 323.9666 c
+271.5937 323.9666 270.6419 322.6941 270.6419 321.432 c
+f
+*U
+*u
+0 D
+287.2943 319.9422 m
+287.315 319.9422 L
+288.8668 325.3632 L
+289.7668 325.3632 L
+291.3807 319.9422 L
+291.4014 319.9422 L
+292.9326 325.2598 L
+294.5258 325.2598 L
+291.8877 317.3041 L
+290.7704 317.3041 L
+289.2185 322.4044 L
+289.1978 322.4044 L
+287.7288 317.3041 L
+286.6115 317.3041 L
+284.1286 325.2598 L
+285.7218 325.2598 L
+287.2943 319.9422 l
+f
+*U
+*u
+303.7595 323.9356 m
+303.7595 322.2182 L
+306.1803 322.2182 L
+306.1803 320.894 L
+303.7595 320.894 L
+303.7595 318.7835 L
+306.2734 318.7835 L
+306.2734 317.4593 L
+302.2387 317.4593 L
+302.2387 325.2598 L
+306.2734 325.2598 L
+306.2734 323.9356 L
+303.7595 323.9356 l
+f
+*U
+*u
+319.8602 317.4593 m
+318.0187 317.4593 L
+316.1255 320.6043 L
+316.1048 320.6043 L
+316.1048 317.4593 L
+314.5841 317.4593 L
+314.5841 325.2598 L
+316.6428 325.2598 L
+318.1843 325.2598 319.2499 324.577 319.2499 322.9114 c
+319.2499 321.9182 318.7015 320.925 317.6567 320.7492 C
+319.8602 317.4593 l
+f
+1 D
+316.1048 321.6699 m
+316.3014 321.6699 L
+317.1394 321.6699 317.7291 321.9182 317.7291 322.87 c
+317.7291 323.8321 317.1187 324.0183 316.3117 324.0183 c
+316.1048 324.0183 L
+316.1048 321.6699 l
+f
+*U
+*u
+0 D
+329.1754 323.9356 m
+329.1754 322.2182 L
+331.5962 322.2182 L
+331.5962 320.894 L
+329.1754 320.894 L
+329.1754 318.7835 L
+331.6894 318.7835 L
+331.6894 317.4593 L
+327.6546 317.4593 L
+327.6546 325.2598 L
+331.6894 325.2598 L
+331.6894 323.9356 L
+329.1754 323.9356 l
+f
+*U
+*u
+340 325.2598 m
+342.1725 325.2598 L
+344.4279 325.2598 345.9383 323.5735 345.9383 321.3492 c
+345.9383 319.156 344.3865 317.4593 342.1622 317.4593 c
+340 317.4593 L
+340 325.2598 l
+f
+1 D
+341.5208 318.7835 m
+341.7691 318.7835 L
+343.6416 318.7835 344.3658 319.8181 344.3658 321.3596 c
+344.3658 323.0562 343.4968 323.9356 341.7691 323.9356 c
+341.5208 323.9356 L
+341.5208 318.7835 l
+f
+*U
+LB
+%AI5_EndLayer--
+%%PageTrailer
+gsave annotatepage grestore showpage
+%%Trailer
+Adobe_IllustratorA_AI5 /terminate get exec
+Adobe_level2_AI5 /terminate get exec
+%%EOF
diff --git a/mplug_owl2/lib/tk8.6/msgs/cs.msg b/mplug_owl2/lib/tk8.6/msgs/cs.msg
new file mode 100644
index 0000000000000000000000000000000000000000..d6be730ae1e4a93d801b11d4ff1a1e6b7b956e76
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/cs.msg
@@ -0,0 +1,77 @@
+namespace eval ::tk {
+ ::msgcat::mcset cs "&Abort" "&P\u0159eru\u0161it"
+ ::msgcat::mcset cs "&About..." "&O programu..."
+ ::msgcat::mcset cs "All Files" "V\u0161echny soubory"
+ ::msgcat::mcset cs "Application Error" "Chyba programu"
+ ::msgcat::mcset cs "Bold Italic"
+ ::msgcat::mcset cs "&Blue" "&Modr\341"
+ ::msgcat::mcset cs "Cancel" "Zru\u0161it"
+ ::msgcat::mcset cs "&Cancel" "&Zru\u0161it"
+ ::msgcat::mcset cs "Cannot change to the directory \"%1\$s\".\nPermission denied." "Nemohu zm\u011bnit atku\341ln\355 adres\341\u0159 na \"%1\$s\".\nP\u0159\355stup odm\355tnut."
+ ::msgcat::mcset cs "Choose Directory" "V\375b\u011br adres\341\u0159e"
+ ::msgcat::mcset cs "Cl&ear" "Sma&zat"
+ ::msgcat::mcset cs "&Clear Console" "&Smazat konzolu"
+ ::msgcat::mcset cs "Color" "Barva"
+ ::msgcat::mcset cs "Console" "Konzole"
+ ::msgcat::mcset cs "&Copy" "&Kop\355rovat"
+ ::msgcat::mcset cs "Cu&t" "V&y\u0159\355znout"
+ ::msgcat::mcset cs "&Delete" "&Smazat"
+ ::msgcat::mcset cs "Details >>" "Detaily >>"
+ ::msgcat::mcset cs "Directory \"%1\$s\" does not exist." "Adres\341\u0159 \"%1\$s\" neexistuje."
+ ::msgcat::mcset cs "&Directory:" "&Adres\341\u0159:"
+ ::msgcat::mcset cs "&Edit" "&\332pravy"
+ ::msgcat::mcset cs "Error: %1\$s" "Chyba: %1\$s"
+ ::msgcat::mcset cs "E&xit" "&Konec"
+ ::msgcat::mcset cs "&File" "&Soubor"
+ ::msgcat::mcset cs "File \"%1\$s\" already exists.\nDo you want to overwrite it?" "Soubor \"%1\$s\" ji\u017e existuje.\nChcete jej p\u0159epsat?"
+ ::msgcat::mcset cs "File \"%1\$s\" already exists.\n\n" "Soubor \"%1\$s\" ji\u017e existuje.\n\n"
+ ::msgcat::mcset cs "File \"%1\$s\" does not exist." "Soubor \"%1\$s\" neexistuje."
+ ::msgcat::mcset cs "File &name:" "&Jm\351no souboru:"
+ ::msgcat::mcset cs "File &names:" "&Jm\351na soubor\u016f:"
+ ::msgcat::mcset cs "Files of &type:" "&Typy soubor\u016f:"
+ ::msgcat::mcset cs "Fi&les:" "Sou&bory:"
+ ::msgcat::mcset cs "&Filter" "&Filtr"
+ ::msgcat::mcset cs "Fil&ter:" "Fil&tr:"
+ ::msgcat::mcset cs "Font st&yle:"
+ ::msgcat::mcset cs "&Green" "Ze&len\341"
+ ::msgcat::mcset cs "&Help" "&N\341pov\u011bda"
+ ::msgcat::mcset cs "Hi" "Ahoj"
+ ::msgcat::mcset cs "&Hide Console" "&Schovat Konzolu"
+ ::msgcat::mcset cs "&Ignore" "&Ignorovat"
+ ::msgcat::mcset cs "Invalid file name \"%1\$s\"." "\u0160patn\351 jm\351no souboru \"%1\$s\"."
+ ::msgcat::mcset cs "Log Files" "Log soubory"
+ ::msgcat::mcset cs "&No" "&Ne"
+ ::msgcat::mcset cs "&OK"
+ ::msgcat::mcset cs "OK"
+ ::msgcat::mcset cs "Ok"
+ ::msgcat::mcset cs "Open" "Otev\u0159\355t"
+ ::msgcat::mcset cs "&Open" "&Otev\u0159\355t"
+ ::msgcat::mcset cs "Open Multiple Files" "Otev\u0159\355t v\355ce soubor\u016f"
+ ::msgcat::mcset cs "P&aste" "&Vlo\u017eit"
+ ::msgcat::mcset cs "&Quit" "&Ukon\u010dit"
+ ::msgcat::mcset cs "&Red" "\u010ce&rven\341"
+ ::msgcat::mcset cs "Replace existing file?" "Nahradit st\341vaj\355c\355 soubor?"
+ ::msgcat::mcset cs "&Retry" "Z&novu"
+ ::msgcat::mcset cs "&Save" "&Ulo\u017eit"
+ ::msgcat::mcset cs "Save As" "Ulo\u017eit jako"
+ ::msgcat::mcset cs "Save To Log" "Ulo\u017eit do logu"
+ ::msgcat::mcset cs "Select Log File" "Vybrat log soubor"
+ ::msgcat::mcset cs "Select a file to source" "Vybrat soubor k nahr\341n\355"
+ ::msgcat::mcset cs "&Selection:" "&V\375b\u011br:"
+ ::msgcat::mcset cs "Skip Messages" "P\u0159esko\u010dit zpr\341vy"
+ ::msgcat::mcset cs "&Source..." "&Zdroj..."
+ ::msgcat::mcset cs "Tcl Scripts" "Tcl skripty"
+ ::msgcat::mcset cs "Tcl for Windows" "Tcl pro Windows"
+ ::msgcat::mcset cs "Text Files" "Textov\351 soubory"
+ ::msgcat::mcset cs "abort" "p\u0159eru\u0161it"
+ ::msgcat::mcset cs "blue" "modr\341"
+ ::msgcat::mcset cs "cancel" "zru\u0161it"
+ ::msgcat::mcset cs "extension" "p\u0159\355pona"
+ ::msgcat::mcset cs "extensions" "p\u0159\355pony"
+ ::msgcat::mcset cs "green" "zelen\341"
+ ::msgcat::mcset cs "ignore" "ignorovat"
+ ::msgcat::mcset cs "ok"
+ ::msgcat::mcset cs "red" "\u010derven\341"
+ ::msgcat::mcset cs "retry" "znovu"
+ ::msgcat::mcset cs "yes" "ano"
+}
diff --git a/mplug_owl2/lib/tk8.6/msgs/de.msg b/mplug_owl2/lib/tk8.6/msgs/de.msg
new file mode 100644
index 0000000000000000000000000000000000000000..e420f8a2ec887b04f24b666f8145214656e6b769
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/de.msg
@@ -0,0 +1,91 @@
+namespace eval ::tk {
+ ::msgcat::mcset de "&Abort" "&Abbruch"
+ ::msgcat::mcset de "&About..." "&\u00dcber..."
+ ::msgcat::mcset de "All Files" "Alle Dateien"
+ ::msgcat::mcset de "Application Error" "Applikationsfehler"
+ ::msgcat::mcset de "&Apply" "&Anwenden"
+ ::msgcat::mcset de "Bold" "Fett"
+ ::msgcat::mcset de "Bold Italic" "Fett kursiv"
+ ::msgcat::mcset de "&Blue" "&Blau"
+ ::msgcat::mcset de "Cancel" "Abbruch"
+ ::msgcat::mcset de "&Cancel" "&Abbruch"
+ ::msgcat::mcset de "Cannot change to the directory \"%1\$s\".\nPermission denied." "Kann nicht in das Verzeichnis \"%1\$s\" wechseln.\nKeine Rechte vorhanden."
+ ::msgcat::mcset de "Choose Directory" "W\u00e4hle Verzeichnis"
+ ::msgcat::mcset de "Cl&ear" "&R\u00fccksetzen"
+ ::msgcat::mcset de "&Clear Console" "&Konsole l\u00f6schen"
+ ::msgcat::mcset de "Color" "Farbe"
+ ::msgcat::mcset de "Console" "Konsole"
+ ::msgcat::mcset de "&Copy" "&Kopieren"
+ ::msgcat::mcset de "Cu&t" "Aus&schneiden"
+ ::msgcat::mcset de "&Delete" "&L\u00f6schen"
+ ::msgcat::mcset de "Details >>"
+ ::msgcat::mcset de "Directory \"%1\$s\" does not exist." "Das Verzeichnis \"%1\$s\" existiert nicht."
+ ::msgcat::mcset de "&Directory:" "&Verzeichnis:"
+ ::msgcat::mcset de "&Edit" "&Bearbeiten"
+ ::msgcat::mcset de "Effects" "Effekte"
+ ::msgcat::mcset de "Error: %1\$s" "Fehler: %1\$s"
+ ::msgcat::mcset de "E&xit" "&Ende"
+ ::msgcat::mcset de "&File" "&Datei"
+ ::msgcat::mcset de "File \"%1\$s\" already exists.\nDo you want to overwrite it?" "Die Datei \"%1\$s\" ist bereits vorhanden.\nWollen sie diese Datei \u00fcberschreiben ?"
+ ::msgcat::mcset de "File \"%1\$s\" already exists.\n\n" "Die Datei \"%1\$s\" ist bereits vorhanden.\n\n"
+ ::msgcat::mcset de "File \"%1\$s\" does not exist." "Die Datei \"%1\$s\" existiert nicht."
+ ::msgcat::mcset de "File &name:" "Datei&name:"
+ ::msgcat::mcset de "File &names:" "Datei&namen:"
+ ::msgcat::mcset de "Files of &type:" "Dateien des &Typs:"
+ ::msgcat::mcset de "Fi&les:" "Dat&eien:"
+ ::msgcat::mcset de "&Filter"
+ ::msgcat::mcset de "Fil&ter:"
+ ::msgcat::mcset de "Font" "Schriftart"
+ ::msgcat::mcset de "&Font:" "Schriftart:"
+ ::msgcat::mcset de "Font st&yle:" "Schriftschnitt:"
+ ::msgcat::mcset de "&Green" "&Gr\u00fcn"
+ ::msgcat::mcset de "&Help" "&Hilfe"
+ ::msgcat::mcset de "Hi" "Hallo"
+ ::msgcat::mcset de "&Hide Console" "&Konsole unsichtbar machen"
+ ::msgcat::mcset de "&Ignore" "&Ignorieren"
+ ::msgcat::mcset de "Invalid file name \"%1\$s\"." "Ung\u00fcltiger Dateiname \"%1\$s\"."
+ ::msgcat::mcset de "Italic" "Kursiv"
+ ::msgcat::mcset de "Log Files" "Protokolldatei"
+ ::msgcat::mcset de "&No" "&Nein"
+ ::msgcat::mcset de "&OK"
+ ::msgcat::mcset de "OK"
+ ::msgcat::mcset de "Ok"
+ ::msgcat::mcset de "Open" "\u00d6ffnen"
+ ::msgcat::mcset de "&Open" "\u00d6&ffnen"
+ ::msgcat::mcset de "Open Multiple Files" "Mehrere Dateien \u00F6ffnen"
+ ::msgcat::mcset de "P&aste" "E&inf\u00fcgen"
+ ::msgcat::mcset de "&Quit" "&Beenden"
+ ::msgcat::mcset de "&Red" "&Rot"
+ ::msgcat::mcset de "Regular" "Standard"
+ ::msgcat::mcset de "Replace existing file?" "Existierende Datei ersetzen?"
+ ::msgcat::mcset de "&Retry" "&Wiederholen"
+ ::msgcat::mcset de "Sample" "Beispiel"
+ ::msgcat::mcset de "&Save" "&Speichern"
+ ::msgcat::mcset de "Save As" "Speichern unter"
+ ::msgcat::mcset de "Save To Log" "In Protokoll speichern"
+ ::msgcat::mcset de "Select Log File" "Protokolldatei ausw\u00e4hlen"
+ ::msgcat::mcset de "Select a file to source" "Auszuf\u00fchrende Datei ausw\u00e4hlen"
+ ::msgcat::mcset de "&Selection:" "Auswah&l:"
+ ::msgcat::mcset de "&Size:" "Schriftgrad:"
+ ::msgcat::mcset de "Show &Hidden Directories" "Zeige versteckte Dateien"
+ ::msgcat::mcset de "Show &Hidden Files and Directories" "Zeige versteckte Dateien und Verzeichnisse"
+ ::msgcat::mcset de "Skip Messages" "Weitere Nachrichten \u00fcberspringen"
+ ::msgcat::mcset de "&Source..." "&Ausf\u00fchren..."
+ ::msgcat::mcset de "Stri&keout" "&Durchgestrichen"
+ ::msgcat::mcset de "Tcl Scripts" "Tcl-Skripte"
+ ::msgcat::mcset de "Tcl for Windows" "Tcl f\u00fcr Windows"
+ ::msgcat::mcset de "Text Files" "Textdateien"
+ ::msgcat::mcset de "&Underline" "&Unterstrichen"
+ ::msgcat::mcset de "&Yes" "&Ja"
+ ::msgcat::mcset de "abort" "abbrechen"
+ ::msgcat::mcset de "blue" "blau"
+ ::msgcat::mcset de "cancel" "abbrechen"
+ ::msgcat::mcset de "extension" "Erweiterung"
+ ::msgcat::mcset de "extensions" "Erweiterungen"
+ ::msgcat::mcset de "green" "gr\u00fcn"
+ ::msgcat::mcset de "ignore" "ignorieren"
+ ::msgcat::mcset de "ok"
+ ::msgcat::mcset de "red" "rot"
+ ::msgcat::mcset de "retry" "wiederholen"
+ ::msgcat::mcset de "yes" "ja"
+}
diff --git a/mplug_owl2/lib/tk8.6/msgs/el.msg b/mplug_owl2/lib/tk8.6/msgs/el.msg
new file mode 100644
index 0000000000000000000000000000000000000000..2e3f236e19927634f0ab8966abe913cbaeab2e33
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/el.msg
@@ -0,0 +1,86 @@
+## Messages for the Greek (Hellenic - "el") language.
+## Please report any changes/suggestions to:
+## petasis@iit.demokritos.gr
+
+namespace eval ::tk {
+ ::msgcat::mcset el "&Abort" "\u03a4\u03b5\u03c1\u03bc\u03b1\u03c4\u03b9\u03c3\u03bc\u03cc\u03c2"
+ ::msgcat::mcset el "About..." "\u03a3\u03c7\u03b5\u03c4\u03b9\u03ba\u03ac..."
+ ::msgcat::mcset el "All Files" "\u038c\u03bb\u03b1 \u03c4\u03b1 \u0391\u03c1\u03c7\u03b5\u03af\u03b1"
+ ::msgcat::mcset el "Application Error" "\u039b\u03ac\u03b8\u03bf\u03c2 \u0395\u03c6\u03b1\u03c1\u03bc\u03bf\u03b3\u03ae\u03c2"
+ ::msgcat::mcset el "&Blue" "\u039c\u03c0\u03bb\u03b5"
+ ::msgcat::mcset el "&Cancel" "\u0391\u03ba\u03cd\u03c1\u03c9\u03c3\u03b7"
+ ::msgcat::mcset el \
+"Cannot change to the directory \"%1\$s\".\nPermission denied." \
+"\u0394\u03b5\u03bd \u03b5\u03af\u03bd\u03b1\u03b9 \u03b4\u03c5\u03bd\u03b1\u03c4\u03ae \u03b7 \u03b1\u03bb\u03bb\u03b1\u03b3\u03ae \u03ba\u03b1\u03c4\u03b1\u03bb\u03cc\u03b3\u03bf\u03c5 \u03c3\u03b5 \"%1\$s\".\n\u0397 \u03c0\u03c1\u03cc\u03c3\u03b2\u03b1\u03c3\u03b7 \u03b4\u03b5\u03bd \u03b5\u03c0\u03b9\u03c4\u03c1\u03ad\u03c0\u03b5\u03c4\u03b1\u03b9."
+ ::msgcat::mcset el "Choose Directory" "\u0395\u03c0\u03b9\u03bb\u03bf\u03b3\u03ae \u039a\u03b1\u03c4\u03b1\u03bb\u03cc\u03b3\u03bf\u03c5"
+ ::msgcat::mcset el "Clear" "\u039a\u03b1\u03b8\u03b1\u03c1\u03b9\u03c3\u03bc\u03cc\u03c2"
+ ::msgcat::mcset el "Color" "\u03a7\u03c1\u03ce\u03bc\u03b1"
+ ::msgcat::mcset el "Console" "\u039a\u03bf\u03bd\u03c3\u03cc\u03bb\u03b1"
+ ::msgcat::mcset el "Copy" "\u0391\u03bd\u03c4\u03b9\u03b3\u03c1\u03b1\u03c6\u03ae"
+ ::msgcat::mcset el "Cut" "\u0391\u03c0\u03bf\u03ba\u03bf\u03c0\u03ae"
+ ::msgcat::mcset el "Delete" "\u0394\u03b9\u03b1\u03b3\u03c1\u03b1\u03c6\u03ae"
+ ::msgcat::mcset el "Details >>" "\u039b\u03b5\u03c0\u03c4\u03bf\u03bc\u03ad\u03c1\u03b5\u03b9\u03b5\u03c2 >>"
+ ::msgcat::mcset el "Directory \"%1\$s\" does not exist." \
+ "\u039f \u03ba\u03b1\u03c4\u03ac\u03bb\u03bf\u03b3\u03bf\u03c2 \"%1\$s\" \u03b4\u03b5\u03bd \u03c5\u03c0\u03ac\u03c1\u03c7\u03b5\u03b9."
+ ::msgcat::mcset el "&Directory:" "&\u039a\u03b1\u03c4\u03ac\u03bb\u03bf\u03b3\u03bf\u03c2:"
+ ::msgcat::mcset el "Error: %1\$s" "\u039b\u03ac\u03b8\u03bf\u03c2: %1\$s"
+ ::msgcat::mcset el "Exit" "\u0388\u03be\u03bf\u03b4\u03bf\u03c2"
+ ::msgcat::mcset el \
+ "File \"%1\$s\" already exists.\nDo you want to overwrite it?" \
+ "\u03a4\u03bf \u03b1\u03c1\u03c7\u03b5\u03af\u03bf \"%1\$s\" \u03ae\u03b4\u03b7 \u03c5\u03c0\u03ac\u03c1\u03c7\u03b5\u03b9.\n\u0398\u03ad\u03bb\u03b5\u03c4\u03b5 \u03bd\u03b1 \u03b5\u03c0\u03b9\u03ba\u03b1\u03bb\u03c5\u03c6\u03b8\u03b5\u03af;"
+ ::msgcat::mcset el "File \"%1\$s\" already exists.\n\n" \
+ "\u03a4\u03bf \u03b1\u03c1\u03c7\u03b5\u03af\u03bf \"%1\$s\" \u03ae\u03b4\u03b7 \u03c5\u03c0\u03ac\u03c1\u03c7\u03b5\u03b9.\n\n"
+ ::msgcat::mcset el "File \"%1\$s\" does not exist." \
+ "\u03a4\u03bf \u03b1\u03c1\u03c7\u03b5\u03af\u03bf \"%1\$s\" \u03b4\u03b5\u03bd \u03c5\u03c0\u03ac\u03c1\u03c7\u03b5\u03b9."
+ ::msgcat::mcset el "File &name:" "\u038c&\u03bd\u03bf\u03bc\u03b1 \u03b1\u03c1\u03c7\u03b5\u03af\u03bf\u03c5:"
+ ::msgcat::mcset el "File &names:" "\u038c&\u03bd\u03bf\u03bc\u03b1 \u03b1\u03c1\u03c7\u03b5\u03af\u03c9\u03bd:"
+ ::msgcat::mcset el "Files of &type:" "\u0391\u03c1\u03c7\u03b5\u03af\u03b1 \u03c4\u03bf\u03c5 &\u03c4\u03cd\u03c0\u03bf\u03c5:"
+ ::msgcat::mcset el "Fi&les:" "\u0391\u03c1\u03c7\u03b5\u03af\u03b1:"
+ ::msgcat::mcset el "&Filter" "\u03a6\u03af\u03bb\u03c4\u03c1\u03bf"
+ ::msgcat::mcset el "Fil&ter:" "\u03a6\u03af\u03bb\u03c4\u03c1\u03bf:"
+ ::msgcat::mcset el "&Green" "\u03a0\u03c1\u03ac\u03c3\u03b9\u03bd\u03bf"
+ ::msgcat::mcset el "Hi" "\u0393\u03b5\u03b9\u03b1"
+ ::msgcat::mcset el "Hide Console" "\u0391\u03c0\u03cc\u03ba\u03c1\u03c5\u03c8\u03b7 \u03ba\u03bf\u03bd\u03c3\u03cc\u03bb\u03b1\u03c2"
+ ::msgcat::mcset el "&Ignore" "\u0391\u03b3\u03bd\u03cc\u03b7\u03c3\u03b7"
+ ::msgcat::mcset el "Invalid file name \"%1\$s\"." \
+ "\u0386\u03ba\u03c5\u03c1\u03bf \u03cc\u03bd\u03bf\u03bc\u03b1 \u03b1\u03c1\u03c7\u03b5\u03af\u03bf\u03c5 \"%1\$s\"."
+ ::msgcat::mcset el "Log Files" "\u0391\u03c1\u03c7\u03b5\u03af\u03b1 \u039a\u03b1\u03c4\u03b1\u03b3\u03c1\u03b1\u03c6\u03ae\u03c2"
+ ::msgcat::mcset el "&No" "\u038c\u03c7\u03b9"
+ ::msgcat::mcset el "&OK" "\u0395\u03bd\u03c4\u03ac\u03be\u03b5\u03b9"
+ ::msgcat::mcset el "OK" "\u0395\u03bd\u03c4\u03ac\u03be\u03b5\u03b9"
+ ::msgcat::mcset el "Ok" "\u0395\u03bd\u03c4\u03ac\u03be\u03b5\u03b9"
+ ::msgcat::mcset el "Open" "\u0386\u03bd\u03bf\u03b9\u03b3\u03bc\u03b1"
+ ::msgcat::mcset el "&Open" "\u0386\u03bd\u03bf\u03b9\u03b3\u03bc\u03b1"
+ ::msgcat::mcset el "Open Multiple Files" \
+ "\u0386\u03bd\u03bf\u03b9\u03b3\u03bc\u03b1 \u03c0\u03bf\u03bb\u03bb\u03b1\u03c0\u03bb\u03ce\u03bd \u03b1\u03c1\u03c7\u03b5\u03af\u03c9\u03bd"
+ ::msgcat::mcset el "P&aste" "\u0395\u03c0\u03b9\u03ba\u03cc\u03bb\u03bb\u03b7\u03c3\u03b7"
+ ::msgcat::mcset el "Quit" "\u0388\u03be\u03bf\u03b4\u03bf\u03c2"
+ ::msgcat::mcset el "&Red" "\u039a\u03cc\u03ba\u03ba\u03b9\u03bd\u03bf"
+ ::msgcat::mcset el "Replace existing file?" \
+ "\u0395\u03c0\u03b9\u03ba\u03ac\u03bb\u03c5\u03c8\u03b7 \u03c5\u03c0\u03ac\u03c1\u03c7\u03bf\u03bd\u03c4\u03bf\u03c2 \u03b1\u03c1\u03c7\u03b5\u03af\u03bf\u03c5;"
+ ::msgcat::mcset el "&Retry" "\u03a0\u03c1\u03bf\u03c3\u03c0\u03ac\u03b8\u03b7\u03c3\u03b5 \u03be\u03b1\u03bd\u03ac"
+ ::msgcat::mcset el "&Save" "\u0391\u03c0\u03bf\u03b8\u03ae\u03ba\u03b5\u03c5\u03c3\u03b7"
+ ::msgcat::mcset el "Save As" "\u0391\u03c0\u03bf\u03b8\u03ae\u03ba\u03b5\u03c5\u03c3\u03b7 \u03c3\u03b1\u03bd"
+ ::msgcat::mcset el "Save To Log" "\u0391\u03c0\u03bf\u03b8\u03ae\u03ba\u03b5\u03c5\u03c3\u03b7 \u03c3\u03c4\u03bf \u03b1\u03c1\u03c7\u03b5\u03af\u03bf \u03ba\u03b1\u03c4\u03b1\u03b3\u03c1\u03b1\u03c6\u03ae\u03c2"
+ ::msgcat::mcset el "Select Log File" "\u0395\u03c0\u03b9\u03bb\u03bf\u03b3\u03ae \u03b1\u03c1\u03c7\u03b5\u03af\u03bf\u03c5 \u03ba\u03b1\u03c4\u03b1\u03b3\u03c1\u03b1\u03c6\u03ae\u03c2"
+ ::msgcat::mcset el "Select a file to source" \
+ "\u0395\u03c0\u03b9\u03bb\u03ad\u03be\u03c4\u03b5 \u03b1\u03c1\u03c7\u03b5\u03af\u03bf \u03b3\u03b9\u03b1 \u03b5\u03ba\u03c4\u03ad\u03bb\u03b5\u03c3\u03b7"
+ ::msgcat::mcset el "&Selection:" "\u0395\u03c0\u03b9\u03bb\u03bf\u03b3\u03ae:"
+ ::msgcat::mcset el "Skip Messages" "\u0391\u03c0\u03bf\u03c6\u03c5\u03b3\u03ae\u03bc\u03b7\u03bd\u03c5\u03bc\u03ac\u03c4\u03c9\u03bd"
+ ::msgcat::mcset el "&Source..." "\u0395\u03ba\u03c4\u03ad\u03bb\u03b5\u03c3\u03b7..."
+ ::msgcat::mcset el "Tcl Scripts" "Tcl Scripts"
+ ::msgcat::mcset el "Tcl for Windows" "Tcl \u03b3\u03b9\u03b1 Windows"
+ ::msgcat::mcset el "Text Files" "\u0391\u03c1\u03c7\u03b5\u03af\u03b1 \u039a\u03b5\u03b9\u03bc\u03ad\u03bd\u03bf\u03c5"
+ ::msgcat::mcset el "&Yes" "\u039d\u03b1\u03b9"
+ ::msgcat::mcset el "abort" "\u03c4\u03b5\u03c1\u03bc\u03b1\u03c4\u03b9\u03c3\u03bc\u03cc\u03c2"
+ ::msgcat::mcset el "blue" "\u03bc\u03c0\u03bb\u03b5"
+ ::msgcat::mcset el "cancel" "\u03b1\u03ba\u03cd\u03c1\u03c9\u03c3\u03b7"
+ ::msgcat::mcset el "extension" "\u03b5\u03c0\u03ad\u03ba\u03c4\u03b1\u03c3\u03b7"
+ ::msgcat::mcset el "extensions" "\u03b5\u03c0\u03b5\u03ba\u03c4\u03ac\u03c3\u03b5\u03b9\u03c2"
+ ::msgcat::mcset el "green" "\u03c0\u03c1\u03ac\u03c3\u03b9\u03bd\u03bf"
+ ::msgcat::mcset el "ignore" "\u03b1\u03b3\u03bd\u03cc\u03b7\u03c3\u03b7"
+ ::msgcat::mcset el "ok" "\u03b5\u03bd\u03c4\u03ac\u03be\u03b5\u03b9"
+ ::msgcat::mcset el "red" "\u03ba\u03cc\u03ba\u03ba\u03b9\u03bd\u03bf"
+ ::msgcat::mcset el "retry" "\u03c0\u03c1\u03bf\u03c3\u03c0\u03ac\u03b8\u03b7\u03c3\u03b5 \u03be\u03b1\u03bd\u03ac"
+ ::msgcat::mcset el "yes" "\u03bd\u03b1\u03b9"
+}
diff --git a/mplug_owl2/lib/tk8.6/msgs/en_gb.msg b/mplug_owl2/lib/tk8.6/msgs/en_gb.msg
new file mode 100644
index 0000000000000000000000000000000000000000..efafa38c6d7df3db70ce84d35f2874c64cbe7b78
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/en_gb.msg
@@ -0,0 +1,3 @@
+namespace eval ::tk {
+ ::msgcat::mcset en_gb Color Colour
+}
diff --git a/mplug_owl2/lib/tk8.6/msgs/it.msg b/mplug_owl2/lib/tk8.6/msgs/it.msg
new file mode 100644
index 0000000000000000000000000000000000000000..2e1b4bd30853a43c313d484e7293f7ffb08cf74c
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/it.msg
@@ -0,0 +1,73 @@
+namespace eval ::tk {
+ ::msgcat::mcset it "&Abort" "&Interrompi"
+ ::msgcat::mcset it "&About..." "Informazioni..."
+ ::msgcat::mcset it "All Files" "Tutti i file"
+ ::msgcat::mcset it "Application Error" "Errore dell' applicazione"
+ ::msgcat::mcset it "&Blue" "&Blu"
+ ::msgcat::mcset it "Cancel" "Annulla"
+ ::msgcat::mcset it "&Cancel" "&Annulla"
+ ::msgcat::mcset it "Cannot change to the directory \"%1\$s\".\nPermission denied." "Impossibile accedere alla directory \"%1\$s\".\nPermesso negato."
+ ::msgcat::mcset it "Choose Directory" "Scegli una directory"
+ ::msgcat::mcset it "Cl&ear" "Azzera"
+ ::msgcat::mcset it "&Clear Console" "Azzera Console"
+ ::msgcat::mcset it "Color" "Colore"
+ ::msgcat::mcset it "Console"
+ ::msgcat::mcset it "&Copy" "Copia"
+ ::msgcat::mcset it "Cu&t" "Taglia"
+ ::msgcat::mcset it "Delete" "Cancella"
+ ::msgcat::mcset it "Details >>" "Dettagli >>"
+ ::msgcat::mcset it "Directory \"%1\$s\" does not exist." "La directory \"%1\$s\" non esiste."
+ ::msgcat::mcset it "&Directory:"
+ ::msgcat::mcset it "Error: %1\$s" "Errore: %1\$s"
+ ::msgcat::mcset it "E&xit" "Esci"
+ ::msgcat::mcset it "File \"%1\$s\" already exists.\nDo you want to overwrite it?" "Il file \"%1\$s\" esiste gi\u00e0.\nVuoi sovrascriverlo?"
+ ::msgcat::mcset it "File \"%1\$s\" already exists.\n\n" "Il file \"%1\$s\" esiste gi\u00e0.\n\n"
+ ::msgcat::mcset it "File \"%1\$s\" does not exist." "Il file \"%1\$s\" non esiste."
+ ::msgcat::mcset it "File &name:" "&Nome del file:"
+ ::msgcat::mcset it "File &names:" "&Nomi dei file:"
+ ::msgcat::mcset it "Files of &type:" "File di &tipo:"
+ ::msgcat::mcset it "Fi&les:" "Fi&le:"
+ ::msgcat::mcset it "&Filter" "&Filtro"
+ ::msgcat::mcset it "Fil&ter:" "Fil&tro:"
+ ::msgcat::mcset it "&Green" "&Verde"
+ ::msgcat::mcset it "Hi" "Salve"
+ ::msgcat::mcset it "&Hide Console" "Nascondi la console"
+ ::msgcat::mcset it "&Ignore" "&Ignora"
+ ::msgcat::mcset it "Invalid file name \"%1\$s\"." "Nome di file non valido \"%1\$s\"."
+ ::msgcat::mcset it "Log Files" "File di log"
+ ::msgcat::mcset it "&No"
+ ::msgcat::mcset it "&OK"
+ ::msgcat::mcset it "OK"
+ ::msgcat::mcset it "Ok"
+ ::msgcat::mcset it "Open" "Apri"
+ ::msgcat::mcset it "&Open" "A&pri"
+ ::msgcat::mcset it "Open Multiple Files" "Apri file multipli"
+ ::msgcat::mcset it "P&aste" "Incolla"
+ ::msgcat::mcset it "&Quit" "Esci"
+ ::msgcat::mcset it "&Red" "&Rosso"
+ ::msgcat::mcset it "Replace existing file?" "Sostituisci il file esistente?"
+ ::msgcat::mcset it "&Retry" "&Riprova"
+ ::msgcat::mcset it "&Save" "&Salva"
+ ::msgcat::mcset it "Save As" "Salva come"
+ ::msgcat::mcset it "Save To Log" "Salva il log"
+ ::msgcat::mcset it "Select Log File" "Scegli un file di log"
+ ::msgcat::mcset it "Select a file to source" "Scegli un file da eseguire"
+ ::msgcat::mcset it "&Selection:" "&Selezione:"
+ ::msgcat::mcset it "Skip Messages" "Salta i messaggi"
+ ::msgcat::mcset it "Source..." "Esegui..."
+ ::msgcat::mcset it "Tcl Scripts" "Script Tcl"
+ ::msgcat::mcset it "Tcl for Windows" "Tcl per Windows"
+ ::msgcat::mcset it "Text Files" "File di testo"
+ ::msgcat::mcset it "&Yes" "&S\u00ec"
+ ::msgcat::mcset it "abort" "interrompi"
+ ::msgcat::mcset it "blue" "blu"
+ ::msgcat::mcset it "cancel" "annulla"
+ ::msgcat::mcset it "extension" "estensione"
+ ::msgcat::mcset it "extensions" "estensioni"
+ ::msgcat::mcset it "green" "verde"
+ ::msgcat::mcset it "ignore" "ignora"
+ ::msgcat::mcset it "ok"
+ ::msgcat::mcset it "red" "rosso"
+ ::msgcat::mcset it "retry" "riprova"
+ ::msgcat::mcset it "yes" "s\u00ec"
+}
diff --git a/mplug_owl2/lib/tk8.6/msgs/nl.msg b/mplug_owl2/lib/tk8.6/msgs/nl.msg
new file mode 100644
index 0000000000000000000000000000000000000000..148a9e6d51d52ba507475150c118ab59528241a5
--- /dev/null
+++ b/mplug_owl2/lib/tk8.6/msgs/nl.msg
@@ -0,0 +1,91 @@
+namespace eval ::tk {
+ ::msgcat::mcset nl "&Abort" "&Afbreken"
+ ::msgcat::mcset nl "&About..." "Over..."
+ ::msgcat::mcset nl "All Files" "Alle Bestanden"
+ ::msgcat::mcset nl "Application Error" "Toepassingsfout"
+ ::msgcat::mcset nl "&Apply" "Toepassen"
+ ::msgcat::mcset nl "Bold" "Vet"
+ ::msgcat::mcset nl "Bold Italic" "Vet Cursief"
+ ::msgcat::mcset nl "&Blue" "&Blauw"
+ ::msgcat::mcset nl "Cancel" "Annuleren"
+ ::msgcat::mcset nl "&Cancel" "&Annuleren"
+ ::msgcat::mcset nl "Cannot change to the directory \"%1\$s\".\nPermission denied." "Kan niet naar map \"%1\$s\" gaan.\nU heeft hiervoor geen toestemming."
+ ::msgcat::mcset nl "Choose Directory" "Kies map"
+ ::msgcat::mcset nl "Cl&ear" "Wissen"
+ ::msgcat::mcset nl "&Clear Console" "&Wis Console"
+ ::msgcat::mcset nl "Color" "Kleur"
+ ::msgcat::mcset nl "Console"
+ ::msgcat::mcset nl "&Copy" "Kopi\u00ebren"
+ ::msgcat::mcset nl "Cu&t" "Knippen"
+ ::msgcat::mcset nl "&Delete" "Wissen"
+ ::msgcat::mcset nl "Details >>"
+ ::msgcat::mcset nl "Directory \"%1\$s\" does not exist." "Map \"%1\$s\" bestaat niet."
+ ::msgcat::mcset nl "&Directory:" "&Map:"
+ ::msgcat::mcset nl "&Edit" "Bewerken"
+ ::msgcat::mcset nl "Effects" "Effecten"
+ ::msgcat::mcset nl "Error: %1\$s" "Fout: %1\$s"
+ ::msgcat::mcset nl "E&xit" "Be\u00ebindigen"
+ ::msgcat::mcset nl "&File" "Bestand"
+ ::msgcat::mcset nl "File \"%1\$s\" already exists.\nDo you want to overwrite it?" "Bestand \"%1\$s\" bestaat al.\nWilt u het overschrijven?"
+ ::msgcat::mcset nl "File \"%1\$s\" already exists.\n\n" "Bestand \"%1\$s\" bestaat al.\n\n"
+ ::msgcat::mcset nl "File \"%1\$s\" does not exist." "Bestand \"%1\$s\" bestaat niet."
+ ::msgcat::mcset nl "File &name:" "Bestands&naam:"
+ ::msgcat::mcset nl "File &names:" "Bestands&namen:"
+ ::msgcat::mcset nl "Files of &type:" "Bestanden van het &type:"
+ ::msgcat::mcset nl "Fi&les:" "&Bestanden:"
+ ::msgcat::mcset nl "&Filter"
+ ::msgcat::mcset nl "Fil&ter:"
+ ::msgcat::mcset nl "Font"
+ ::msgcat::mcset nl "&Font:"
+ ::msgcat::mcset nl "Font st&yle:" "Font stijl:"
+ ::msgcat::mcset nl "&Green" "&Groen"
+ ::msgcat::mcset nl "&Help"
+ ::msgcat::mcset nl "Hi" "H\u00e9"
+ ::msgcat::mcset nl "&Hide Console" "Verberg Console"
+ ::msgcat::mcset nl "&Ignore" "&Negeren"
+ ::msgcat::mcset nl "Invalid file name \"%1\$s\"." "Ongeldige bestandsnaam \"%1\$s\"."
+ ::msgcat::mcset nl "Italic" "Cursief"
+ ::msgcat::mcset nl "Log Files" "Log Bestanden"
+ ::msgcat::mcset nl "&No" "&Nee"
+ ::msgcat::mcset nl "&OK"
+ ::msgcat::mcset nl "OK"
+ ::msgcat::mcset nl "Ok"
+ ::msgcat::mcset nl "Open" "Openen"
+ ::msgcat::mcset nl "&Open" "&Openen"
+ ::msgcat::mcset nl "Open Multiple Files" "Open meerdere bestanden"
+ ::msgcat::mcset nl "P&aste" "Pl&akken"
+ ::msgcat::mcset nl "&Quit" "Stoppen"
+ ::msgcat::mcset nl "&Red" "&Rood"
+ ::msgcat::mcset nl "Regular" "Standaard"
+ ::msgcat::mcset nl "Replace existing file?" "Vervang bestaand bestand?"
+ ::msgcat::mcset nl "&Retry" "&Herhalen"
+ ::msgcat::mcset nl "Sample"
+ ::msgcat::mcset nl "&Save" "Op&slaan"
+ ::msgcat::mcset nl "Save As" "Opslaan als"
+ ::msgcat::mcset nl "Save To Log" "Opslaan naar Log"
+ ::msgcat::mcset nl "Select Log File" "Selecteer Log bestand"
+ ::msgcat::mcset nl "Select a file to source" "Selecteer bronbestand"
+ ::msgcat::mcset nl "&Selection:" "&Selectie:"
+ ::msgcat::mcset nl "&Size:" "Grootte"
+ ::msgcat::mcset nl "Show &Hidden Directories" "Laat verborgen mappen zien"
+ ::msgcat::mcset nl "Show &Hidden Files and Directories" "Laat verborgen bestanden mappen zien"
+ ::msgcat::mcset nl "Skip Messages" "Berichten overslaan"
+ ::msgcat::mcset nl "&Source..." "Bron..."
+ ::msgcat::mcset nl "Stri&keout"
+ ::msgcat::mcset nl "Tcl Scripts"
+ ::msgcat::mcset nl "Tcl for Windows" "Tcl voor Windows"
+ ::msgcat::mcset nl "Text Files" "Tekstbestanden"
+ ::msgcat::mcset nl "&Underline" "Onderstreept"
+ ::msgcat::mcset nl "&Yes" "&Ja"
+ ::msgcat::mcset nl "abort" "afbreken"
+ ::msgcat::mcset nl "blue" "blauw"
+ ::msgcat::mcset nl "cancel" "annuleren"
+ ::msgcat::mcset nl "extension"
+ ::msgcat::mcset nl "extensions"
+ ::msgcat::mcset nl "green" "groen"
+ ::msgcat::mcset nl "ignore" "negeren"
+ ::msgcat::mcset nl "ok"
+ ::msgcat::mcset nl "red" "rood"
+ ::msgcat::mcset nl "retry" "opnieuw"
+ ::msgcat::mcset nl "yes" "ja"
+}
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..244c63f4f01e81491de9e9d1ff1a6a8540a875dd
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfd358482706b94d76a833b4d014be415ab39db6
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..54037995229f829e961f96670b86066097d69471
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
+
+
+_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
+
+
+if TYPE_CHECKING:
+ from .tokenization_herbert import HerbertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_herbert_fast import HerbertTokenizerFast
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce105a236ec38c6fe6f7257c5efac7f76bd7775a
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fab8f26d699e35b5c67f0122dc7fd2a196257ec
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd240dbe1666e7b60f6149afbf26ce4ec5728af8
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..91ce0dcca584630c96ac23a1aaf62485be041511
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py
@@ -0,0 +1,659 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import os
+import re
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
+ },
+ "merges_file": {
+ "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
+PRETRAINED_INIT_CONFIGURATION = {}
+
+
+# Copied from transformers.models.xlm.tokenization_xlm.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
+ strings)
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
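+# Illustrative note (added here, not part of the upstream transformers file): with a BPE
+# "word" represented as a tuple of symbols, get_pairs(("l", "o", "w")) returns
+# {("l", "o"), ("o", "w")} -- the candidate merges examined by the tokenizer's BPE step.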
+
+# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
+def replace_unicode_punct(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
+ """
+ text = text.replace(",", ",")
+ text = re.sub(r"。\s*", ". ", text)
+ text = text.replace("、", ",")
+ text = text.replace("”", '"')
+ text = text.replace("“", '"')
+ text = text.replace("∶", ":")
+ text = text.replace(":", ":")
+ text = text.replace("?", "?")
+ text = text.replace("《", '"')
+ text = text.replace("》", '"')
+ text = text.replace(")", ")")
+ text = text.replace("!", "!")
+ text = text.replace("(", "(")
+ text = text.replace(";", ";")
+ text = text.replace("1", "1")
+ text = text.replace("」", '"')
+ text = text.replace("「", '"')
+ text = text.replace("0", "0")
+ text = text.replace("3", "3")
+ text = text.replace("2", "2")
+ text = text.replace("5", "5")
+ text = text.replace("6", "6")
+ text = text.replace("9", "9")
+ text = text.replace("7", "7")
+ text = text.replace("8", "8")
+ text = text.replace("4", "4")
+ text = re.sub(r".\s*", ". ", text)
+ text = text.replace("~", "~")
+ text = text.replace("’", "'")
+ text = text.replace("…", "...")
+ text = text.replace("━", "-")
+ text = text.replace("〈", "<")
+ text = text.replace("〉", ">")
+ text = text.replace("【", "[")
+ text = text.replace("】", "]")
+ text = text.replace("%", "%")
+ return text
+
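+# Illustrative note (added here, not part of the upstream file): this helper maps
+# fullwidth/CJK punctuation to its ASCII equivalent, e.g.
+# replace_unicode_punct("（１）") == "(1)".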
+
+# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
+def remove_non_printing_char(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
+ """
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat.startswith("C"):
+ continue
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
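+# Illustrative note (added here, not part of the upstream file):
+# whitespace_tokenize("  foo   bar ") == ["foo", "bar"].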
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*)
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like the all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+class HerbertTokenizer(PreTrainedTokenizer):
+ """
+ Construct a BPE tokenizer for HerBERT.
+
+ Peculiarities:
+
+ - uses BERT's pre-tokenizer: BasicTokenizer splits tokens on spaces and also on punctuation. Each occurrence of a
+ punctuation character is treated separately.
+
+ - Such pre-tokenized input is then BPE sub-tokenized.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
+ superclass for more information regarding methods.
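+
+ Example (a minimal sketch; the exact sub-word splits depend on the checkpoint's vocabulary, and this slow
+ tokenizer additionally requires the `sacremoses` package):
+
+ ```python
+ >>> from transformers import HerbertTokenizer
+
+ >>> tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
+ >>> tokens = tokenizer.tokenize("Kto ma lepszą sztukę, ma lepszy rząd")
+ >>> input_ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(tokens))
+ ```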
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ tokenizer_file=None,
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ sep_token="",
+ bos_token="",
+ do_lowercase_and_remove_accent=False,
+ additional_special_tokens=[
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ],
+ lang2id=None,
+ id2lang=None,
+ **kwargs,
+ ):
+ super().__init__(
+ unk_token=unk_token,
+ bos_token=bos_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ lang2id=lang2id,
+ id2lang=id2lang,
+ do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
+ tokenizer_file=None,
+ **kwargs,
+ )
+
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use HerbertTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
+
+ # cache of sm.MosesPunctNormalizer instance
+ self.cache_moses_punct_normalizer = {}
+ # cache of sm.MosesTokenizer instance
+ self.cache_moses_tokenizer = {}
+ self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
+ # True for current supported model (v1.2.0), False for XLM-17 & 100
+ self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
+ self.lang2id = lang2id
+ self.id2lang = id2lang
+ if lang2id is not None and id2lang is not None:
+ assert len(lang2id) == len(id2lang)
+
+ self.ja_word_tokenizer = None
+ self.zh_word_tokenizer = None
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:2]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ self.bert_pre_tokenizer = BasicTokenizer(
+ do_lower_case=False,
+ never_split=self.all_special_tokens,
+ tokenize_chinese_chars=False,
+ strip_accents=False,
+ )
+
+ @property
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.do_lowercase_and_remove_accent
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
+ def moses_punct_norm(self, text, lang):
+ if lang not in self.cache_moses_punct_normalizer:
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
+ else:
+ punct_normalizer = self.cache_moses_punct_normalizer[lang]
+ return punct_normalizer.normalize(text)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
+ def moses_tokenize(self, text, lang):
+ if lang not in self.cache_moses_tokenizer:
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
+ else:
+ moses_tokenizer = self.cache_moses_tokenizer[lang]
+ return moses_tokenizer.tokenize(text, return_str=False, escape=False)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
+ def moses_pipeline(self, text, lang):
+ text = replace_unicode_punct(text)
+ text = self.moses_punct_norm(text, lang)
+ text = remove_non_printing_char(text)
+ return text
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
+ def ja_tokenize(self, text):
+ if self.ja_word_tokenizer is None:
+ try:
+ import Mykytea
+
+ self.ja_word_tokenizer = Mykytea.Mykytea(
+ f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
+ )
+ except (AttributeError, ImportError):
+ logger.error(
+ "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
+ " (https://github.com/chezou/Mykytea-python) with the following steps"
+ )
+ logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
+ logger.error("2. autoreconf -i")
+ logger.error("3. ./configure --prefix=$HOME/local")
+ logger.error("4. make && make install")
+ logger.error("5. pip install kytea")
+ raise
+ return list(self.ja_word_tokenizer.getWS(text))
+
+ @property
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.encoder)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
+ def bpe(self, token):
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + ""
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ if word == "\n ":
+ word = "\n"
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ pre_tokens = self.bert_pre_tokenizer.tokenize(text)
+
+ split_tokens = []
+ for token in pre_tokens:
+ if token:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+
+ return split_tokens
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = "".join(tokens).replace("", " ").strip()
+ return out_string
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLM sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+
+ """
+ bos = [self.bos_token_id]
+ sep = [self.sep_token_id]
+
+ if token_ids_1 is None:
+ return bos + token_ids_0 + sep
+ return bos + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sm"] = None
+ return state
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use XLMTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e38c1c5ee7bd9d0cfbff7750ae592555c94335
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_herbert import HerbertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
+ },
+ "merges_file": {
+ "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
+PRETRAINED_INIT_CONFIGURATION = {}
+
+
+class HerbertTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
+
+ Peculiarities:
+
+ - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
+ a punctuation character will be treated separately.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the methods. Users should refer to
+ the superclass for more information regarding methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
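+
+ Example (a minimal sketch; assumes the same `allegro/herbert-base-cased` checkpoint referenced in the vocab map
+ above):
+
+ ```python
+ >>> from transformers import HerbertTokenizerFast
+
+ >>> tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
+ >>> encoding = tokenizer("Kto ma lepszą sztukę, ma lepszy rząd")
+ >>> decoded = tokenizer.decode(encoding["input_ids"])
+ ```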
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ slow_tokenizer_class = HerbertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ sep_token="",
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ sep_token=sep_token,
+ **kwargs,
+ )
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ if token_ids_1 is None:
+ return cls + token_ids_0 + sep
+
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A HerBERT
+ sequence pair mask, like BERT's, has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff218cfdddae357fdf56da39dfcb68c01a851799
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/processing_instructblip.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py b/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab4fa0f6753df3932a252156715c6125d4df572b
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py
@@ -0,0 +1,172 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for InstructBLIP. Largely a copy of Blip2Processor, with the addition of a tokenizer for the Q-Former.
+"""
+
+import os
+from typing import List, Optional, Union
+
+from ...image_processing_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+from ..auto import AutoTokenizer
+
+
+class InstructBlipProcessor(ProcessorMixin):
+ r"""
+ Constructs an InstructBLIP processor which wraps a BLIP image processor and a LLaMa/T5 tokenizer into a single
+ processor.
+
+ [`InstructBlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the
+ docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`BlipImageProcessor`):
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
+ tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
+ qformer_tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
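+
+ Example (an illustrative sketch only; the checkpoint name and the image URL below are assumptions for
+ demonstration, not part of this module):
+
+ ```python
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> from transformers import InstructBlipProcessor
+
+ >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")  # assumed checkpoint
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed example image
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
+ >>> # inputs contains input_ids, attention_mask, qformer_input_ids, qformer_attention_mask and pixel_values
+ ```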
+ """
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "BlipImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer, qformer_tokenizer):
+ super().__init__(image_processor, tokenizer)
+
+ # add QFormer tokenizer
+ self.qformer_tokenizer = qformer_tokenizer
+
+ def __call__(
+ self,
+ images: ImageInput = None,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_token_type_ids: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ This method uses [`BlipImageProcessor.__call__`] to prepare image(s) for the model, and
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ if images is None and text is None:
+ raise ValueError("You have to specify at least images or text.")
+
+ encoding = BatchFeature()
+
+ if text is not None:
+ text_encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_token_type_ids=return_token_type_ids,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ encoding.update(text_encoding)
+ qformer_text_encoding = self.qformer_tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_token_type_ids=return_token_type_ids,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
+ encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
+
+ if images is not None:
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
+ encoding.update(image_encoding)
+
+ return encoding
+
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ # overwrite to save the Q-Former tokenizer in a separate folder
+ def save_pretrained(self, save_directory, **kwargs):
+ if os.path.isfile(save_directory):
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
+ os.makedirs(save_directory, exist_ok=True)
+ qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
+ self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
+ return super().save_pretrained(save_directory, **kwargs)
+
+ # overwrite to load the Q-Former tokenizer from a separate folder
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
+ args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
+ args.append(qformer_tokenizer)
+ return cls(*args)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96fba4d47b5e755ea40dd00df466b09b4e98ad5
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py
@@ -0,0 +1,70 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_jukebox": [
+ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "JukeboxConfig",
+ "JukeboxPriorConfig",
+ "JukeboxVQVAEConfig",
+ ],
+ "tokenization_jukebox": ["JukeboxTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_jukebox"] = [
+ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "JukeboxModel",
+ "JukeboxPreTrainedModel",
+ "JukeboxVQVAE",
+ "JukeboxPrior",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_jukebox import (
+ JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ JukeboxConfig,
+ JukeboxPriorConfig,
+ JukeboxVQVAEConfig,
+ )
+ from .tokenization_jukebox import JukeboxTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_jukebox import (
+ JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
+ JukeboxModel,
+ JukeboxPreTrainedModel,
+ JukeboxPrior,
+ JukeboxVQVAE,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7da9394717f25dbbbb1aac8a81974fe2370bc305
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb636cc13dd1ae0f5b11ba96a6e4b3be2c0a3f5f
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1960436a02a8bce3fb2f720b5530588731d7b8de
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d2ff12dd849030ceb122133e87c6194b7c7e85e
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41809574b3f2baae5ebd764a85757c24ac821528
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a8f0a0072cfcce8e73c9a1343d06d83a249c96
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py
@@ -0,0 +1,614 @@
+# coding=utf-8
+# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Jukebox configuration"""
+
+import os
+from typing import List, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "openai/jukebox-5b-lyrics": "https://huggingface.co/openai/jukebox-5b-lyrics/blob/main/config.json",
+ "openai/jukebox-1b-lyrics": "https://huggingface.co/openai/jukebox-1b-lyrics/blob/main/config.json",
+}
+
+_LARGE_ATTENTION = [
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "block_attn",
+ "transpose_block_attn",
+ "prev_block_attn",
+ "cross_attention",
+]
+_RawColumnPreviousRowAttention = ["block_attn", "transpose_block_attn", "prev_block_attn"]
+_FullDenseAttention = ["dense_attention"]
+_PrimePrimeDenseAttention = ["prime_attn", "prime_attn", "dense_attn"]
+
+
+def full_dense_attention(layer):
+ return _FullDenseAttention[0]
+
+
+def raw_column_previous_row_attention(layer):
+ return _RawColumnPreviousRowAttention[layer % 3]
+
+
+def large_separated_enc_dec_w_lyrics(layer):
+ return _LARGE_ATTENTION[layer % 79]
+
+
+def enc_dec_with_lyrics(layer):
+ if layer % 16 == 15:
+ return _PrimePrimeDenseAttention[layer % 3]
+ return _RawColumnPreviousRowAttention[layer % 3]
+
+
+ATTENTION_PATTERNS = {
+ "full_dense_attention": full_dense_attention,
+ "raw_column_previous_row_attention": raw_column_previous_row_attention, # Alternate row, column and previous row attn
+ "large_separated_enc_dec_w_lyrics": large_separated_enc_dec_w_lyrics, # Used by large separated_enc_dec model with lyrics
+ "enc_dec_with_lyrics": enc_dec_with_lyrics, # Used by encoder_decoder model with lyrics
+}
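+
+# Worked example (derived from the helpers above): with the "enc_dec_with_lyrics" pattern, layers 0, 1 and 2 map to
+# "block_attn", "transpose_block_attn" and "prev_block_attn" respectively, while every 16th layer (layer % 16 == 15)
+# uses the prime/dense pattern instead, e.g. layer 15 -> "prime_attn" and layer 47 -> "dense_attn".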
+
+
+class JukeboxPriorConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a
+ `JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the top level prior from the
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+
+ Args:
+ act_fn (`str`, *optional*, defaults to `"quick_gelu"`):
+ Activation function.
+ alignment_head (`int`, *optional*, defaults to 2):
+ Head that is responsible for the alignment between lyrics and music. Only used to compute the lyric-to-audio
+ alignment.
+ alignment_layer (`int`, *optional*, defaults to 68):
+ Index of the layer that is responsible for the alignment between lyrics and music. Only used to compute the
+ lyric-to-audio alignment.
+ attention_multiplier (`float`, *optional*, defaults to 0.25):
+ Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that
+ 0.25*width of the model will be used.
+ attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`):
+ Which attention pattern to use for the decoder.
+ attn_dropout (`int`, *optional*, defaults to 0):
+ Dropout probability for the post-attention layer dropout in the decoder.
+ attn_res_scale (`bool`, *optional*, defaults to `False`):
+ Whether or not to scale the residuals in the attention conditioner block.
+ blocks (`int`, *optional*, defaults to 64):
+ Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len //
+ blocks]` in the `JukeboxAttention` layer.
+ conv_res_scale (`int`, *optional*):
+ Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a
+ conditioner, the default value is None and should not be modified.
+ num_layers (`int`, *optional*, defaults to 72):
+ Number of layers of the transformer architecture.
+ emb_dropout (`int`, *optional*, defaults to 0):
+ Embedding dropout used in the lyric decoder.
+ encoder_config (`JukeboxPriorConfig`, *optional*):
+ Configuration of the encoder which models the prior on the lyrics.
+ encoder_loss_fraction (`float`, *optional*, defaults to 0.4):
+ Multiplication factor used in front of the lyric encoder loss.
+ hidden_size (`int`, *optional*, defaults to 2048):
+ Hidden dimension of the attention layers.
+ init_scale (`float`, *optional*, defaults to 0.2):
+ Initialization scales for the prior modules.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is
+ greater than 0, the `encoder` args should be specified for the lyric encoding.
+ mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to mask the previous positions in the attention.
+ max_duration (`int`, *optional*, defaults to 600):
+ Maximum supported duration of the generated song in seconds.
+ max_nb_genres (`int`, *optional*, defaults to 1):
+ Maximum number of genres that can be used to condition the model.
+ merged_decoder (`bool`, *optional*, defaults to `True`):
+ Whether or not the decoder and the encoder inputs are merged. This is used for the separated
+ encoder-decoder architecture
+ metadata_conditioning (`bool`, *optional*, defaults to `True`):
+ Whether or not to condition on the artist and genre metadata.
+ metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`):
+ Number of genres and the number of artists that were used to train the embedding layers of the prior
+ models.
+ min_duration (`int`, *optional*, defaults to 0):
+ Minimum duration of the generated audio on which the model was trained.
+ mlp_multiplier (`float`, *optional*, defaults to 1.0):
+ Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of
+ the model will be used.
+ music_vocab_size (`int`, *optional*, defaults to 2048):
+ Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`.
+ n_ctx (`int`, *optional*, defaults to 6144):
+ Number of context tokens for each prior. The context tokens are the music tokens that are attended to when
+ generating music tokens.
+ n_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads.
+ nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384):
+ Number of lyric tokens that are used when sampling a single window of length `n_ctx`
+ res_conv_depth (`int`, *optional*, defaults to 3):
+ Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
+ `JukeboxMusicTokenConditioner`.
+ res_conv_width (`int`, *optional*, defaults to 128):
+ Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
+ `JukeboxMusicTokenConditioner`.
+ res_convolution_multiplier (`int`, *optional*, defaults to 1):
+ Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`.
+ res_dilation_cycle (`int`, *optional*):
+ Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the
+ corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level
+ tokens.
+ res_dilation_growth_rate (`int`, *optional*, defaults to 1):
+ Dilation growth rate used between each convolutional block of the `JukeboxMusicTokenConditioner`.
+ res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
+ Downsampling rates used in the audio conditioning network
+ res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
+ Striding used in the audio conditioning network
+ resid_dropout (`int`, *optional*, defaults to 0):
+ Residual dropout used in the attention pattern.
+ sampling_rate (`int`, *optional*, defaults to 44100):
+ Sampling rate used for training.
+ spread (`int`, *optional*):
+ Spread used in the `summary_spread_attention` pattern
+ timing_dims (`int`, *optional*, defaults to 64):
+ Dimension of the timing embedding.
+ zero_out (`bool`, *optional*, defaults to `False`):
+ Whether or not to zero out convolution weights when initializing.
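+
+ Example (a minimal sketch, mirroring the `JukeboxConfig` example below):
+
+ ```python
+ >>> from transformers import JukeboxPriorConfig
+
+ >>> # Initializing a JukeboxPrior configuration with default values
+ >>> configuration = JukeboxPriorConfig()
+ ```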
+ """
+
+ model_type = "jukebox_prior"
+ attribute_map = {
+ "max_position_embeddings": "n_positions",
+ "num_attention_heads": "n_head",
+ }
+
+ def __init__(
+ self,
+ act_fn="quick_gelu",
+ level=0,
+ alignment_head=2,
+ alignment_layer=68,
+ attention_multiplier=0.25,
+ attention_pattern="enc_dec_with_lyrics",
+ attn_dropout=0,
+ attn_res_scale=False,
+ blocks=64,
+ conv_res_scale=None,
+ num_layers=72,
+ emb_dropout=0,
+ encoder_config=None,
+ encoder_loss_fraction=0.4,
+ hidden_size=2048,
+ init_scale=0.2,
+ is_encoder_decoder=True,
+ lyric_vocab_size=80,
+ mask=False,
+ max_duration=600,
+ max_nb_genres=1,
+ merged_decoder=True,
+ metadata_conditioning=True,
+ metadata_dims=[604, 7898],
+ min_duration=0,
+ mlp_multiplier=1.0,
+ music_vocab_size=2048,
+ n_ctx=6144,
+ n_heads=2,
+ nb_relevant_lyric_tokens=384,
+ res_conv_depth=3,
+ res_conv_width=128,
+ res_convolution_multiplier=1,
+ res_dilation_cycle=None,
+ res_dilation_growth_rate=1,
+ res_downs_t=[3, 2, 2],
+ res_strides_t=[2, 2, 2],
+ resid_dropout=0,
+ sampling_rate=44100,
+ spread=None,
+ timing_dims=64,
+ zero_out=False,
+ **kwargs,
+ ):
+ self.act_fn = act_fn
+ self.alignment_head = alignment_head
+ self.alignment_layer = alignment_layer
+ self.attention_multiplier = attention_multiplier
+ self.attention_pattern = attention_pattern
+ self.attn_dropout = attn_dropout
+ self.attn_res_scale = attn_res_scale
+ self.blocks = blocks
+ self.conv_res_scale = conv_res_scale
+ self.num_layers = num_layers
+ self.emb_dropout = emb_dropout
+ self.music_vocab_size = music_vocab_size
+ if encoder_config is not None:
+ self.encoder_config = JukeboxPriorConfig(**encoder_config)
+ else:
+ self.encoder_config = None
+ self.encoder_loss_fraction = encoder_loss_fraction
+ self.init_scale = init_scale
+ self.is_encoder_decoder = is_encoder_decoder
+ self.lyric_vocab_size = lyric_vocab_size
+ self.level = level
+ self.mask = mask
+ self.max_duration = max_duration
+ self.max_nb_genres = max_nb_genres
+ self.merged_decoder = merged_decoder
+ self.metadata_conditioning = metadata_conditioning
+ self.metadata_dims = metadata_dims
+ self.min_duration = min_duration
+ self.mlp_multiplier = mlp_multiplier
+ self.n_ctx = n_ctx
+ self.n_heads = n_heads
+ self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens
+ self.res_conv_depth = res_conv_depth
+ self.res_conv_width = res_conv_width
+ self.res_convolution_multiplier = res_convolution_multiplier
+ self.res_dilation_cycle = res_dilation_cycle
+ self.res_dilation_growth_rate = res_dilation_growth_rate
+ self.res_downs_t = res_downs_t
+ self.res_strides_t = res_strides_t
+ self.resid_dropout = resid_dropout
+ self.sampling_rate = sampling_rate
+ self.spread = spread
+ self.timing_dims = timing_dims
+ self.hidden_size = hidden_size
+ self.zero_out = zero_out
+
+ @classmethod
+ def from_pretrained(
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs
+ ) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the prior config dict if we are loading from JukeboxConfig
+ if config_dict.get("model_type") == "jukebox":
+ config_dict = config_dict[f"prior_{level}"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class JukeboxVQVAEConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a
+ `JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the VQVAE from
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ act_fn (`str`, *optional*, defaults to `"relu"`):
+ Activation function of the model.
+ nb_discrete_codes (`int`, *optional*, defaults to 2048):
+ Number of codes of the VQVAE.
+ commit (`float`, *optional*, defaults to 0.02):
+ Commit loss multiplier.
+ conv_input_shape (`int`, *optional*, defaults to 1):
+ Number of audio channels.
+ conv_res_scale (`bool`, *optional*, defaults to `False`):
+ Whether or not to scale the residuals of the `JukeboxResConv1DBlock`.
+ embed_dim (`int`, *optional*, defaults to 64):
+ Embedding dimension of the codebook vectors.
+ hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`):
+ Fraction of non-intersecting window used when continuing the sampling process.
+ levels (`int`, *optional*, defaults to 3):
+ Number of hierarchical levels used in the VQVAE.
+ lmu (`float`, *optional*, defaults to 0.99):
+ Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1
+ of the original [VQVAE paper](https://arxiv.org/pdf/1711.00937v2.pdf)
+ multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
+ Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth`
+ res_conv_depth (`int`, *optional*, defaults to 4):
+ Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
+ res_conv_width (`int`, *optional*, defaults to 32):
+ Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
+ res_convolution_multiplier (`int`, *optional*, defaults to 1):
+ Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`.
+ res_dilation_cycle (`int`, *optional*):
+ Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth
+ reduced by a power of `res_dilation_cycle`.
+ res_dilation_growth_rate (`int`, *optional*, defaults to 3):
+ Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth)
+ res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
+ Downsampling rate for each level of the hierarchical VQ-VAE.
+ res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
+ Stride used for each level of the hierarchical VQ-VAE.
+ sample_length (`int`, *optional*, defaults to 1058304):
+ Provides the max input shape of the VQVAE. Is used to compute the input shape of each level.
+ init_scale (`float`, *optional*, defaults to 0.2):
+ Initialization scale.
+ zero_out (`bool`, *optional*, defaults to `False`):
+ Whether or not to zero out convolution weights when initializing.
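+
+ Example (a minimal sketch showing default instantiation):
+
+ ```python
+ >>> from transformers import JukeboxVQVAEConfig
+
+ >>> # Initializing a JukeboxVQVAE configuration with default values
+ >>> configuration = JukeboxVQVAEConfig()
+ ```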
+ """
+
+ model_type = "jukebox_vqvae"
+
+ def __init__(
+ self,
+ act_fn="relu",
+ nb_discrete_codes=2048,
+ commit=0.02,
+ conv_input_shape=1,
+ conv_res_scale=False,
+ embed_dim=64,
+ hop_fraction=[0.125, 0.5, 0.5],
+ levels=3,
+ lmu=0.99,
+ multipliers=[2, 1, 1],
+ res_conv_depth=4,
+ res_conv_width=32,
+ res_convolution_multiplier=1,
+ res_dilation_cycle=None,
+ res_dilation_growth_rate=3,
+ res_downs_t=[3, 2, 2],
+ res_strides_t=[2, 2, 2],
+ sample_length=1058304,
+ init_scale=0.2,
+ zero_out=False,
+ **kwargs,
+ ):
+ self.hop_fraction = hop_fraction
+ self.conv_input_shape = conv_input_shape
+ self.sample_length = sample_length
+
+ # VQVAE parameters (all used)
+ self.levels = levels
+ self.embed_dim = embed_dim
+ self.nb_discrete_codes = nb_discrete_codes
+ self.res_conv_width = res_conv_width
+ self.res_conv_depth = res_conv_depth
+ self.res_convolution_multiplier = res_convolution_multiplier
+ self.res_dilation_growth_rate = res_dilation_growth_rate
+ self.res_dilation_cycle = res_dilation_cycle
+ self.multipliers = multipliers
+ self.res_downs_t = res_downs_t
+ self.res_strides_t = res_strides_t
+ self.lmu = lmu
+ self.commit = commit
+ self.conv_res_scale = conv_res_scale
+ self.act_fn = act_fn
+ self.init_scale = init_scale
+ self.zero_out = zero_out
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vqvae config dict if we are loading from JukeboxConfig
+ if config_dict.get("model_type") == "jukebox":
+ config_dict = config_dict["vqvae_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class JukeboxConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`JukeboxModel`].
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will
+ yield a similar configuration to that of
+ [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
+
+
+ The downsampling rates and strides are used to determine downsampling of the input sequence. For example,
+ downsampling = (5, 3) and strides = (2, 2) will downsample the audio by 2**5 = 32 to get the first level of codes,
+ and by 2**(5 + 3) = 256 to get the second level of codes. This is mostly relevant when training the top level prior
+ and the upsamplers.
+
+ Args:
+ vqvae_config (`JukeboxVQVAEConfig`, *optional*):
+ Configuration for the `JukeboxVQVAE` model.
+ prior_config_list (`List[JukeboxPriorConfig]`, *optional*):
+ List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors.
+ nb_priors (`int`, *optional*, defaults to 3):
+ Number of prior models that will sequentially sample tokens. Each prior is a conditional autoregressive
+ (decoder) model, apart from the top prior, which can include a lyric encoder. The available models were
+ trained using a top prior and 2 upsampler priors.
+ sampling_rate (`int`, *optional*, defaults to 44100):
+ Sampling rate of the raw audio.
+ timing_dims (`int`, *optional*, defaults to 64):
+ Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding
+ layer. The timing embedding layer converts the absolute and relative position in the currently sampled
+ audio to a tensor of length `timing_dims` that will be added to the music tokens.
+ min_duration (`int`, *optional*, defaults to 0):
+ Minimum duration of the audio to generate.
+ max_duration (`float`, *optional*, defaults to 600.0):
+ Maximum duration of the audio to generate.
+ max_nb_genres (`int`, *optional*, defaults to 5):
+ Maximum number of genres that can be used to condition a single sample.
+ metadata_conditioning (`bool`, *optional*, defaults to `True`):
+ Whether or not to use metadata conditioning, corresponding to the artist, the genre and the min/maximum
+ duration.
+
+ Example:
+
+ ```python
+ >>> from transformers import JukeboxModel, JukeboxConfig
+
+ >>> # Initializing a Jukebox configuration
+ >>> configuration = JukeboxConfig()
+
+ >>> # Initializing a model from the configuration
+ >>> model = JukeboxModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "jukebox"
+
+ def __init__(
+ self,
+ vqvae_config=None,
+ prior_config_list=None,
+ nb_priors=3,
+ sampling_rate=44100,
+ timing_dims=64,
+ min_duration=0,
+ max_duration=600.0,
+ max_nb_genres=5,
+ metadata_conditioning=True,
+ **kwargs,
+ ):
+ if vqvae_config is None:
+ vqvae_config = {}
+ logger.info("vqvae_config is None. initializing the JukeboxVQVAE with default values.")
+
+ self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config)
+ if prior_config_list is not None:
+ self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list]
+ else:
+ self.prior_configs = []
+ for prior_idx in range(nb_priors):
+ prior_config = kwargs.pop(f"prior_{prior_idx}", None)
+ if prior_config is None:
+ prior_config = {}
+ logger.info(
+ f"prior_{prior_idx}'s config is None. Initializing the JukeboxPriorConfig list with default"
+ " values."
+ )
+ self.prior_configs.append(JukeboxPriorConfig(**prior_config))
+
+ self.hop_fraction = self.vqvae_config.hop_fraction
+
+ self.nb_priors = nb_priors
+
+ # Metadata conditioning
+ self.max_nb_genres = max_nb_genres
+ self.sampling_rate = sampling_rate
+ self.timing_dims = timing_dims
+ self.min_duration = min_duration
+ self.max_duration = max_duration
+ self.metadata_conditioning = metadata_conditioning
+
+ super().__init__(**kwargs)
+
+ @classmethod
+ def from_configs(cls, prior_configs: List[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs):
+ r"""
+ Instantiate a [`JukeboxConfig`] (or a derived class) from a list of prior configurations and a VQ-VAE model
+ configuration.
+
+ Returns:
+ [`JukeboxConfig`]: An instance of a configuration object
+ """
+ prior_config_list = [config.to_dict() for config in prior_configs]
+ return cls(prior_config_list=prior_config_list, vqvae_config_dict=vqvae_config.to_dict(), **kwargs)
+
+ def to_dict(self):
+ # Override the default to_dict to apply to_dict to the list of prior configs.
+ result = super().to_dict()
+ result["prior_config_list"] = [config.to_dict() for config in result.pop("prior_configs")]
+ return result
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py
new file mode 100644
index 0000000000000000000000000000000000000000..b56a25c57c70d113bfa12003fa92a86e272f8e86
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py
@@ -0,0 +1,279 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Jukebox checkpoints"""
+
+import argparse
+import json
+import os
+from pathlib import Path
+
+import requests
+import torch
+
+from transformers import JukeboxConfig, JukeboxModel
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
+MODEL_MAPPING = {
+ "jukebox-1b-lyrics": [
+ "5b/vqvae.pth.tar",
+ "5b/prior_level_0.pth.tar",
+ "5b/prior_level_1.pth.tar",
+ "1b_lyrics/prior_level_2.pth.tar",
+ ],
+ "jukebox-5b-lyrics": [
+ "5b/vqvae.pth.tar",
+ "5b/prior_level_0.pth.tar",
+ "5b/prior_level_1.pth.tar",
+ "5b_lyrics/prior_level_2.pth.tar",
+ ],
+}
+
+
+def replace_key(key):
+ if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
+ key = key.replace(".model.1.bias", ".conv1d_1.bias")
+ elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
+ key = key.replace(".model.1.weight", ".conv1d_1.weight")
+ elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
+ key = key.replace(".model.3.bias", ".conv1d_2.bias")
+ elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
+ key = key.replace(".model.3.weight", ".conv1d_2.weight")
+
+ if "conditioner_blocks.0." in key:
+ key = key.replace("conditioner_blocks.0", "conditioner_blocks")
+
+ if "prime_prior" in key:
+ key = key.replace("prime_prior", "encoder")
+
+ if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
+ key = key.replace(".emb.", ".")
+
+ if key.endswith("k"): # replace vqvae.X.k with vqvae.X.codebook
+ return key.replace(".k", ".codebook")
+ if "y_emb." in key:
+ return key.replace("y_emb.", "metadata_embedding.")
+
+ if "x_emb.emb." in key:
+ key = key.replace("0.x_emb.emb", "embed_tokens")
+
+ if "prime_state_ln" in key:
+ return key.replace("prime_state_ln", "encoder.final_layer_norm")
+ if ".ln" in key:
+ return key.replace(".ln", ".layer_norm")
+ if "_ln" in key:
+ return key.replace("_ln", "_layer_norm")
+
+ if "prime_state_proj" in key:
+ return key.replace("prime_state_proj", "encoder.proj_in")
+ if "prime_x_out" in key:
+ return key.replace("prime_x_out", "encoder.lm_head")
+ if "prior.x_out" in key:
+ return key.replace("x_out", "fc_proj_out")
+ if "x_emb" in key:
+ return key.replace("x_emb", "embed_tokens")
+
+ return key
+
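+# Illustrative sketch of the suffix rewrites performed by `replace_key` (hypothetical keys, shown as comments only):
+#
+#     replace_key("prime_state_ln.weight")   # -> "encoder.final_layer_norm.weight"
+#     replace_key("prior.x_out.weight")      # -> "prior.fc_proj_out.weight"
+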
+
+def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
+ new_dict = {}
+ import re
+
+ re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
+ re_encoder_block_resnet = re.compile(
+ r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
+ )
+ re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
+
+ re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
+ re_decoder_block_resnet = re.compile(
+ r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
+ )
+ re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
+
+ re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
+ re_prior_cond_resnet = re.compile(
+ r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
+ )
+ re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
+
+ for original_key, value in state_dict.items():
+ # rename vqvae.encoder keys
+ if re_encoder_block_conv_in.fullmatch(original_key):
+ regex_match = re_encoder_block_conv_in.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[2]) * 2 + int(groups[3])
+ re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
+ key = re_encoder_block_conv_in.sub(re_new_key, original_key)
+
+ elif re_encoder_block_resnet.fullmatch(original_key):
+ regex_match = re_encoder_block_resnet.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[2]) * 2 + int(groups[3])
+ conv_index = {"1": 1, "3": 2}[groups[-2]]
+ prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
+ resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
+ re_new_key = prefix + resnet_block
+ key = re_encoder_block_resnet.sub(re_new_key, original_key)
+
+ elif re_encoder_block_proj_out.fullmatch(original_key):
+ regex_match = re_encoder_block_proj_out.match(original_key)
+ groups = regex_match.groups()
+ re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
+ key = re_encoder_block_proj_out.sub(re_new_key, original_key)
+
+ # rename vqvae.decoder keys
+ elif re_decoder_block_conv_out.fullmatch(original_key):
+ regex_match = re_decoder_block_conv_out.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[2]) * 2 + int(groups[3]) - 2
+ re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
+ key = re_decoder_block_conv_out.sub(re_new_key, original_key)
+
+ elif re_decoder_block_resnet.fullmatch(original_key):
+ regex_match = re_decoder_block_resnet.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[2]) * 2 + int(groups[3]) - 2
+ conv_index = {"1": 1, "3": 2}[groups[-2]]
+ prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
+ resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
+ re_new_key = prefix + resnet_block
+ key = re_decoder_block_resnet.sub(re_new_key, original_key)
+
+ elif re_decoder_block_proj_in.fullmatch(original_key):
+ regex_match = re_decoder_block_proj_in.match(original_key)
+ groups = regex_match.groups()
+ re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
+ key = re_decoder_block_proj_in.sub(re_new_key, original_key)
+
+ # rename prior cond.model to upsampler.upsample_block and resnet
+ elif re_prior_cond_conv_out.fullmatch(original_key):
+ regex_match = re_prior_cond_conv_out.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[1]) * 2 + int(groups[2]) - 2
+ re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
+ key = re_prior_cond_conv_out.sub(re_new_key, original_key)
+
+ elif re_prior_cond_resnet.fullmatch(original_key):
+ regex_match = re_prior_cond_resnet.match(original_key)
+ groups = regex_match.groups()
+ block_index = int(groups[1]) * 2 + int(groups[2]) - 2
+ conv_index = {"1": 1, "3": 2}[groups[-2]]
+ prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
+ resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
+ re_new_key = prefix + resnet_block
+ key = re_prior_cond_resnet.sub(re_new_key, original_key)
+
+ elif re_prior_cond_proj_in.fullmatch(original_key):
+ regex_match = re_prior_cond_proj_in.match(original_key)
+ groups = regex_match.groups()
+ re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
+ key = re_prior_cond_proj_in.sub(re_new_key, original_key)
+
+ # keep original key
+ else:
+ key = original_key
+
+ key = replace_key(key)
+
+ if f"{key_prefix}.{key}" not in model_state_dict or key is None:
+ print(f"failed converting {original_key} to {key}, does not match")
+
+ # handle mismatched shapes
+ elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
+ val = model_state_dict[f"{key_prefix}.{key}"]
+ print(f"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match")
+ key = original_key
+
+ mapping[key] = original_key
+ new_dict[key] = value
+
+ return new_dict
+
+
+@torch.no_grad()
+def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
+ """
+ Copy/paste/tweak model's weights to our Jukebox structure.
+ """
+ for file in MODEL_MAPPING[model_name]:
+ if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
+ os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
+ r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
+ with open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb") as checkpoint_file:
+ checkpoint_file.write(r.content)
+
+ model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
+
+ config = JukeboxConfig.from_pretrained(model_name)
+ model = JukeboxModel(config)
+
+ weight_dict = []
+ mapping = {}
+ for i, dict_name in enumerate(model_to_convert):
+ old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
+
+ new_dic = {}
+ for k in old_dic.keys():
+ if k.endswith(".b"):
+ new_dic[k.replace("b", "bias")] = old_dic[k]
+ elif k.endswith(".w"):
+ new_dic[k.replace("w", "weight")] = old_dic[k]
+ elif "level_2" not in dict_name and "cond.model." in k:
+ new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
+ else:
+ new_dic[k] = old_dic[k]
+
+ key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
+ new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
+ weight_dict.append(new_dic)
+
+ vqvae_state_dict = weight_dict.pop(0)
+ model.vqvae.load_state_dict(vqvae_state_dict)
+ for i in range(len(weight_dict)):
+ model.priors[i].load_state_dict(weight_dict[2 - i])
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
+ json.dump(mapping, txtfile)
+
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ return weight_dict
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="jukebox-5b-lyrics",
+ type=str,
+ help="Name of the model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="jukebox-5b-lyrics-converted",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ args = parser.parse_args()
+ convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py
new file mode 100644
index 0000000000000000000000000000000000000000..236d1f4ff37bcad727b659cd00384bbc753ef83a
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py
@@ -0,0 +1,2669 @@
+# coding=utf-8
+# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Jukebox model."""
+
+import math
+import os
+from typing import List, Optional, Tuple
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import LayerNorm as FusedLayerNorm
+
+from ...activations import ACT2FN
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, logging
+from ...utils.logging import tqdm
+from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
+
+
+logger = logging.get_logger(__name__)
+
+JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "openai/jukebox-1b-lyrics",
+ "openai/jukebox-5b-lyrics",
+ # See all Jukebox models at https://huggingface.co/models?filter=jukebox
+]
+
+
+def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
+ """
+ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+
+ Args:
+ logits (`torch.Tensor`):
+ logits distribution shape (vocabulary size)
+ top_k (`int`, *optional*, defaults to 0):
+ When `top_k > 0`, keep only the top `top_k` tokens with highest probability (top-k filtering).
+ top_p (`float`, *optional*, defaults to 0.0):
+ When `top_p > 0.0`, keep the top tokens with cumulative probability >= `top_p` (nucleus filtering).
+ """
+ logits = logits.clone()
+ top_k = min(top_k, logits.size(-1)) # Safety check
+
+ if top_k > 0:
+ # Remove all tokens with a probability less than the last token of the top-k
+ indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:]
+ logits[indices_to_remove] = filter_value
+
+ if top_p > 0.0:
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+ # Remove tokens with cumulative probability above the threshold
+ sorted_indices_to_remove = cumulative_probs > top_p
+ # Shift the indices to the right to keep also the first token above the threshold
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+ sorted_indices_to_remove[..., 0] = 0
+
+ # indices_to_remove = sorted_indices[sorted_indices_to_remove]
+ indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(
+ dim=-1, index=sorted_indices, src=sorted_indices_to_remove
+ )
+ logits[indices_to_remove] = filter_value
+ return logits
+
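+# Illustrative sketch (hypothetical logits, comment only): with top_k=2 every logit outside the two
+# largest is pushed to -inf, so sampling is restricted to those two tokens; top_p keeps the smallest
+# head of the sorted distribution whose cumulative probability reaches the threshold.
+#
+#     logits = torch.tensor([[1.0, 3.0, 2.0, 0.5]])
+#     filter_logits(logits, top_k=2)    # -> tensor([[-inf, 3.0, 2.0, -inf]])
+#     filter_logits(logits, top_p=0.9)  # only the smallest logit (0.5) is filtered out here
+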
+
+def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration):
+ """
+ Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be
+ returned. If the provided token sequence is smaller, it will be padded; otherwise, only the tokens ranging from
+ the midpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses* on
+ the most relevant tokens (in time) for the sequence.
+
+ Args:
+ full_tokens (`List[int]`):
+ List containing the token ids of the entire lyrics.
+ max_n_lyric_tokens (`int`):
+ Maximum number of lyric tokens to return.
+ total_length (`int`):
+ Total expected length of the music (not all of it is generated, see duration), in samples.
+ offset (`int`):
+ Starting sample in the music. If the offset is greater than 0, the lyrics will be shifted to take that
+ into account.
+ duration (`int`):
+ Expected duration of the generated music, in samples. The duration has to be smaller than the total
+ length, which represents the overall length of the signal.
+ """
+ full_tokens = full_tokens[0]
+ if len(full_tokens) < max_n_lyric_tokens:
+ tokens = torch.cat(
+ [torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens]
+ )
+ indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens)))
+ else:
+ midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length)
+ midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2)
+ tokens = full_tokens[midpoint - max_n_lyric_tokens // 2 : midpoint + max_n_lyric_tokens // 2]
+ indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2))
+ return tokens.unsqueeze(dim=0), indices
+
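+# Illustrative sketch (hypothetical values, comment only): a 12-token lyric sequence and a 4-token
+# window; the window is centred on the part of the lyrics sung during the sampled segment.
+#
+#     full = torch.arange(12).unsqueeze(0)
+#     tokens, indices = get_relevant_lyric_tokens(full, max_n_lyric_tokens=4, total_length=100, offset=50, duration=20)
+#     # midpoint = int(12 * (50 + 20 / 2.0) / 100) = 7 -> tokens = tensor([[5, 6, 7, 8]]), indices = [5, 6, 7, 8]
+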
+
+# Break total_length into hops/windows of size n_ctx separated by hop_length
+def get_starts(total_length, n_ctx, hop_length):
+ starts = []
+ for start in range(0, total_length - n_ctx + hop_length, hop_length):
+ if start + n_ctx >= total_length:
+ # Last hop could be smaller, we make it n_ctx to maximise context
+ start = total_length - n_ctx
+ starts.append(start)
+ return starts
+
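+# Illustrative sketch (hypothetical values, comment only): with total_length=10, n_ctx=4 and
+# hop_length=3 the windows are [0, 4), [3, 7) and [6, 10); the last start is clipped so the final
+# window still spans a full n_ctx.
+#
+#     get_starts(10, 4, 3)  # -> [0, 3, 6]
+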
+
+def get_alignment(music_tokens, labels, prior, config):
+ level = prior.levels - 1 # Top level used
+ n_ctx = prior.n_ctx
+ tokens = music_tokens[level]
+ batch_size, total_length = tokens.shape[0], tokens.shape[1]
+ if total_length < n_ctx:
+ padding_length = n_ctx - total_length
+ tokens = torch.cat(
+ [tokens, torch.zeros(batch_size, n_ctx - total_length, dtype=tokens.dtype, device=tokens.device)], dim=1
+ )
+ total_length = tokens.shape[1]
+ else:
+ padding_length = 0
+
+ hop_length = int(config.hop_fraction[-level - 1] * prior.n_ctx)
+ alignment_head, alignment_layer = config.prior_alignment_head[0], config.prior_alignment_layer[0]
+ attn_layers = {alignment_layer}
+ alignment_hops = {}
+ indices_hops = {}
+ for start in tqdm(get_starts(total_length, n_ctx, hop_length), desc="Computing lyric to music alignment "):
+ end = start + n_ctx
+ # set metadata offset, sample_length and lyrics tokens
+ metadata, indices_hop = prior.get_metadata(labels, start, config.sample_length, get_indices=True, offset=0)
+ tokens_bs = torch.chunk(tokens, batch_size, dim=0)
+ metadata_bs = torch.chunk(metadata, batch_size, dim=0)
+ w_hops = []
+ for tokens_i, metadata_i in zip(tokens_bs, metadata_bs):
+ w_hop = prior.forward_tokens(tokens_i[:, start:end], [], metadata_i, get_attn_weights=attn_layers)
+ w_hops.append(w_hop[0][:, alignment_head])
+ del w_hop
+ weights = torch.cat(w_hops, dim=0)
+ del w_hops
+ alignment_hop = weights.float().cpu().numpy()
+ del weights
+
+ # alignment_hop has shape (bs, n_ctx, nb_relevant_lyric_tokens)
+ # indices_hop is a list of len=bs, each entry of len hps.nb_relevant_lyric_tokens
+ indices_hops[start] = indices_hop
+ alignment_hops[start] = alignment_hop
+
+ # Combine attn for each hop into attn for full range
+ # Use indices to place them into correct place for corresponding source tokens
+ alignments = []
+ for item in range(batch_size):
+ # Note each item has different length lyrics
+ full_tokens = labels[0, 3:]
+ alignment = np.zeros((total_length, len(full_tokens) + 1))
+ for start in reversed(get_starts(total_length, n_ctx, hop_length)):
+ end = start + n_ctx
+ alignment_hop = alignment_hops[start][item]
+ indices = indices_hops[start][item]
+ alignment[start:end, indices] = alignment_hop
+ alignment = alignment[: total_length - padding_length, :-1] # remove token padding, and last lyric index
+ alignments.append(alignment)
+ return alignments
+
+
+def save_temp_audio(fname, lvl, metas, aud):
+ aud = torch.clamp(aud, -1, 1).cpu().numpy()
+ for i in range(aud.shape[0]):
+ if metas is not None:
+ artists, genres, lyrics = list(metas)[i].values()
+ path = f"{fname}/lvl_{lvl}-{artists}-{genres}-{lyrics[:5]}-{i}"
+ np.save(path, aud[i])
+ else:
+ np.save(f"{fname}/lvl_{lvl}-sample-{i}", aud[i])
+
+
+def get_mask(mask, query_length, key_value_length, blocks, spread, device, sample, sample_t):
+ # returns a mask of shape 1 x 1 x query_length x key_value_length or None if masking is not needed.
+ if mask is None or query_length == 1:
+ return None
+ offset = sample_t - query_length if sample else max(key_value_length - query_length, 0)
+ if mask == "autoregressive":
+ # Masked dense
+ mask = torch.ones(query_length, key_value_length, device=device).tril(offset)
+ elif mask == "summary":
+ # Masked summary
+ mask = torch.ones(query_length, query_length, device=device).tril()
+ mask = mask.view(query_length, blocks, query_length // blocks)[:, :-1, -key_value_length // blocks :]
+ mask = (
+ torch.nn.functional.pad(
+ mask,
+ (0, 0, 1, 0),
+ value=1,
+ )
+ .contiguous()
+ .view(query_length, key_value_length)
+ )
+ elif mask == "prime":
+ mask = torch.ones(query_length, key_value_length, device=device).tril(offset)
+ return mask.view(1, 1, query_length, key_value_length)
+
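+# Illustrative sketch (hypothetical sizes, comment only): the "autoregressive" mask is a plain
+# lower-triangular matrix over (query_length, key_value_length), shifted by the offset when sampling.
+#
+#     m = get_mask("autoregressive", query_length=3, key_value_length=3, blocks=None,
+#                  spread=None, device="cpu", sample=False, sample_t=0)
+#     # m.shape -> torch.Size([1, 1, 3, 3]) and m[0, 0] equals torch.ones(3, 3).tril()
+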
+
+class JukeboxConv1D(nn.Module):
+ def __init__(self, input_width, output_width):
+ super().__init__()
+ self.input_width = input_width
+ self.output_width = output_width
+ weight = torch.empty(input_width, output_width)
+ bias = torch.zeros(output_width)
+ self.weight = nn.Parameter(weight)
+ self.bias = nn.Parameter(bias)
+
+ def forward(self, hidden_states):
+ size_out = (*hidden_states.size()[:-1], self.output_width)
+ hidden_states = torch.addmm(
+ self.bias.type_as(hidden_states),
+ hidden_states.view(-1, hidden_states.size(-1)),
+ self.weight.type_as(hidden_states),
+ )
+ hidden_states = hidden_states.view(*size_out)
+ return hidden_states
+
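+# Illustrative note (hypothetical sizes, comment only): JukeboxConv1D is a dense layer over the last
+# dimension, with the weight stored as (input_width, output_width), i.e. the transpose of nn.Linear's layout.
+#
+#     layer = JukeboxConv1D(input_width=8, output_width=16)
+#     torch.nn.init.normal_(layer.weight, std=0.02)  # the weight is created with torch.empty
+#     out = layer(torch.randn(2, 10, 8))             # out.shape -> torch.Size([2, 10, 16])
+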
+
+class JukeboxResConv1DBlock(nn.Module):
+ def __init__(self, config, conv_width, depth=1, res_scale=1.0):
+ super().__init__()
+ hidden_dim = config.res_convolution_multiplier * conv_width
+ dilation = config.res_dilation_growth_rate**depth
+ padding = dilation
+
+ self.res_scale = res_scale
+ self.activation = nn.ReLU()
+ self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation)
+ self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0)
+
+ def forward(self, hidden_states):
+ residuals = hidden_states
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.conv1d_1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.conv1d_2(hidden_states)
+ return residuals + self.res_scale * hidden_states
+
+
+class JukeboxResnet1D(nn.Module):
+ def __init__(self, config, conv_width, n_depth, reverse_dilation=False):
+ super().__init__()
+ self.dilation_cycle = config.res_dilation_cycle
+ res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth)
+
+ blocks = []
+ for depth in range(n_depth):
+ block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle
+ blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale))
+
+ if reverse_dilation:
+ blocks = blocks[::-1]
+ self.resnet_block = nn.ModuleList(blocks)
+
+ def forward(self, hidden_states):
+ for block in self.resnet_block:
+ hidden_states = block(hidden_states)
+ return hidden_states
+
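+# Illustrative note (assuming the default dilation growth rate of 3 and no dilation cycle): residual
+# block `depth` uses dilation growth_rate**depth, so the receptive field grows exponentially with
+# depth; `reverse_dilation` flips that order for the decoder side.
+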
+
+class JukeboxEncoderConvBlock(nn.Module):
+ def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t):
+ super().__init__()
+ blocks = []
+ filter_t = stride_t * 2
+ pad_t = stride_t // 2
+ if down_t > 0:
+ for i in range(down_t):
+ blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t))
+ blocks.append(JukeboxResnet1D(config, hidden_dim, depth))
+ self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1)
+ self.downsample_block = nn.ModuleList(blocks)
+
+ def forward(self, hidden_states):
+ for block in self.downsample_block:
+ hidden_states = block(hidden_states)
+ hidden_states = self.proj_out(hidden_states)
+ return hidden_states
+
+
+class JukeboxEncoder(nn.Module):
+ def __init__(self, config, width, depth, levels, downs_t, strides_t):
+ super().__init__()
+ self.levels = levels
+ self.level_blocks = nn.ModuleList()
+
+ iterator = zip(list(range(self.levels)), downs_t, strides_t)
+ for i, down_t, stride_t in iterator:
+ self.level_blocks.append(
+ JukeboxEncoderConvBlock(
+ config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t
+ )
+ )
+
+ def forward(self, hidden_states):
+ all_hidden_states = []
+
+ # 64, 32, ...
+ for level in range(self.levels):
+ level_block = self.level_blocks[level]
+ hidden_states = level_block(hidden_states)
+ all_hidden_states.append(hidden_states)
+
+ return all_hidden_states
+
+
+class JukeboxDecoderConvBock(nn.Module):
+ def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True):
+ self.embed_dim = embed_dim
+ self.hidden_dim = hidden_dim
+ super().__init__()
+ blocks = []
+ if down_t > 0:
+ filter_t = stride_t * 2
+ pad_t = stride_t // 2
+ self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1)
+ for i in range(down_t):
+ blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation))
+ blocks.append(
+ nn.ConvTranspose1d(
+ hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t
+ )
+ )
+ self.upsample_block = nn.ModuleList(blocks)
+
+ def forward(self, hidden_states):
+ hidden_states = self.proj_in(hidden_states)
+ for block in self.upsample_block:
+ hidden_states = block(hidden_states)
+ return hidden_states
+
+
+class JukeboxDecoder(nn.Module):
+ def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t):
+ super().__init__()
+ self.levels = levels
+ self.level_blocks = nn.ModuleList()
+ for level, down_t, stride_t in zip(list(range(self.levels)), downs_t, strides_t):
+ self.level_blocks.append(
+ JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t)
+ )
+
+ self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1)
+
+ def forward(self, hidden_states, all_levels=True):
+ hidden_state = hidden_states[-1]
+
+ # 32, 64 ...
+ for level in reversed(range(self.levels)):
+ level_block = self.level_blocks[level]
+ hidden_state = level_block(hidden_state)
+
+ if level != 0 and all_levels:
+ hidden_state = hidden_state + hidden_states[level - 1]
+
+ hidden_state = self.out(hidden_state)
+ return hidden_state
+
+
+class JukeboxBottleneckBlock(nn.Module):
+ def __init__(self, config: JukeboxVQVAEConfig):
+ super().__init__()
+ self.nb_discrete_codes = config.nb_discrete_codes
+ self.codebook_width = config.embed_dim
+ self.mu = config.lmu
+ self.threshold = 1.0
+ self.init = False
+ self.codebook_sum = None
+ self.codebook_elem = None
+ self.register_buffer("codebook", torch.zeros(self.nb_discrete_codes, self.codebook_width))
+
+ def _tile(self, hidden_states):
+ dim, embed_width = hidden_states.shape
+ if dim < self.nb_discrete_codes:
+ n_repeats = (self.nb_discrete_codes + dim - 1) // dim
+ std = 0.01 / np.sqrt(embed_width)
+ hidden_states = hidden_states.repeat(n_repeats, 1)
+ hidden_states = hidden_states + torch.randn_like(hidden_states) * std
+ return hidden_states
+
+ def init_codebook(self, hidden_states):
+ nb_discrete_codes = self.nb_discrete_codes
+ self.init = True
+ codes = self._tile(hidden_states)
+ self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes]
+ self.codebook_sum = self.codebook
+ self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device)
+
+ def update_codebook(self, hidden_states, latent_states):
+ mu, codebook_width, nb_discrete_codes = self.mu, self.codebook_width, self.nb_discrete_codes
+ with torch.no_grad():
+ # Calculate new centres
+ # nb_discrete_codes, batch_size * seq_length
+ latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device)
+ latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1)
+
+ _codebook_sum = torch.matmul(latent_states_onehot, hidden_states)
+ _codebook_elem = latent_states_onehot.sum(dim=-1) # nb_discrete_codes
+ codes = self._tile(hidden_states)
+ _random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes]
+
+ # Update centres
+ old_codebook = self.codebook
+ self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum
+ self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem # nb_discrete_codes
+ usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float()
+
+ norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view(
+ nb_discrete_codes, 1
+ )
+ self.codebook = usage * (norm_code) + (1 - usage) * _random_codebook
+ _codebook_prob = _codebook_elem / torch.sum(_codebook_elem) # prob of each bin
+ entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-8)) # entropy ie how diverse
+ used_curr = (_codebook_elem >= self.threshold).sum()
+ usage = torch.sum(usage)
+ dk = torch.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape))
+ return {"entropy": entropy, "used_curr": used_curr, "usage": usage, "dk": dk}
+
+ def preprocess(self, hidden_states):
+ hidden_states = hidden_states.permute(0, 2, 1).contiguous()
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+
+ if hidden_states.shape[-1] == self.codebook_width:
+ prenorm = torch.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape))
+ elif hidden_states.shape[-1] == 2 * self.codebook_width:
+ x1, x2 = hidden_states[..., : self.codebook_width], hidden_states[..., self.codebook_width :]
+ prenorm = (torch.norm(x1 - torch.mean(x1)) / np.sqrt(np.prod(x1.shape))) + (
+ torch.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape))
+ )
+
+ # Normalise
+ hidden_states = x1 + x2
+
+ return hidden_states, prenorm
+
+ def postprocess(self, latent_states, dequantised_states, x_shape):
+ batch_size, time = x_shape
+ dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous()
+ latent_states = latent_states.view(batch_size, time)
+ return latent_states, dequantised_states
+
+ def quantise(self, latent_states):
+ # Calculate latent code latent_states
+ codebook_weights = self.codebook.t()
+ distance = (
+ torch.sum(latent_states**2, dim=-1, keepdim=True)
+ - 2 * torch.matmul(latent_states, codebook_weights)
+ + torch.sum(codebook_weights**2, dim=0, keepdim=True)
+ ) # (batch_size * latent_states , codebook_weights)
+ min_distance, music_tokens = torch.min(distance, dim=-1)
+ fit = torch.mean(min_distance)
+ return music_tokens, fit
+
+ def dequantise(self, music_tokens):
+ dequantised_states = F.embedding(music_tokens, self.codebook)
+ return dequantised_states
+
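+ # Illustrative sketch (hypothetical values, comment only): `quantise` assigns each latent vector to its
+ # nearest codebook entry by expanding ||x - e||^2 = ||x||^2 - 2 x.e + ||e||^2, and `dequantise` maps the
+ # resulting token ids back to codebook vectors with an embedding lookup.
+ #
+ #     block = JukeboxBottleneckBlock(JukeboxVQVAEConfig())
+ #     block.codebook.normal_()                         # random codebook, for illustration only
+ #     latents = torch.randn(6, block.codebook_width)   # six flattened latent vectors
+ #     tokens, fit = block.quantise(latents)            # tokens.shape -> torch.Size([6])
+ #     vectors = block.dequantise(tokens)               # vectors.shape -> (6, codebook_width)
+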
+ def encode(self, latent_states):
+ samples, _, seq_len = latent_states.shape
+
+ # Preprocess.
+ latent_states, _ = self.preprocess(latent_states)
+
+ # Quantise
+ music_tokens, _ = self.quantise(latent_states)
+
+ # Postprocess.
+ music_tokens = music_tokens.view(samples, seq_len)
+ return music_tokens
+
+ def decode(self, music_tokens):
+ samples, seq_len = music_tokens.shape
+
+ # Dequantise
+ dequantised_states = self.dequantise(music_tokens)
+
+ # Postprocess
+ dequantised_states = (
+ dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous()
+ )
+ return dequantised_states
+
+ def forward(self, hidden_states, update_codebook=True):
+ samples, _, seq_len = hidden_states.shape
+
+ # Preprocess
+ hidden_states, prenorm = self.preprocess(hidden_states)
+
+ # Init codebook if not inited
+ if update_codebook and not self.init:
+ self.init_codebook(hidden_states)
+
+ # Quantise and dequantise through bottleneck
+ music_tokens, fit = self.quantise(hidden_states)
+ dequantised_states = self.dequantise(music_tokens)
+
+ # Update embeddings
+ if update_codebook:
+ update_metrics = self.update_codebook(hidden_states, music_tokens)
+ else:
+ update_metrics = {}
+
+ # Loss
+ commit_loss = torch.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape)
+
+ # Passthrough
+ dequantised_states = hidden_states + (dequantised_states - hidden_states).detach()
+
+ # Postprocess
+ music_tokens, dequantised_states = self.postprocess(music_tokens, dequantised_states, (samples, seq_len))
+ return music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics)
+
+
+class JukeboxBottleneck(nn.Module):
+ def __init__(self, config, levels):
+ super().__init__()
+ self.levels = levels
+ self.level_blocks = nn.ModuleList()
+ for level in range(self.levels):
+ self.level_blocks.append(JukeboxBottleneckBlock(config))
+
+ def encode(self, raw_audio):
+ music_tokens = [
+ level_block.encode(hidden_states) for (level_block, hidden_states) in zip(self.level_blocks, raw_audio)
+ ]
+ return music_tokens
+
+ def decode(self, music_tokens, start_level=0, end_level=None):
+ if end_level is None:
+ end_level = self.levels
+ quantised_audio = [
+ level_block.decode(z) for (level_block, z) in zip(self.level_blocks[start_level:end_level], music_tokens)
+ ]
+ return quantised_audio
+
+ def forward(self, input_audio):
+ music_tokens, quantised_states, commit_losses, metrics = [], [], [], []
+ for level in range(self.levels):
+ level_block = self.level_blocks[-level - 1]
+ hidden_states = input_audio[level]
+ sampled_tokens, quantised_state, commit_loss, metric = level_block(
+ hidden_states, update_codebook=self.training
+ )
+ music_tokens.append(sampled_tokens)
+ if not self.training:
+ # Be extra paranoid and make sure the encoder weights can't
+ # change from straight-through estimator
+ quantised_state = quantised_state.detach()
+ quantised_states.append(quantised_state)
+ commit_losses.append(commit_loss)
+ if self.training:
+ metrics.append(metric)
+ return music_tokens, quantised_states, commit_losses, metrics
+
+
+JUKEBOX_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config (`JukeboxConfig`): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ """The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam
+Ringer, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://arxiv.org/abs/2002.08111).
+
+ """,
+ JUKEBOX_START_DOCSTRING,
+)
+class JukeboxVQVAE(PreTrainedModel):
+ config_class = JukeboxVQVAEConfig
+ base_model_prefix = "vqvae"
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Embedding): # embed_tokens
+ module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale)
+ elif isinstance(module, JukeboxConv1D):
+ if self.config.zero_out:
+ module.weight.data.zero_()
+ else:
+ module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale)
+ elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out:
+ module.conv1d_2.weight.data.zero_()
+ module.conv1d_2.bias.data.zero_()
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+ def __init__(self, config: JukeboxVQVAEConfig):
+ super().__init__(config)
+ downs_t = config.res_downs_t
+ strides_t = config.res_strides_t
+ if not config.sample_length:
+ downsamples = [stride**down for stride, down in zip(strides_t, downs_t)]
+ top_raw_to_tokens = np.prod(downsamples)
+ config.sample_length = (
+ config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens
+ ) * top_raw_to_tokens
+ config.sample_length = config.sample_length.astype(int)
+
+ self.nb_discrete_codes = config.nb_discrete_codes
+ self.commit = config.commit
+ self.sample_length = config.sample_length
+
+ self.downsamples = [stride**down for stride, down in zip(strides_t, downs_t)]
+ self.hop_lengths = np.cumprod(self.downsamples)
+ self.levels = levels = config.levels
+ self.music_tokens_shapes = [
+ (int(self.sample_length // self.hop_lengths[-level - 1])) for level in range(levels)
+ ]
+
+ self.multipliers = config.multipliers if config.multipliers is not None else [1] * levels
+
+ self.encoders = nn.ModuleList()
+ self.decoders = nn.ModuleList()
+ for level in range(levels):
+ width = config.res_conv_width * self.multipliers[level]
+ depth = config.res_conv_depth * self.multipliers[level]
+ self.encoders.append(
+ JukeboxEncoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1])
+ )
+ self.decoders.append(
+ JukeboxDecoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1])
+ )
+
+ self.bottleneck = JukeboxBottleneck(config, levels)
+
+ def _decode(self, music_tokens, start_level=0, end_level=None):
+ # Decode
+ if end_level is None:
+ end_level = self.levels
+ latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level)
+ # Use only lowest level
+ decoder, dequantised_state = self.decoders[start_level], latent_states[0:1]
+ dequantised_state = decoder(dequantised_state, all_levels=False)
+ dequantised_state = dequantised_state.permute(0, 2, 1)
+ return dequantised_state
+
+ def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor:
+ """
+ Transforms the input `music_tokens` to their `raw_audio` representation.
+
+ Args:
+ music_tokens (`torch.LongTensor`):
+ Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token
+ should be an index to a corresponding `code` vector in the codebook.
+ start_level (`int`, *optional*):
+ Level at which the decoding process will start. Defaults to 0.
+ end_level (`int`, *optional*):
+ Level at which the decoding process will end. Defaults to None.
+ bs_chunks (`int`, *optional*, defaults to 1):
+ Number of chunks to process at the same time.
+ """
+ token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens]
+ dequantised_states = []
+ for i in range(bs_chunks):
+ music_tokens_i = [chunks[i] for chunks in token_chunks]
+ dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level)
+ dequantised_states.append(dequantised_state)
+ return torch.cat(dequantised_states, dim=0)
+
+ def _encode(self, raw_audio, start_level=0, end_level=None):
+ # Encode
+ if end_level is None:
+ end_level = self.levels
+ input_audio = raw_audio.permute(0, 2, 1).float()
+ latent_states = []
+ for level in range(self.levels):
+ encoder = self.encoders[level]
+ latent_state = encoder(input_audio)
+ latent_states.append(latent_state[-1])
+ music_tokens = self.bottleneck.encode(latent_states)
+ return music_tokens[start_level:end_level]
+
+ def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
+ """
+ Transforms the `input_audio` to a discrete representation made out of `music_tokens`.
+
+ Args:
+ input_audio (`torch.Tensor`):
+ Raw audio which will be encoded to its discrete representation using the codebook. The closest `code`
+ from the codebook will be computed for each sequence of samples.
+ start_level (`int`, *optional*, defaults to 0):
+ Level at which the encoding process will start.
+ end_level (`int`, *optional*):
+ Level at which the encoding process will end. Defaults to None.
+ bs_chunks (`int`, *optional*, defaults to 1):
+ Number of chunks of raw audio to process at the same time.
+ """
+ audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0)
+ music_tokens_list = []
+ for chunk_i in audio_chunks:
+ music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level)
+ music_tokens_list.append(music_tokens_i)
+ music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)]
+ return music_tokens
+
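+ # Illustrative usage sketch (hypothetical sample length, chosen divisible by the top-level hop of 128
+ # for the default strides; comment only): encode one channel of raw audio and decode it back.
+ #
+ #     model = JukeboxVQVAE(JukeboxVQVAEConfig(sample_length=44032)).eval()
+ #     audio = torch.randn(1, 44032, 1)           # (batch, n_samples, channels)
+ #     with torch.no_grad():
+ #         tokens = model.encode(audio)           # one LongTensor per level
+ #         reconstruction = model.decode(tokens)  # back to (1, 44032, 1)
+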
+ def sample(self, n_samples):
+ music_tokens = [
+ torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device="cpu")
+ for music_tokens_shape in self.music_tokens_shapes
+ ]
+ return self.decode(music_tokens)
+
+ def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Forward pass of the VQ-VAE: the `raw_audio` is encoded to latent states, which are then decoded at each
+ level. The commit loss, which ensures that the encoder's computed embeddings stay close to the codebook
+ vectors, is also computed.
+
+ Args:
+ raw_audio (`torch.FloatTensor`):
+ Audio input which will be encoded and decoded.
+
+ Returns:
+ `Tuple[torch.Tensor, torch.Tensor]`
+
+
+ Example:
+ ```python
+ >>> from transformers import JukeboxVQVAE, set_seed
+ >>> import torch
+
+ >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval()
+ >>> set_seed(0)
+ >>> zs = [torch.randint(100, (4, 1))]
+ >>> model.decode(zs).shape
+ torch.Size([4, 8, 1])
+ ```
+ """
+
+ # Encode/Decode
+ input_audio = raw_audio.permute(0, 2, 1).float()
+ latent_states = []
+ for level in range(self.levels):
+ encoder = self.encoders[level]
+ latent_state = encoder(input_audio)
+ latent_states.append(latent_state[-1])
+
+ _, music_tokens, commit_losses, _ = self.bottleneck(latent_states)
+ dequantised_states = []
+ for level in range(self.levels):
+ decoder = self.decoders[level]
+ dequantised_state = decoder(music_tokens[level : level + 1], all_levels=False)
+ dequantised_states.append(dequantised_state.permute(0, 2, 1))
+
+ commit_loss = sum(commit_losses)
+ loss = self.commit * commit_loss
+
+ return dequantised_states, loss
+
+
+class JukeboxMLP(nn.Module):
+ def __init__(self, config):
+ # a single channel is always used in original code
+ super().__init__()
+ embed_dim = config.hidden_size
+ hidden_dim = int(config.mlp_multiplier * embed_dim)
+
+ self.c_fc = JukeboxConv1D(embed_dim, hidden_dim)
+ self.c_proj = JukeboxConv1D(hidden_dim, embed_dim)
+ self.act = ACT2FN[config.act_fn]
+ self.dropout = nn.Dropout(config.resid_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class JukeboxLayerNorm(FusedLayerNorm):
+ def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
+ super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine)
+ self.width = np.prod(normalized_shape)
+ self.max_numel = 65535 * self.width
+
+ def forward(self, input):
+ if input.numel() > self.max_numel:
+ return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input)
+ else:
+ return super().forward(input).type_as(input)
+
+
+class JukeboxAttention(nn.Module):
+ def __init__(self, config, n_ctx, attn_func="dense_attn"):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.n_heads = config.n_heads
+ self.dropout = config.attn_dropout
+ hidden_dim = int(config.attention_multiplier * self.embed_dim)
+
+ self.head_dim = hidden_dim // config.n_heads
+ self.n_ctx = n_ctx
+ self.hidden_dim = hidden_dim
+ self.scale = self.head_dim**-0.25
+ self.mask = config.mask
+
+ if attn_func == "cross_attention":
+ self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim)
+ self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2)
+ else:
+ self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3)
+
+ self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim)
+ self.attn_dropout = nn.Dropout(config.attn_dropout)
+ self.resid_dropout = nn.Dropout(config.resid_dropout)
+
+ # Sequence of length seq_len is factored as [blocks, seq_len // blocks]
+ self.attn_func = attn_func
+ if attn_func == "cross_attention":
+ self.qkv = self.decode_qkv
+ elif attn_func == "prime_attn":
+ self.qkv = self.prime_qkv
+ else:
+ self.qkv = self.factored_qkv
+
+ ATTENTION_MAP = {
+ "dense_attn": (self.dense_attn, "autoregressive"),
+ "block_attn": (self.block_attn, "autoregressive"),
+ "transpose_block_attn": (self.transpose_block_attn, "autoregressive"),
+ "prev_block_attn": (self.prev_block_attn, None),
+ "summary_attn": (self.summary_attn, "summary"),
+ "summary_spread_attn": (self.summary_spread_attn, "summary"),
+ "cross_attention": (self.dense_attn, None),
+ "prime_attn": (self.prime_attn, "prime"),
+ }
+ self.attn, self.attn_mask = ATTENTION_MAP[attn_func]
+
+ self.blocks = config.blocks
+ self.spread = config.spread
+ if self.blocks is not None:
+ self.block_ctx = self.n_ctx // self.blocks
+
+ self.sample_t = 0
+ self.cache = {}
+ self.encoder_len = config.nb_relevant_lyric_tokens # length of the encoder input ids
+ self.record_attn = False
+
+ def _attn(self, query_states, key_states, value_states, sample):
+ scale = self.scale
+ if self.training:
+ attention_weight = torch.matmul(query_states * scale, key_states * scale)
+ else:
+ attention_weight = torch.matmul(query_states, key_states)
+ attention_weight.mul_(scale * scale)
+ attn_weight_type = attention_weight.dtype
+ attention_weight = attention_weight.float()
+ if self.mask:
+ # Generate appropriate mask to mask out all positions before current
+ # Might take up lot of memory for dense, so can cache it
+ mask = get_mask(
+ self.attn_mask,
+ query_states.size(-2),
+ key_states.size(-1),
+ self.blocks,
+ self.spread,
+ attention_weight.device,
+ sample,
+ self.sample_t,
+ )
+ if mask is not None:
+ attention_weight = attention_weight * mask + -1e9 * (1 - mask)
+ attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type)
+ if self.record_attn:
+ self.attention_prob = attention_prob
+ if self.attn_func == "prime_attn":
+ # only keep music queries and lyrics keys/values
+ self.attention_prob = self.attention_prob[:, :, self.encoder_len :, : self.encoder_len]
+ attention_prob = self.attn_dropout(attention_prob)
+ context_states = torch.matmul(attention_prob, value_states)
+ return context_states
+
+ def merge_heads(self, hidden_states):
+ hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous()
+ new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1))
+ return hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct merge_states
+
+ def split_heads(self, hidden_states, is_key=False):
+ new_hidden_states_shape = (
+ *hidden_states.size()[:-1],
+ self.n_heads,
+ hidden_states.size(-1) // self.n_heads,
+ )
+ hidden_states = hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct split_states
+ if is_key:
+ return hidden_states.permute(0, 2, 3, 1)
+ else:
+ return hidden_states.permute(0, 2, 1, 3)
+
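+ # Illustrative shape sketch (hypothetical sizes, 4 heads of dimension 16; comment only): split_heads
+ # maps (batch, seq, 64) to (batch, 4, seq, 16) for queries/values and to (batch, 4, 16, seq) for keys,
+ # so that _attn can use a single matmul; merge_heads undoes the first mapping.
+ #
+ #     hidden = torch.randn(2, 10, 64)
+ #     queries = hidden.view(2, 10, 4, 16).permute(0, 2, 1, 3)  # what split_heads returns
+ #     keys = hidden.view(2, 10, 4, 16).permute(0, 2, 3, 1)     # key layout, is_key=True
+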
+ def dense_attn(self, query, key, value, sample):
+ query = self.split_heads(query)
+ key = self.split_heads(key, is_key=True)
+ value = self.split_heads(value)
+ context_states = self._attn(query, key, value, sample)
+ context_states = self.merge_heads(context_states)
+ return context_states
+
+ def block_attn(self, query, key, value, sample):
+ block_ctx = self.block_ctx
+ batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t
+ if sample:
+ return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
+ else:
+ query_length = query.shape[1]
+ query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim)
+ if query_length < seq_len:
+ seq_len = query_length
+ key = key[:, -seq_len:].contiguous()
+ value = value[:, -seq_len:].contiguous()
+ key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
+ value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
+ return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
+
+ def transpose_block_attn(self, query, key, value, sample):
+ block_ctx = self.block_ctx
+ batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t
+ if sample:
+ block_len = (seq_len - 1) % block_ctx
+ key = key[:, block_len::block_ctx, :]
+ value = value[:, block_len::block_ctx, :]
+ return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
+ else:
+ query_length = query.shape[1]
+ query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim)
+ query = query.transpose(1, 2).contiguous()
+ query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim)
+
+ key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)
+ key = key.transpose(1, 2).contiguous()
+ key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim)
+
+ value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)
+ value = value.transpose(1, 2).contiguous()
+ value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim)
+
+ block_attn = self.dense_attn(query, key, value, sample)
+ block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim)
+ block_attn = block_attn.transpose(1, 2).contiguous()
+ block_attn = block_attn.view(batch_size, query_length, embed_dim)
+
+ return block_attn
+
+ def prev_block_attn(self, query, key, value, sample):
+ block_ctx = self.block_ctx
+ batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t
+ if sample:
+ block = (seq_len - 1) // block_ctx
+ prev_l = (block - 1) * block_ctx
+ if block > 0:
+ key = key[:, prev_l : prev_l + block_ctx, :]
+ value = value[:, prev_l : prev_l + block_ctx, :]
+ else:
+ key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype)
+ value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype)
+ return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
+ else:
+ query_length = query.shape[1]
+ query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim)
+
+ key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :]
+ key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0))
+ key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
+
+ value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :]
+ value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0))
+ value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
+
+ if query_length < seq_len:
+ nb_query_blocks = query_length // block_ctx
+ nb_key_blocks = seq_len // block_ctx
+ seq_len = query_length
+ key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:]
+ key = key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim)
+
+ value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:]
+ value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim)
+
+ return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
+
+ def summary_attn(self, query, key, value, sample):
+ blocks = self.blocks
+ block_ctx = self.block_ctx
+ batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t
+ if sample:
+ key = key[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :]
+ key = torch.nn.functional.pad(key, (0, 0, 1, 0))
+
+ value = value[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :]
+ value = torch.nn.functional.pad(value, (0, 0, 1, 0))
+ return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
+ else:
+ key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :]
+ key = torch.nn.functional.pad(key, (0, 0, 1, 0)) # batch_size, blocks, embed_dim
+
+ value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :]
+ value = torch.nn.functional.pad(value, (0, 0, 1, 0)) # batch_size, blocks, embed_dim
+ return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
+
+ def summary_spread_attn(self, query, key, value, sample):
+ blocks = self.blocks
+ spread = self.spread
+
+ batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t
+ if sample:
+ raise NotImplementedError
+ else:
+ key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :]
+ key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous()
+ key = key.view(batch_size, blocks * spread, embed_dim)
+
+ value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :]
+ value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous()
+ value = value.view(batch_size, blocks * spread, embed_dim)
+
+ return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
+
+ def prime_attn(self, query, key, value, sample):
+ encoder_len = self._encoder_len
+ key = key[:, :encoder_len]
+ value = value[:, :encoder_len]
+ return self.dense_attn(query, key, value, sample)
+
+ def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
+ curr_ctx = hidden_states.shape[1]
+ if last_encoder_hidden_states is not None:
+ raise TypeError("last_encoder_hidden_states should be None")
+
+ query, key, value = hidden_states.chunk(3, dim=2)
+ if sample:
+ self.sample_t += curr_ctx
+ key, value = self._append_cache(key, value)
+ l_cache = self._suff_cache_len()
+ if self._cache_len() > l_cache:
+ self._slice_cache(-l_cache)
+ if curr_ctx > 1:
+ if self.attn_func != "dense_attn":
+ query = self._pad_to_block_ctx(query, query=True)
+ key = self._pad_to_block_ctx(key)
+ value = self._pad_to_block_ctx(value)
+ sample = False
+ else:
+ key = self.cache["key"]
+ value = self.cache["value"]
+ return query, key, value, sample
+
+ def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
+ curr_ctx = hidden_states.shape[1]
+ if last_encoder_hidden_states is not None:
+ raise TypeError("last_encoder_hidden_states should be None")
+ query, key, value = hidden_states.chunk(3, dim=2)
+ if sample:
+ if self._cache_len() < self._encoder_len:
+ self._append_cache(key, value)
+ if self._cache_len() > self._encoder_len:
+ self._slice_cache(0, self._encoder_len)
+ key, value = self.cache["key"], self.cache["value"]
+ self.sample_t += curr_ctx
+ return query, key, value, sample
+
+ def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
+ curr_ctx = hidden_states.shape[1]
+ query = hidden_states
+ if sample:
+ if self.sample_t == 0:
+ self.cache["key"], self.cache["value"] = self.c_enc_kv(
+ last_encoder_hidden_states.type_as(hidden_states)
+ ).chunk(2, dim=2)
+ key, value = self.cache["key"], self.cache["value"]
+ self.sample_t += curr_ctx
+ else:
+ key, value = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2)
+ return query, key, value, sample
+
+ def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
+ curr_ctx = hidden_states.shape[1]
+ hidden_states = self.c_attn(hidden_states)
+ query, key, value, sample = self.qkv(
+ hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample
+ )
+ attention_scores = self.attn(query, key, value, sample)
+ if attention_scores.shape[1] != curr_ctx:
+ offset = self._offset(curr_ctx)
+ attention_scores = attention_scores[:, offset : offset + curr_ctx, :].contiguous()
+ attention_scores = self.c_proj(attention_scores)
+ return self.resid_dropout(attention_scores)
+
+ @property
+ def _encoder_len(self):
+ encoder_len = self.encoder_len
+ encoder_blocks = (encoder_len // self.blocks) + 1
+ return encoder_blocks * self.blocks
+
+ def _offset(self, curr_ctx):
+ if self.attn_func == "dense_attn":
+ return 0
+ return (self.sample_t - curr_ctx) % self.block_ctx
+
+ def _pad_to_block_ctx(self, hidden_states, query=False):
+ seq_len = hidden_states.shape[1]
+ offset = self._offset(seq_len) if query else 0
+ n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx
+ pad = n_blocks * self.block_ctx - seq_len - offset
+ if pad == 0 and offset == 0:
+ return hidden_states
+ else:
+ return F.pad(hidden_states, (0, 0, offset, pad))
+
+ def _cache_len(self):
+ return 0 if "key" not in self.cache else self.cache["key"].shape[1]
+
+ def _suff_cache_len(self):
+ """
+ Precondition:
+ key and value are appended with the current context and self.sample_t reflects the 1-indexed sample
+ location in the context.
+ """
+ previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx
+ REQUIRED_CACHE_LEN = {
+ "dense_attn": self.sample_t,
+ "block_attn": (self.sample_t - 1) % self.block_ctx + 1,
+ "transpose_block_attn": self.sample_t,
+ "prev_block_attn": self.sample_t if self.sample_t <= self.block_ctx else previous_block_length,
+ "cross_attn": self.encoder_len,
+ "prime_attn": min(self.sample_t, self._encoder_len),
+ }
+
+ return REQUIRED_CACHE_LEN[self.attn_func]
+
+ def _slice_cache(self, start, end=None):
+ self.cache["key"] = self.cache["key"][:, start:end]
+ self.cache["value"] = self.cache["value"][:, start:end]
+
+ def _append_cache(self, key, value):
+ if "key" not in self.cache:
+ self.cache["key"] = key
+ self.cache["value"] = value
+ else:
+ old_key, old_value = key, value
+ key = torch.cat([self.cache["key"], old_key], dim=1)
+ value = torch.cat([self.cache["value"], old_value], dim=1)
+ del self.cache["key"]
+ del self.cache["value"]
+ del old_key
+ del old_value
+ self.cache["key"] = key
+ self.cache["value"] = value
+ return self.cache["key"], self.cache["value"]
+
+ def del_cache(self):
+ self.sample_t = 0
+ if "key" in self.cache:
+ del self.cache["key"]
+ if "value" in self.cache:
+ del self.cache["value"]
+ self.cache = {}
+
+
+class JukeboxBlock(nn.Module):
+ def __init__(self, config, n_ctx, attn_func="dense_attn"):
+ super().__init__()
+ self.width = config.hidden_size
+ self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func)
+
+ self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size)
+ self.mlp = JukeboxMLP(config)
+ self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size)
+ self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0
+ self.attn_func = attn_func
+
+ def forward(self, hidden_states, last_encoder_hidden_states, sample=False):
+ residuals = hidden_states
+ hidden_states = self.layer_norm_0(hidden_states)
+ hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample)
+
+ output_states = self.layer_norm_1(residuals + hidden_states)
+ output_states = self.mlp(output_states)
+ if self.res_scale == 1.0:
+ output = residuals + hidden_states + output_states
+ else:
+ output = residuals + self.res_scale * (hidden_states + output_states)
+ return output
+
+
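+# Illustrative sketch of the residual scaling used by `JukeboxBlock.forward` above: when `attn_res_scale` is
+# enabled, each block only contributes `1 / num_layers` of its attention + MLP update, which keeps the residual
+# stream at roughly constant scale regardless of depth. `_example_res_scale_output` is a hypothetical helper,
+# not part of the model.
+def _example_res_scale_output(residuals, attn_out, mlp_out, res_scale=1.0):
+    if res_scale == 1.0:
+        return residuals + attn_out + mlp_out
+    return residuals + res_scale * (attn_out + mlp_out)
+
+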
+class JukeboxLayerStack(nn.Module):
+ def __init__(self, config, n_ctx):
+ super().__init__()
+ self.n_ctx = n_ctx
+ self.width = config.hidden_size
+ self.num_layers = config.num_layers
+ self.blocks = config.blocks
+ self.attention_pattern = config.attention_pattern
+ if self.blocks is not None:
+ self.block_ctx = n_ctx // self.blocks
+ self.encoder_len = config.nb_relevant_lyric_tokens
+ self.n_heads = config.n_heads
+
+ # Orders of attn_func
+ attention_pattern = ATTENTION_PATTERNS[self.attention_pattern]
+ self._attn_mods = nn.ModuleList()
+ for depth in range(self.num_layers):
+ self._attn_mods.append(JukeboxBlock(config, n_ctx, attn_func=attention_pattern(depth)))
+
+ self.saved_attn_weights = []
+
+ def set_record_attn(self, record_attn):
+ """
+ Makes forward prop dump self-attention softmaxes to self.saved_attn_weights.
+
+ Args:
+            record_attn (`Union[bool, set]`):
+                Either a set of layer indices indicating which layers to store, or a boolean value indicating whether
+                to dump all.
+ """
+
+ def _should_record_attn(layer_idx):
+ if isinstance(record_attn, bool):
+ return record_attn
+ return layer_idx in record_attn
+
+ for i, layer in enumerate(self._attn_mods):
+ layer.attn.record_attn = _should_record_attn(i)
+
+ if not record_attn:
+ self.saved_attn_weights = []
+
+ def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
+ # Blocks
+ for i, attn_layer in enumerate(self._attn_mods):
+ if attn_layer.attn_func == "cross_attention": # attend to the lyrics
+ hidden_states = attn_layer(
+ hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample
+ )
+ else:
+ hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample)
+ if attn_layer.attn.record_attn:
+ self.saved_attn_weights.append(attn_layer.attn.c_attn.weight)
+ return hidden_states
+
+ def del_cache(self):
+ for attn_layer in self._attn_mods:
+ attn_layer.attn.del_cache()
+
+
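+# Illustrative sketch of how `JukeboxLayerStack` above resolves per-layer attention functions:
+# `ATTENTION_PATTERNS[name]` is a callable mapping a layer depth to an `attn_func` string. The concrete cycle
+# below is only an assumed example (the real patterns are defined elsewhere in this file); it shows the
+# depth -> attn_func dispatch used when building the list of `JukeboxBlock` modules.
+def _example_attention_pattern(depth):
+    cycle = ["block_attn", "transpose_block_attn", "prev_block_attn"]  # assumed cycle, for illustration only
+    return cycle[depth % len(cycle)]
+
+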
+class JukeboxPositionalEmbedding(nn.Module):
+ def __init__(self, embed_dim, width):
+ super().__init__()
+ self.pos_emb = nn.Parameter(torch.empty((embed_dim, width)))
+
+ def forward(self):
+ pos_emb = self.pos_emb
+ return pos_emb
+
+
+class JukeboxConditionalAutoregressive(nn.Module):
+ def __init__(
+ self,
+ config,
+ n_ctx=None,
+ embed_dim=None,
+ audio_conditioning=False,
+ metadata_conditioning=False,
+ is_encoder=False,
+ ):
+ """
+ Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly
+        set for each configuration.
+
+ Args:
+ config (`JukeboxPriorConfig`):
+ Model configuration class with all the parameters of the model. Initializing with a config file does
+ not load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ n_ctx (`int`, *optional*):
+ Number of tokens or lyrics tokens provided in a single pass.
+            embed_dim (`int`, *optional*):
+                Either equal to the dimension of the codebook, or the sum of n_vocab (lyrics) and the codebook
+                dimension if the model combines lyrics and music tokens, or simply n_vocab if the model is a separate
+                encoder.
+            audio_conditioning (`bool`, *optional*, defaults to `False`):
+                Whether or not the prior supports conditioning on audio.
+            metadata_conditioning (`bool`, *optional*, defaults to `False`):
+                Whether or not the prior supports conditioning on artist, genres, lyrics and timing.
+ is_encoder (`bool`, *optional*, defaults to `False`):
+ Whether the model is an encoder only model.
+ """
+
+ super().__init__()
+ self.width = config.hidden_size
+ self.num_layers = config.num_layers
+ self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx
+ self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size
+ self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size)
+ self.embed_tokens_dropout = nn.Dropout(config.emb_dropout)
+ self.metadata_conditioning = metadata_conditioning
+ self.audio_conditioning = audio_conditioning
+ if not metadata_conditioning:
+ self.start_token = nn.Parameter(torch.empty((1, config.hidden_size)))
+ self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size)
+ self.pos_emb_dropout = nn.Dropout(config.emb_dropout)
+
+ self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx)
+ self.is_encoder = is_encoder
+ self.encoder_len = config.nb_relevant_lyric_tokens
+
+ if config.merged_decoder:
+ # Merged piped model uses this setup
+ self.add_cond_after_transformer = False
+ self.share_embed_tokens_fc_proj_out = False
+ else:
+ self.add_cond_after_transformer = True
+ self.share_embed_tokens_fc_proj_out = True
+
+ if not is_encoder:
+ self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False)
+ if self.share_embed_tokens_fc_proj_out:
+ self.fc_proj_out.weight = self.embed_tokens.weight
+ self.loss = torch.nn.CrossEntropyLoss()
+
+ def forward(
+ self,
+ tokens,
+ audio_conditioning=None,
+ metadata_conditioning=None,
+ last_encoder_hidden_states=None,
+ get_preds=False,
+ get_acts=False,
+ get_sep_loss=False,
+ ):
+ """
+ Args:
+ tokens (`torch.tensor`):
+ Can represent music tokens, lyrics tokens or both, depending on the configuration.
+ """
+ # Preprocess.
+ batch_size = tokens.shape[0]
+ with torch.no_grad():
+ tokens = tokens.view(batch_size, -1).long()
+
+ if not self.audio_conditioning:
+ audio_conditioning = torch.zeros(
+ (batch_size, 1, self.width),
+ device=tokens.device,
+ dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype,
+ )
+
+ target = tokens # Target
+ hidden_states = self.embed_tokens(tokens)
+ # Shift by 1, and fill in start token
+ hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1)
+ if self.metadata_conditioning:
+ hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width)
+ else:
+ hidden_states[:, 0] = self.start_token
+
+ hidden_states = (
+ self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning
+ ) # Pos emb and dropout
+
+ hidden_states = self.transformer(
+ hidden_states, last_encoder_hidden_states=last_encoder_hidden_states
+ ) # Transformer
+        if self.add_cond_after_transformer:  # Piped model doesn't add x_cond
+ hidden_states = hidden_states + audio_conditioning
+
+ activations = hidden_states
+ if self.is_encoder:
+ return hidden_states
+
+ hidden_states = self.fc_proj_out(hidden_states) # Predictions
+ loss_fn = nn.CrossEntropyLoss()
+ if get_sep_loss:
+ lyric_hidden_states = hidden_states[:, : self.encoder_len].reshape(-1, self.embed_dim)
+ token_hidden_states = hidden_states[:, self.encoder_len :].reshape(-1, self.embed_dim)
+
+ lyric_loss = loss_fn(lyric_hidden_states, target[:, : self.encoder_len].reshape(-1)) / np.log(2.0)
+ music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len :].reshape(-1)) / np.log(2.0)
+
+ loss = (lyric_loss, music_token_loss) # Note order! Lyric is first
+ else:
+ loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0) # Loss
+
+ if get_preds:
+ return loss, hidden_states
+ elif get_acts:
+ return loss, activations
+ else:
+ return loss, None
+
+ def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning):
+ if sample_t == 0:
+ hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to(
+ self.embed_tokens.weight.device
+ )
+ if self.metadata_conditioning:
+ hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width)
+ else:
+ hidden_states[:, 0] = self.start_token
+ else:
+ hidden_states = self.embed_tokens(tokens)
+ if audio_conditioning.shape == (n_samples, self.n_ctx, self.width):
+ cond = audio_conditioning[:, sample_t : sample_t + 1, :]
+ else:
+ cond = audio_conditioning
+ # Pos emb, dropout is identity at eval time
+ hidden_states = hidden_states + self.pos_emb()[sample_t : sample_t + 1] + cond
+ return hidden_states, cond
+
+ def sample(
+ self,
+ n_samples,
+ audio_conditioning=None,
+ metadata_conditioning=None,
+ last_encoder_hidden_states=None,
+ temp=1.0,
+ top_k=0,
+ top_p=0.0,
+ get_preds=False,
+ sample_tokens=None,
+ ):
+ if sample_tokens is None:
+ sample_tokens = self.n_ctx
+
+ if not self.audio_conditioning:
+ audio_conditioning = torch.zeros(
+ (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype
+ ).to(self.fc_proj_out.device)
+
+ with torch.no_grad():
+ sampled_tokens = []
+ tokens = None
+ if get_preds:
+ preds = []
+
+ iter = tqdm(range(0, sample_tokens), leave=False)
+ for sample_t in iter:
+ iter.set_description(f"Ancestral sampling {sample_tokens} music tokens", refresh=True)
+ hidden_states, cond = self.get_emb(
+ sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning
+ )
+
+ hidden_states = self.transformer(
+ hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True
+ )
+ if self.add_cond_after_transformer:
+ hidden_states = hidden_states + cond
+ hidden_states = self.fc_proj_out(hidden_states) # Predictions
+ if get_preds:
+ preds.append(hidden_states.clone())
+ # Adjust logits
+ hidden_states = hidden_states / temp
+ hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p)
+ # Sample and replace hidden_states
+ tokens = torch.distributions.Categorical(logits=hidden_states).sample()
+ sampled_tokens.append(tokens.clone())
+
+ del tokens
+ self.transformer.del_cache()
+
+ tokens = torch.cat(sampled_tokens, dim=1)
+ if get_preds:
+ preds = torch.cat(preds, dim=1)
+ if get_preds:
+ return tokens, preds
+ else:
+ return tokens
+
+ def split_chunks(self, length, chunk_size):
+ n_passes = (length + chunk_size - 1) // chunk_size
+ chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1]
+ return chunk_sizes
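+        # e.g. split_chunks(10, 4) == [4, 4, 2]: the primed past context is fed through the transformer in
+        # chunks of at most `chunk_size` tokens, so the key/value cache is filled without attending over the
+        # whole past in a single forward pass.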
+
+ def primed_sample(
+ self,
+ n_samples,
+ lyric_and_music_tokens,
+ audio_conditioning=None,
+ metadata_conditioning=None,
+ last_encoder_hidden_states=None,
+ temp=1.0,
+ top_k=0,
+ top_p=0.0,
+ get_preds=False,
+ chunk_size=None,
+ sample_tokens=None,
+ ):
+ if sample_tokens is None:
+ sample_tokens = self.n_ctx
+ # Preprocess.
+ batch_size = lyric_and_music_tokens.shape[0]
+ with torch.no_grad():
+ lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long()
+
+ sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1)
+ sampled_audio = list(sampled_audio)
+
+ if not self.audio_conditioning:
+ audio_conditioning = torch.zeros(
+ (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype
+ ).to(lyric_and_music_tokens.device)
+
+ with torch.no_grad():
+ if get_preds:
+ preds = []
+
+            # Fill up the key/value cache for the past context by running a forward pass.
+ # We do so in chunks instead of doing the whole past in one forward pass to reduce max memory usage.
+ if chunk_size is None:
+ chunk_size = len(sampled_audio)
+ chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size)
+ x_primes = []
+ start = 0
+ token = None
+
+ for current_chunk_size in tqdm(chunk_sizes, desc="Preparing past key value", leave=False):
+ sampled_audio_prime, conds_prime = [], []
+ for sample_t in range(start, start + current_chunk_size):
+ x_prime, cond_prime = self.get_emb(
+ sample_t, n_samples, token, audio_conditioning, metadata_conditioning
+ )
+ token = sampled_audio[sample_t]
+ sampled_audio_prime.append(x_prime)
+ conds_prime.append(cond_prime)
+ start = start + current_chunk_size
+ x_prime, cond_prime = torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1)
+ del sampled_audio_prime
+ del conds_prime
+ if not get_preds:
+ del cond_prime
+ x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True)
+
+ if get_preds:
+ if self.add_cond_after_transformer:
+ x_prime = x_prime + cond_prime
+ del cond_prime
+ x_primes.append(x_prime)
+ else:
+ del x_prime
+
+ if get_preds:
+ x_prime = torch.cat(x_primes, dim=1)
+ x_prime = self.fc_proj_out(x_prime) # Predictions
+ preds.append(x_prime)
+
+ # the input of the encoder and decoder can be merged into (lyrics, music tokens)
+ input_tokens = sampled_audio[-1]
+
+            iterator = tqdm(
+ range(len(sampled_audio), sample_tokens),
+ desc=f"Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens",
+ leave=False,
+ )
+            for sample_t in iterator:
+ hidden_states, cond = self.get_emb(
+ sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning
+ )
+
+ hidden_states = self.transformer(
+ hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True
+ )
+ if self.add_cond_after_transformer:
+ hidden_states = hidden_states + cond
+ hidden_states = self.fc_proj_out(hidden_states) # Predictions
+ if get_preds:
+ preds.append(hidden_states)
+ # Adjust logits
+ hidden_states = hidden_states / temp
+ hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p)
+ # only music tokens are sampled
+ music_tokens = torch.distributions.Categorical(logits=hidden_states).sample()
+ sampled_audio.append(music_tokens.clone())
+ input_tokens = music_tokens
+
+ del input_tokens, music_tokens
+ self.transformer.del_cache()
+
+ music_tokens = torch.cat(sampled_audio, dim=1)
+ if get_preds:
+ preds = torch.cat(preds, dim=1)
+ if get_preds:
+ return music_tokens, preds
+ else:
+ return music_tokens
+
+
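+# Illustrative sketch of the teacher-forcing setup in `JukeboxConditionalAutoregressive.forward` above: the
+# embedded tokens are rotated right by one position, position 0 is overwritten with the start (or metadata)
+# embedding, and the cross-entropy loss is divided by log(2) so it is reported in bits per dimension.
+# `_example_shift_and_bits_loss` is a hypothetical helper, not used by the model.
+def _example_shift_and_bits_loss(token_embeddings, start_embedding, logits, targets):
+    shifted = torch.cat((token_embeddings[:, -1:], token_embeddings[:, :-1]), dim=1)
+    shifted[:, 0] = start_embedding
+    bits_per_dim = nn.CrossEntropyLoss()(logits.view(-1, logits.shape[-1]), targets.view(-1)) / np.log(2.0)
+    return shifted, bits_per_dim
+
+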
+class JukeboxMusicTokenConditioner(nn.Module):
+ """
+    The `JukeboxMusicTokenConditioner` takes music tokens as an input (corresponding to the codes of the VQVAE's
+    codebook) and upsamples them using a single decoder convolution block (the same as the one used in the VQVAE).
+ """
+
+ def __init__(self, config, level):
+ super().__init__()
+ self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size)
+ config.embed_dim = config.music_vocab_size # setting correct argument for the `JukeboxDecoder`
+
+ self.upsampler = JukeboxDecoderConvBock(
+ config,
+ config.hidden_size,
+ config.res_conv_width,
+ config.res_conv_depth,
+ config.res_downs_t[level],
+ config.res_strides_t[level],
+ reverse_dilation=False,
+ )
+ self.layer_norm = JukeboxLayerNorm(config.hidden_size)
+
+ def forward(self, music_tokens, raw_audio_conditionning=None):
+ """
+ Args:
+ music_tokens (`torch.LongTensor`):
+                Music tokens from the upper level, in `range(nb_discrete_codes)`.
+            raw_audio_conditionning (`torch.LongTensor`, *optional*):
+                Raw audio information used when primed sampling; it conditions the generation.
+ """
+ if raw_audio_conditionning is None:
+ raw_audio_conditionning = 0.0
+ # Embed music_tokens
+ music_tokens = music_tokens.long()
+ hidden_states = self.embed_tokens(music_tokens)
+ hidden_states = hidden_states + raw_audio_conditionning
+
+ # Run conditioner
+ hidden_states = hidden_states.permute(0, 2, 1)
+ hidden_states = self.upsampler(hidden_states)
+ hidden_states = hidden_states.permute(0, 2, 1)
+ hidden_states = self.layer_norm(hidden_states)
+ return hidden_states
+
+
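+# Illustrative sketch of the shape flow in `JukeboxMusicTokenConditioner.forward` above: embedded tokens are
+# moved to channels-first for the convolutional upsampler, then moved back before the layer norm. The
+# `upsampler` argument below is a stand-in (defaulting to `nn.Identity()`), not the real `JukeboxDecoderConvBock`.
+def _example_conditioner_shape_flow(music_tokens, embed_tokens, upsampler=None):
+    upsampler = upsampler if upsampler is not None else nn.Identity()
+    hidden_states = embed_tokens(music_tokens.long())  # (batch, seq, hidden)
+    hidden_states = hidden_states.permute(0, 2, 1)  # (batch, hidden, seq) for the 1D convolutions
+    hidden_states = upsampler(hidden_states)  # the sequence length is upsampled here
+    return hidden_states.permute(0, 2, 1)  # back to (batch, seq, hidden)
+
+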
+class JukeboxRangeEmbedding(nn.Module):
+ """
+    The `JukeboxRangeEmbedding` interpolates the given [pos_start, pos_end] to obtain an equivalent of a time
+    positional embedding of length `n_ctx`.
+
+    Binning process: for each pos in the position tensor, find its bin. [start, end) is mapped to [0, 1), then to
+    [0, bins), and floored to one of [0, ..., bins - 1]. Note that the interval is open-ended on the right, so
+    start <= pos < end.
+ """
+
+ def __init__(self, n_time, embed_dim, range, out_width, clamp=False):
+ super().__init__()
+ self.n_time = n_time
+ self.embed_dim = embed_dim
+ self.emb = nn.Embedding(embed_dim, out_width)
+ self.pos_min, self.pos_max = range
+ self.clamp = clamp
+
+ def forward(self, pos_start, pos_end=None):
+ # Check if [pos_start,pos_end] in [pos_min, pos_max)
+ if not len(pos_start.shape) == 2:
+ raise TypeError(f"Expected shape with 2 dims, got {pos_start.shape}")
+        if not ((self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all()):
+ raise TypeError(f"Range is [{self.pos_min},{self.pos_max}), got {pos_start}")
+
+ pos_start = pos_start.float()
+ if pos_end is not None:
+ if self.clamp:
+ pos_end = pos_end.clamp(self.pos_min, self.pos_max)
+
+ pos_end = pos_end.float()
+ # Interpolate so that [pos_start, ..., pos_end] <-> position tensor of length n_ctx
+ n_time = self.n_time
+ if n_time != 1:
+ interpolation = (
+ torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time
+ )
+ position = pos_start + (pos_end - pos_start) * interpolation
+ else:
+ position = pos_start
+
+ # Bin each value to bins_
+        # [0, 1) -> [0, embed_dim) -> floor -> [0, ..., embed_dim - 1]
+ normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min)
+ bins_ = (self.embed_dim * normalised_position).floor().long().detach()
+ return self.emb(bins_)
+
+
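+# Illustrative sketch of the binning performed by `JukeboxRangeEmbedding.forward` above: a position in
+# [pos_min, pos_max) is normalised to [0, 1) and floored into one of `embed_dim` integer bins, which then
+# indexes the embedding table. Hypothetical helper, not used by the model.
+def _example_position_to_bin(position, pos_min, pos_max, embed_dim):
+    normalised = (position - pos_min) / (pos_max - pos_min)
+    # e.g. _example_position_to_bin(30.0, 0.0, 60.0, 64) == 32
+    return int(embed_dim * normalised)
+
+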
+class JukeboxLabelConditioner(nn.Module):
+ def __init__(self, config, include_time_signal):
+ super().__init__()
+
+ embed_dim = config.hidden_size
+ timing_dims = config.timing_dims
+ sampling_rate = config.sampling_rate
+ nb_genres, nb_artists = config.metadata_dims
+ music_tokens_shape = config.n_ctx
+
+ self.max_nb_genres = config.max_nb_genres
+ self.bow_genre_emb = nn.Embedding(nb_genres, embed_dim)
+ self.artist_emb = nn.Embedding(nb_artists, embed_dim)
+ self.include_time_signal = include_time_signal
+ if self.include_time_signal:
+ total_length_range = (config.min_duration * sampling_rate, config.max_duration * sampling_rate)
+ absolute_pos_range = (0.0, config.max_duration * sampling_rate)
+ relative_pos_range = (0.0, 1.0)
+ self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim)
+ self.absolute_pos_emb = JukeboxRangeEmbedding(
+ music_tokens_shape, timing_dims, absolute_pos_range, embed_dim
+ )
+ self.relative_pos_emb = JukeboxRangeEmbedding(
+ music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True
+ )
+
+ def forward(self, metadata):
+ total_length = metadata[:, 0:1]
+ offset = metadata[:, 1:2]
+ length = metadata[:, 2:3]
+ artist = metadata[:, 3:4]
+ genre = metadata[:, 4:]
+
+ # Start embedding of length 1
+ artist_emb = self.artist_emb(artist)
+ # Empty genre slots are denoted by -1. We mask these out.
+ mask = (genre >= 0).float().unsqueeze(2)
+ genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True)
+ start_emb = genre_emb + artist_emb
+
+ # Pos embedding of length n_ctx
+ if self.include_time_signal:
+ start, end = offset, offset + length
+ total_length = total_length.float()
+ start = start.float()
+ end = end.float()
+ pos_emb = (
+ self.total_length_emb(total_length)
+ + self.absolute_pos_emb(start, end)
+ + self.relative_pos_emb(start / total_length, end / total_length)
+ )
+ else:
+ pos_emb = None
+ return start_emb, pos_emb
+
+
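+# Illustrative sketch of the metadata layout consumed by `JukeboxLabelConditioner.forward` above: one row per
+# sample, laid out as [total_length, offset, length, artist_id, genre_id_0, ..., genre_id_{n-1}], where unused
+# genre slots are filled with -1 and masked out. The ids and lengths below are made up for illustration.
+def _example_metadata_row():
+    return torch.tensor([[4410000, 0, 1048576, 10, 32, -1, -1, -1, -1]], dtype=torch.long)
+
+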
+class JukeboxPrior(PreTrainedModel):
+ """
+    The JukeboxPrior class, which is a wrapper around the various conditioning modules and the transformer. A
+    JukeboxPrior can be seen as a language model trained on music. It models the next `music token` prediction task. If
+    a (lyric) `encoder` is defined, it also models the `next character` prediction task on the lyrics. Can be
+    conditioned on timing, artist, genre, lyrics and codes from lower-level priors.
+
+ Args:
+ config (`JukeboxPriorConfig`):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ level (`int`, *optional*):
+ Current level of the Prior. Should be in range `[0,nb_priors]`.
+ nb_priors (`int`, *optional*, defaults to 3):
+ Total number of priors.
+ vqvae_encoder (`Callable`, *optional*):
+ Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of
+ the vqvae module to avoid getting the parameters.
+ vqvae_decoder (`Callable`, *optional*):
+ Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of
+ the vqvae module to avoid getting the parameters.
+ """
+
+ config_class = JukeboxPriorConfig
+
+ def _init_weights(self, module):
+ init_scale = self.config.init_scale
+
+ if isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
+ elif isinstance(module, JukeboxConv1D):
+ if self.config.zero_out:
+ module.weight.data.zero_()
+ else:
+ module.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
+ elif isinstance(module, JukeboxPositionalEmbedding):
+ module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale)
+ elif isinstance(module, JukeboxRangeEmbedding):
+ module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale)
+ elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "lm_head"):
+ module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
+ elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "start_token"):
+ module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale)
+ elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out:
+            module.conv1d_2.weight.data.zero_()
+ module.conv1d_2.bias.data.zero_()
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+ def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None):
+ super().__init__(config)
+ # Passing functions instead of the vqvae module to avoid getting params, only used in the
+ # forward loop
+ self.vqvae_encoder = vqvae_encoder
+ self.vqvae_decoder = vqvae_decoder
+
+ self.levels = nb_priors
+ self.level = level if level is not None else config.level
+
+ self.base_model_prefix = f"priors.{self.level}"
+
+ self.n_ctx = config.n_ctx
+
+ self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0
+ self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens
+ self.encoder_loss_fraction = config.encoder_loss_fraction
+
+ # Audio conditioning : conditioning on music tokens (either from audio or from previous levels or both)
+ self.audio_conditioning = self.level != 0
+ self.cond_level = self.level - 1
+ if self.audio_conditioning:
+ self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level)
+
+        # metadata conditioning: conditioning on timing, genres, and artist
+ self.metadata_conditioning = config.metadata_conditioning
+ if self.metadata_conditioning:
+ self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning)
+
+ # define encoder-decoder or encoder and decoder
+ self.is_encoder_decoder = config.is_encoder_decoder
+ if config.is_encoder_decoder:
+ # encoder-decoder transformer
+ self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx]
+ self.embed_dim_shift = [0, config.lyric_vocab_size]
+ self.width = config.hidden_size
+
+ self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens
+
+ self.prior = JukeboxConditionalAutoregressive(
+ config,
+ n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx,
+ embed_dim=config.lyric_vocab_size + config.music_vocab_size,
+ audio_conditioning=(self.audio_conditioning or self.metadata_conditioning),
+ metadata_conditioning=True,
+ )
+
+ else:
+ # Separate encoder-decoder transformer
+ encoder_config = config.encoder_config
+
+ if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning:
+ self.lyric_acts_width = encoder_config.hidden_size
+ self.encoder_width = config.hidden_size
+ self.encoder_dim = config.lyric_vocab_size
+ self.encoder = JukeboxConditionalAutoregressive(
+ encoder_config,
+ n_ctx=self.nb_relevant_lyric_tokens,
+ embed_dim=self.encoder_dim,
+ audio_conditioning=False,
+ metadata_conditioning=False,
+ is_encoder=True,
+ )
+ self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size)
+ self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size)
+ self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False)
+ else:
+ self.nb_relevant_lyric_tokens = 0
+
+ # decoder model on the tokens
+ self.prior = JukeboxConditionalAutoregressive(
+ config,
+ audio_conditioning=(self.audio_conditioning or self.metadata_conditioning),
+ metadata_conditioning=self.metadata_conditioning,
+ )
+
+ self.next_token_prediction_loss_dims = config.n_ctx
+ self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims
+
+ self.downsamples = [stride**down for stride, down in zip(config.res_strides_t, config.res_downs_t)]
+ self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None
+ self.raw_to_tokens = np.prod(self.downsamples[: nb_priors - self.level])
+ self.sample_length = self.n_ctx * self.raw_to_tokens
+
+ logger.info(
+ f"Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample"
+ f" length:{self.sample_length}"
+ )
+
+ def get_metadata(self, labels, start, total_length, offset, get_indices=False):
+ metadata = labels.clone()
+ metadata[:, 0] = total_length
+ # Set sample_length to match this level
+ metadata[:, 2] = int(self.sample_length)
+
+ # Set offset
+ metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens)
+        # since metadata has the full token_list here, we just need to select the ones that are relevant
+
+ # Set lyric tokens
+ metadata, indices = self.set_metadata_lyric_tokens(metadata)
+ if get_indices:
+ return metadata, indices
+ else:
+ return metadata
+
+ def set_metadata_lyric_tokens(self, labels):
+ """
+        Processes the full labels to only retrieve the relevant lyric tokens and keep the metadata conditioning tokens.
+ """
+ if self.nb_relevant_lyric_tokens > 0:
+ tokens_list = torch.zeros(
+ (labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device
+ )
+            indices_list = []  # what's the index of each current character in the original array
+ for idx in range(labels.shape[0]):
+ full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres :]
+ total_length, offset, duration = labels[idx, 0], labels[idx, 1], labels[idx, 2]
+ tokens, indices = get_relevant_lyric_tokens(
+ full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration
+ )
+ tokens_list[idx, :] = tokens
+ indices_list.append(indices)
+
+ return (
+ torch.cat((labels[:, : 4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1),
+ indices_list,
+ )
+ else:
+ return labels, None
+
+ def get_music_tokens_conds(self, music_tokens, start, end):
+ """
+ Extracts current level's conditioning music tokens.
+ """
+ if self.level != 0:
+ music_tokens_cond = music_tokens[self.level - 1]
+ music_tokens = music_tokens_cond[:, start // self.cond_downsample : end // self.cond_downsample]
+ missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1]
+ if missing_cond_len > 0:
+ init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device)
+ music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long()
+ music_tokens_conds = [music_tokens_cond]
+ else:
+ music_tokens_conds = None
+ return music_tokens_conds
+
+ def prior_preprocess(self, tokens, conds):
+ """
+        Shifts the input tokens to account for the dictionary merge. The `embed_dim_shift` gives the amount by which
+        the music tokens should be shifted. It is equal to `lyric_vocab_size`.
+ """
+ batch_size = tokens[0].shape[0]
+ for i in range(len(tokens)):
+ tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1)
+
+ for i in range(len(conds)):
+ if conds[i] is None:
+ conds[i] = torch.zeros(
+ (batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device
+ )
+
+ return torch.cat(tokens, dim=1), torch.cat(conds, dim=1)
+
+ def prior_postprocess(self, tokens):
+ """
+        Shifts the input tokens back if the model uses an encoder-decoder architecture. As the embedding layer is
+        shared, `embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only the music tokens are
+        returned.
+ """
+ batch_size = tokens.shape[0]
+ dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0])
+ tokens = list(torch.split(tokens, dims, dim=1))
+
+        # Some of the input tokens might be shifted to take into account the vocabulary fusion
+ for i in range(len(tokens)):
+ bins_shift = int(self.embed_dim_shift[i])
+ tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1)
+ tokens[i] = torch.clamp(tokens[i], min=0)
+ # If not masking loss, model may have generated lyric/midi tokens which are now shifted <0 by bin_shift
+ return tokens[-1]
+
+ def embed_tokens(self, music_tokens_conds):
+ """
+        Embeds the upper-level music tokens and upsamples them to provide the audio conditioning.
+ """
+ music_tokens_conds = music_tokens_conds[: self.cond_level + 1]
+ audio_conditioning = None
+ for music_tokens_cond, conditioner_block in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))):
+ audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning)
+ return audio_conditioning
+
+ def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1):
+ """
+ Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states.
+ """
+ if start_level is None:
+ start_level = self.level
+ if end_level is None:
+ end_level = self.levels
+ # Get latents
+ with torch.no_grad():
+ latent_states = self.vqvae_encoder(
+ hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks
+ )
+ return latent_states
+
+ def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1):
+ """
+        Upsamples the sequence of codebook vectors to raw audio.
+ """
+ if start_level is None:
+ start_level = self.level
+ if end_level is None:
+ end_level = self.levels
+ with torch.no_grad():
+ output = self.vqvae_decoder(
+ music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks
+ )
+ return output
+
+ def get_cond(self, music_tokens_conds, metadata):
+ """
+        Converts the input tokens to input_embeddings. Splits the lyrics from the rest of the metadata. Lyric tokens
+ can be None.
+ """
+ if metadata is not None:
+ n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens
+ metadata, lyric_tokens = metadata[:, :n_labels], metadata[:, n_labels:]
+ else:
+ metadata, lyric_tokens = None, None
+ metadata_conditioning, metadata_pos = (
+ self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None)
+ )
+ audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos
+ return audio_conditioning, metadata_conditioning, lyric_tokens
+
+ def sample(
+ self,
+ n_samples,
+ music_tokens=None,
+ music_tokens_conds=None,
+ metadata=None,
+ temp=1.0,
+ top_k=0,
+ top_p=0.0,
+ chunk_size=None,
+ sample_tokens=None,
+ ):
+ """
+        Ancestral/primed sampling of a window of tokens using the provided conditioning and metadata.
+
+ Args:
+ n_samples (`int`):
+ Number of samples to generate.
+ music_tokens (`List[torch.LongTensor]`, *optional*):
+                Previously generated tokens at the current level. Used as context for the generation.
+            music_tokens_conds (`List[torch.FloatTensor]`, *optional*):
+                Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not
+                conditioned on the upper-level tokens.
+            metadata (`List[torch.LongTensor]`, *optional*):
+                List containing the metadata tensor with the artist, genre and the lyric tokens.
+ temp (`float`, *optional*, defaults to 1.0):
+ Sampling temperature.
+ top_k (`int`, *optional*, defaults to 0):
+ Top k probabilities used for filtering.
+ top_p (`float`, *optional*, defaults to 0.0):
+ Top p probabilities used for filtering.
+ chunk_size (`int`, *optional*):
+ Size of the chunks used to prepare the cache of the transformer.
+ sample_tokens (`int`, *optional*):
+ Number of tokens to sample.
+
+ """
+ no_past_context = music_tokens is None or music_tokens.shape[1] == 0
+ name = {True: "Ancestral", False: "Primed"}[no_past_context]
+ logger.info(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}")
+
+ with torch.no_grad():
+ # Currently audio_conditioning only uses immediately above layer
+ audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata)
+ if self.is_encoder_decoder:
+ if no_past_context: # the prime_sample function will be used with music_tokens set to None
+ lyric_and_music_tokens, audio_conditioning = self.prior_preprocess(
+ [lyric_tokens], [None, audio_conditioning]
+ )
+ else:
+ lyric_and_music_tokens, audio_conditioning = self.prior_preprocess(
+ [lyric_tokens, music_tokens], [None, audio_conditioning]
+ )
+ if sample_tokens is not None:
+ sample_tokens += self.nb_relevant_lyric_tokens
+ music_tokens = self.prior.primed_sample(
+ n_samples,
+ lyric_and_music_tokens,
+ audio_conditioning,
+ metadata_conditioning,
+ temp=temp,
+ top_k=top_k,
+ top_p=top_p,
+ chunk_size=chunk_size,
+ sample_tokens=sample_tokens,
+ )
+ music_tokens = self.prior_postprocess(music_tokens)
+ else:
+ last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True)
+ if no_past_context:
+ music_tokens = self.prior.sample(
+ n_samples,
+ audio_conditioning,
+ metadata_conditioning,
+ last_encoder_hidden_states,
+ temp=temp,
+ top_k=top_k,
+ top_p=top_p,
+ sample_tokens=sample_tokens,
+ )
+ else:
+ music_tokens = self.prior.primed_sample(
+ n_samples,
+ music_tokens,
+ audio_conditioning,
+ metadata_conditioning,
+ last_encoder_hidden_states,
+ temp=temp,
+ top_k=top_k,
+ top_p=top_p,
+ chunk_size=chunk_size,
+ sample_tokens=sample_tokens,
+ )
+ return music_tokens
+
+ def get_encoder_states(self, lyric_tokens, sample=False):
+ """
+        Retrieves the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through
+ the lyric encoder.
+ """
+ if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning:
+ if sample:
+ self.encoder = self.encoder.to(lyric_tokens.device)
+ lyric_acts = self.encoder(lyric_tokens, None, None, None)
+ lyric_acts = self.encoder.proj_in(lyric_acts)
+ last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts)
+ else:
+ last_encoder_hidden_states = None
+ return last_encoder_hidden_states
+
+ def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics):
+ """
+ Computes the loss for the lyric encoder: next lyric token prediction.
+ """
+ if self.lyric_conditioning:
+ last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states)
+ encoder_loss = nn.functional.cross_entropy(
+ last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1)
+ ) / np.log(2.0)
+ else:
+ encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device)
+ return encoder_loss
+
+ def forward_tokens(
+ self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False
+ ):
+ """
+ Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the
+ vqvae's encoding layers.
+ """
+ if get_attn_weights:
+ self.prior.transformer.set_record_attn(get_attn_weights)
+ audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata)
+
+ if self.is_encoder_decoder: # the preprocess returns the full tokens (Lyrics and Music tokens), shifted
+ tokens, audio_conditioning = self.prior_preprocess(
+ [lyric_tokens, music_tokens], [None, audio_conditioning]
+ )
+ (encoder_loss, next_token_prediction_loss), preds = self.prior(
+ tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds
+ )
+ else:
+ last_encoder_hidden_states = self.get_encoder_states(lyric_tokens)
+ encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens)
+ next_token_prediction_loss, preds = self.prior(
+ music_tokens,
+ audio_conditioning,
+ metadata_conditioning,
+ last_encoder_hidden_states,
+ get_preds=get_preds,
+ )
+ loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims
+ loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims
+
+ metrics = {
+ "bpd": next_token_prediction_loss.clone().detach(),
+ "encoder_loss": encoder_loss.clone().detach(),
+ "next_token_prediction_loss": next_token_prediction_loss.clone().detach(),
+ }
+ if get_preds:
+ metrics["preds"] = preds.clone().detach()
+ if get_attn_weights:
+ saved_attn_weights = self.prior.transformer.saved_attn_weights
+ self.prior.transformer.set_record_attn(False)
+ return saved_attn_weights
+ else:
+ return loss, metrics
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ metadata: Optional[List[torch.LongTensor]],
+ decode: Optional[bool] = False,
+ get_preds: Optional[bool] = False,
+ ) -> List[torch.Tensor]:
+ """
+        Encodes the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens`
+ function. The loss is the sum of the `encoder` loss and the `decoder` loss.
+
+ Args:
+ hidden_states (`torch.Tensor`):
+ Hidden states which should be raw audio
+ metadata (`List[torch.LongTensor]`, *optional*):
+                List containing the metadata conditioning tensor with the lyric and the metadata tokens.
+ decode (`bool`, *optional*, defaults to `False`):
+                Whether or not to decode the encoded music tokens back to raw audio.
+            get_preds (`bool`, *optional*, defaults to `False`):
+                Whether or not to return the actual predictions of the model.
+ """
+ batch_size = hidden_states.shape[0]
+ music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size)
+ loss, metrics = self.forward_tokens(
+ music_tokens=music_tokens,
+ music_tokens_conds=music_tokens_conds,
+ metadata=metadata,
+ get_preds=get_preds,
+ )
+ if decode:
+ dequantised_states = self.decode([music_tokens, *music_tokens_conds])
+ else:
+ dequantised_states = None
+ return dequantised_states, loss, metrics
+
+
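+# Illustrative sketch of the loss weighting in `JukeboxPrior.forward_tokens` above: the lyric-encoder loss and
+# the music-token prediction loss are combined in proportion to the number of positions each one covers.
+# Hypothetical helper, not used by the model.
+def _example_combined_prior_loss(encoder_loss, token_loss, encoder_loss_fraction, nb_lyric_tokens, n_ctx):
+    total_dims = nb_lyric_tokens + n_ctx
+    lyric_part = encoder_loss_fraction * encoder_loss * nb_lyric_tokens / total_dims
+    token_part = token_loss * n_ctx / total_dims
+    return lyric_part + token_part
+
+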
+class JukeboxPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = JukeboxConfig
+ base_model_prefix = "jukebox"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module):
+ if isinstance(module, JukeboxPrior) or isinstance(module, JukeboxVQVAE):
+ module.apply(module._init_weights)
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+
+JUKEBOX_SAMPLING_INPUT_DOCSTRING = r"""
+    labels (`List[torch.LongTensor]` of length `n_sample`, and shape `(self.levels, self.config.max_nb_genre + lyric_sequence_length)`):
+ List of metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to
+ condition the generation.
+ sampling_kwargs (`Dict[Any]`):
+        Various additional sampling arguments that are used by the `_sample` function. A detailed list of the
+        arguments can be seen in the [`_sample`] function documentation.
+"""
+
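+
+# Illustrative sketch of the windowed sampling schedule used by `JukeboxModel._sample` below: each level is
+# sampled in overlapping windows of `n_ctx` tokens that advance by `hop_length = hop_fraction * n_ctx` tokens.
+# `_example_window_starts` is a hypothetical stand-in for the `get_starts` utility used in `sample_level`.
+def _example_window_starts(total_length, n_ctx, hop_length):
+    starts = []
+    for start in range(0, max(total_length - n_ctx, 0) + 1, hop_length):
+        starts.append(start)
+    if starts and starts[-1] + n_ctx < total_length:
+        starts.append(total_length - n_ctx)
+    # e.g. _example_window_starts(8192, 4096, 2048) == [0, 2048, 4096]
+    return starts
+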
+
+@add_start_docstrings(
+    """The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported: `primed_sample`, `upsample`,
+    `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end-to-end. If
+ you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior
+ individually.
+ """,
+ JUKEBOX_START_DOCSTRING,
+)
+class JukeboxModel(JukeboxPreTrainedModel):
+ _no_split_modules = ["JukeboxBlock"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ vqvae_config = config.vqvae_config
+ self.vqvae = JukeboxVQVAE(vqvae_config)
+ self.set_shared_params(config)
+ self.priors = nn.ModuleList(
+ [JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)]
+ )
+
+ def set_shared_params(self, model_config):
+ """
+        Initialises the parameters that are shared. This has to be done here because the list of `JukeboxPriorConfig`
+        is nested, and is thus unreachable in the `from_dict` function.
+ """
+ for config in model_config.prior_configs:
+ config.sampling_rate = model_config.sampling_rate
+ config.timing_dims = model_config.timing_dims
+ config.min_duration = model_config.min_duration
+ config.max_duration = model_config.max_duration
+ config.max_nb_genres = model_config.max_nb_genres
+ config.metadata_conditioning = model_config.metadata_conditioning
+
+ def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1):
+ return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks)
+
+ def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
+ return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks)
+
+ def split_batch(self, obj, n_samples, split_size):
+ n_passes = (n_samples + split_size - 1) // split_size
+ if isinstance(obj, torch.Tensor):
+ return torch.split(obj, split_size, dim=0)
+ elif isinstance(obj, list):
+ return list(zip(*[torch.split(item, split_size, dim=0) for item in obj]))
+ elif obj is None:
+ return [None] * n_passes
+ else:
+ raise TypeError("Unknown input type")
+
+    # Sample total_length tokens at level=level with hop_length=hop_length
+    def sample_level(
+        self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size
+    ):
+        if total_length >= self.priors[level].n_ctx:
+ iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length)
+ for start in iterator:
+ music_tokens = self.sample_single_window(
+ music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size
+ )
+
+ else:
+ music_tokens = self.sample_partial_window(
+ music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size
+ )
+ return music_tokens
+
+ @torch.no_grad()
+ def _sample(
+ self,
+ music_tokens,
+ labels,
+ sample_levels,
+ metas=None,
+ chunk_size=32,
+ sampling_temperature=0.98,
+ lower_batch_size=16,
+ max_batch_size=16,
+ sample_length_in_seconds=24,
+ compute_alignments=False,
+ sample_tokens=None,
+ offset=0,
+ save_results=True,
+ sample_length=None,
+ ) -> List[torch.LongTensor]:
+ """
+ Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving
+ the generated raw audio at each step.
+
+ Args:
+ music_tokens (`List[torch.LongTensor]`):
+ A sequence of music tokens of length `self.levels` which will be used as context to continue the
+ sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain
+ level.
+ labels (`List[torch.LongTensor]`):
+ List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +
+ lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens
+ which are used to condition the generation.
+ sample_levels (`List[int]`):
+ List of the desired levels at which the sampling will be done. A level is equivalent to the index of
+ the prior in the list of priors
+ metas (`List[Any]`, *optional*):
+                Metadata used to generate the `labels`.
+            chunk_size (`int`, *optional*, defaults to 32):
+                Size of a chunk of audio, used to fill up the memory in chunks to prevent OOM errors. Bigger chunks
+                mean faster memory filling but more memory consumption.
+            sampling_temperature (`float`, *optional*, defaults to 0.98):
+                Temperature used to adjust the randomness of the sampling.
+ lower_batch_size (`int`, *optional*, defaults to 16):
+ Maximum batch size for the lower level priors
+ max_batch_size (`int`, *optional*, defaults to 16):
+ Maximum batch size for the top level priors
+ sample_length_in_seconds (`int`, *optional*, defaults to 24):
+ Desired length of the generation in seconds
+ compute_alignments (`bool`, *optional*, defaults to `False`):
+ Whether or not to compute the alignment between the lyrics and the audio using the top_prior
+ sample_tokens (`int`, *optional*):
+ Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy
+ experiments
+ offset (`int`, *optional*, defaults to 0):
+                Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is
+                greater than 0, the lyrics will be shifted to take that into account.
+ save_results (`bool`, *optional*, defaults to `True`):
+ Whether or not to save the intermediate results. If `True`, will generate a folder named with the start
+ time.
+ sample_length (`int`, *optional*):
+ Desired length of the generation in samples.
+
+ Returns: torch.Tensor
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, JukeboxModel, set_seed
+ >>> import torch
+
+ >>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
+ >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
+
+ >>> labels = tokenizer(**metas)["input_ids"]
+ >>> set_seed(0)
+ >>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)]
+ >>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False)
+ >>> zs[0]
+ tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519,
+ 353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647,
+ 1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528,
+ 1804, 541, 1804, 1434]])
+ ```
+ """
+
+ top_prior = self.priors[0]
+ if sample_length is not None:
+ total_length = sample_length
+ else:
+ total_length = (
+ int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens
+ ) * top_prior.raw_to_tokens
+
+ if sample_levels is None:
+ sample_levels = range(len(self.priors))
+
+        # total length of the signal, might be a bit different from the actual generated length
+ self.total_length = total_length
+ for level in sample_levels:
+ sampling_kwargs = {
+ "temp": 0.99 if level == len(self.priors) - 1 else sampling_temperature,
+ "chunk_size": chunk_size,
+ "sample_tokens": sample_tokens,
+ }
+ # Set correct total_length, hop_length, labels and sampling_kwargs for level
+
+ total_token_to_sample = total_length // self.priors[level].raw_to_tokens
+ hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx)
+ max_batch_size = lower_batch_size if level != sample_levels else max_batch_size
+ music_tokens = self.sample_level(
+ music_tokens,
+ labels[level],
+ offset,
+ sampling_kwargs,
+ level,
+ total_token_to_sample,
+ hop_length,
+ max_batch_size,
+ )
+
+ if save_results:
+ self.vqvae.to(music_tokens[level].device)
+ # Decode sample
+ with torch.no_grad():
+ start_level = len(self.priors) - level - 1 # vqvae levels are reversed
+ raw_audio = self.vqvae.decode(
+ music_tokens[: level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0]
+ )
+ logdir = f"jukebox/level_{level}"
+ if not os.path.exists(logdir):
+ os.makedirs(logdir)
+ save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float())
+ if compute_alignments and self.priors[0] is not None and self.priors[0].nb_relevant_lyric_tokens > 0:
+ with torch.no_grad():
+ alignments = get_alignment(music_tokens, labels[0], self.priors[0], self.config)
+ torch.save({"alignments": alignments}, f"{logdir}/lyric_alignments.pt")
+
+ return music_tokens
+
+ @add_start_docstrings(
+ """
+    Generates music tokens based on the provided `labels`. Will start at the desired prior level and automatically
+ upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use
+ the VQ-VAE decoder to convert the music tokens to raw audio.
+
+ Args:
+ labels (`List[torch.LongTensor]`) :
+ List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +
+ lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens
+ which are used to condition the generation.
+            n_samples (`int`, *optional*, defaults to 1):
+ Number of samples to be generated in parallel.
+ """,
+ )
+ def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> List[torch.LongTensor]:
+ """
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, JukeboxModel, set_seed
+
+ >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
+
+ >>> lyrics = "Hey, are you awake? Can you talk to me?"
+ >>> artist = "Zac Brown Band"
+ >>> genre = "Country"
+ >>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics)
+ >>> set_seed(0)
+ >>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400)
+
+ >>> with torch.no_grad():
+ ... model.decode(music_tokens)[:, :10].squeeze(-1)
+ tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405,
+ -0.0818, -0.0697]])
+ ```
+ """
+
+ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors))))
+ music_tokens = [
+ torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors))
+ ]
+ music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
+ return music_tokens
+
+ @add_start_docstrings(
+ """Generates a continuation of the previously generated tokens.
+
+ Args:
+ music_tokens (`List[torch.LongTensor]` of length `self.levels` ) :
+ A sequence of music tokens which will be used as context to continue the sampling process. Should have
+ `self.levels` tensors, each corresponding to the generation at a certain level.
+ """,
+ JUKEBOX_SAMPLING_INPUT_DOCSTRING,
+ )
+ def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]:
+ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors))))
+ music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
+ return music_tokens
+
+ @add_start_docstrings(
+ """Upsamples a sequence of music tokens using the prior at level `level`.
+
+ Args:
+ music_tokens (`List[torch.LongTensor]` of length `self.levels` ) :
+ A sequence of music tokens which will be used as context to continue the sampling process. Should have
+ `self.levels` tensors, each corresponding to the generation at a certain level.
+ """,
+ JUKEBOX_SAMPLING_INPUT_DOCSTRING,
+ )
+ def upsample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]:
+ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors) - 1)))
+ music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
+ return music_tokens
+
+ @add_start_docstrings(
+        """Generates raw audio conditioned on the provided `raw_audio`, which is used as conditioning at each of the
+    generation levels. The audio is encoded into music tokens using the 3 levels of the VQ-VAE. These tokens are
+    used as conditioning for each level, which means that no ancestral sampling is required.
+
+ Args:
+            raw_audio (`List[torch.Tensor]` of length `n_samples`):
+                A list of raw audio that will be used as conditioning information for each sample that will be
+                generated.
+ """,
+ JUKEBOX_SAMPLING_INPUT_DOCSTRING,
+ )
+ def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.LongTensor]:
+ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors))))
+ self.vqvae.to(raw_audio.device).float()
+ with torch.no_grad():
+ music_tokens = self.vqvae.encode(
+ raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0]
+ )
+ music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
+ return music_tokens
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a4a37b871e485547231c6406340dceda36c6265
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py
@@ -0,0 +1,418 @@
+# coding=utf-8
+# Copyright 2022 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for OpenAI Jukebox."""
+
+
+import json
+import os
+import re
+import unicodedata
+from json.encoder import INFINITY
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import regex
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
+from ...utils.generic import _is_jax, _is_numpy
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "artists_file": "artists.json",
+ "lyrics_file": "lyrics.json",
+ "genres_file": "genres.json",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "artists_file": {
+ "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
+ },
+ "genres_file": {
+ "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
+ },
+ "lyrics_file": {
+ "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
+ },
+}
+
+PRETRAINED_LYRIC_TOKENS_SIZES = {
+ "jukebox": 512,
+}
+
+
+class JukeboxTokenizer(PreTrainedTokenizer):
+ """
+    Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs:
+        - Artists, unique ids are associated with each artist from the provided dictionary.
+        - Genres, unique ids are associated with each genre from the provided dictionary.
+        - Lyrics, character-based tokenization. Must be initialized with the list of characters that are inside the
+          vocabulary.
+
+    This tokenizer does not require training. It should be able to process a different number of inputs, as the
+    conditioning of the model can be done on the three different queries. If `None` is provided, default values will be
+    used.
+
+ Depending on the number of genres on which the model should be conditioned (`n_genres`).
+ ```python
+ >>> from transformers import JukeboxTokenizer
+
+ >>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
+ >>> tokenizer("Alan Jackson", "Country Rock", "old town road")["input_ids"]
+ [tensor([[ 0, 0, 0, 6785, 546, 41, 38, 30, 76, 46, 41, 49,
+ 40, 76, 44, 41, 27, 30]]), tensor([[ 0, 0, 0, 145, 0]]), tensor([[ 0, 0, 0, 145, 0]])]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ If nothing is provided, the genres and the artist will either be selected randomly or set to None
+
+
+
+    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+    this superclass for more information regarding those methods.
+
+ However the code does not allow that and only supports composing from various genres.
+
+ Args:
+ artists_file (`str`):
+ Path to the vocabulary file which contains a mapping between artists and ids. The default file supports
+ both "v2" and "v3"
+ genres_file (`str`):
+            Path to the vocabulary file which contains a mapping between genres and ids.
+ lyrics_file (`str`):
+ Path to the vocabulary file which contains the accepted characters for the lyrics tokenization.
+        version (`List[str]`, *optional*, defaults to `["v3", "v2", "v2"]`):
+            List of the tokenizer versions. The `5b-lyrics`'s top level prior model was trained using `v3` instead of
+            `v2`.
+        n_genres (`int`, *optional*, defaults to 5):
+            Maximum number of genres to use for composition.
+        max_n_lyric_tokens (`int`, *optional*, defaults to 512):
+            Maximum number of lyric tokens to keep.
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ artists_file,
+ genres_file,
+ lyrics_file,
+ version=["v3", "v2", "v2"],
+ max_n_lyric_tokens=512,
+ n_genres=5,
+ unk_token="<|endoftext|>",
+ **kwargs,
+ ):
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ super().__init__(
+ unk_token=unk_token,
+ n_genres=n_genres,
+ version=version,
+ max_n_lyric_tokens=max_n_lyric_tokens,
+ **kwargs,
+ )
+ self.version = version
+ self.max_n_lyric_tokens = max_n_lyric_tokens
+ self.n_genres = n_genres
+
+ with open(artists_file, encoding="utf-8") as vocab_handle:
+ self.artists_encoder = json.load(vocab_handle)
+
+ with open(genres_file, encoding="utf-8") as vocab_handle:
+ self.genres_encoder = json.load(vocab_handle)
+
+ with open(lyrics_file, encoding="utf-8") as vocab_handle:
+ self.lyrics_encoder = json.load(vocab_handle)
+
+ oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
+ # In v2 the vocabulary had n_vocab=80 characters; in v3 the "+" character is missing, so n_vocab=79.
+ if len(self.lyrics_encoder) == 79:
+ oov = oov.replace(r"\-'", r"\-+'")
+
+ self.out_of_vocab = regex.compile(oov)
+ self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
+ self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
+ self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
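+ # Illustrative example (not part of the original file): self.out_of_vocab matches runs of characters
+ # outside the lyrics alphabet, e.g. self.out_of_vocab.sub("", "hello 😊 world") -> "hello  world"
+ # (the emoji is stripped, the surrounding spaces are kept).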
+
+ @property
+ def vocab_size(self):
+ return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
+
+ def get_vocab(self):
+ return {"artists_encoder": self.artists_encoder, "genres_encoder": self.genres_encoder, "lyrics_encoder": self.lyrics_encoder}
+
+ def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
+ """Converts the artist, genre and lyrics tokens to their index using the vocabulary.
+ Each genre list is padded with -1 up to `n_genres` entries, and only the first lyrics sequence is converted to
+ character ids (the remaining entries are left empty).
+ """
+ artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
+ for genres in range(len(list_genres)):
+ list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
+ list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
+
+ lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
+ return artists_id, list_genres, lyric_ids
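+ # Illustrative example (not part of the original file): with n_genres=5, a single genre list such as
+ # ["country"] is first mapped to ids and then padded with -1 up to length 5, e.g.
+ # [self.genres_encoder.get("country", 0), -1, -1, -1, -1].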
+
+ def _tokenize(self, lyrics):
+ """
+ Converts a string into a sequence of tokens (string), using the tokenizer. Split into words for word-based
+ vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
+
+ Does NOT take care of added tokens. Only the lyrics are split into characters for the character-based vocabulary.
+ """
+ # only the lyrics are tokenized here, and the character-based vocabulary makes this a simple split
+ return list(lyrics)
+
+ def tokenize(self, artist, genre, lyrics, **kwargs):
+ """
+ Converts three strings into three sequences of tokens using the tokenizer.
+ """
+ artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
+ lyrics = self._tokenize(lyrics)
+ return artist, genre, lyrics
+
+ def prepare_for_tokenization(
+ self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
+ ) -> Tuple[str, str, str, Dict[str, Any]]:
+ """
+ Performs any necessary transformations before tokenization.
+
+ Args:
+ artists (`str`):
+ The artist name to prepare. This will mostly lowercase the string.
+ genres (`str`):
+ The genre name to prepare. This will mostly lowercase the string.
+ lyrics (`str`):
+ The lyrics to prepare.
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
+ Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
+ tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
+ which it will tokenize. This is useful for NER or token classification.
+ """
+ for idx in range(len(self.version)):
+ if self.version[idx] == "v3":
+ artists[idx] = artists[idx].lower()
+ genres[idx] = [genres[idx].lower()]
+ else:
+ artists[idx] = self._normalize(artists[idx]) + ".v2"
+ genres[idx] = [
+ self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
+ ] # split is for the full dictionary with combined genres
+
+ if self.version[0] == "v2":
+ self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
+ vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
+ self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
+ self.vocab[""] = 0
+ self.n_vocab = len(vocab) + 1
+ self.lyrics_encoder = self.vocab
+ self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
+ self.lyrics_decoder[0] = ""
+ else:
+ self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
+
+ lyrics = self._run_strip_accents(lyrics)
+ lyrics = lyrics.replace("\\", "\n")
+ lyrics = self.out_of_vocab.sub("", lyrics), [], []
+ return artists, genres, lyrics
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
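+ # Illustrative example (not part of the original file): NFD normalization splits accented characters into a
+ # base character plus combining marks (category "Mn"), which are then dropped, e.g.
+ # _run_strip_accents("Beyoncé, Motörhead") -> "Beyonce, Motorhead"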
+
+ def _normalize(self, text: str) -> str:
+ """
+ Normalizes the input text. This process is for the genres and the artist
+
+ Args:
+ text (`str`):
+ Artist or Genre string to normalize
+ """
+
+ accepted = (
+ [chr(i) for i in range(ord("a"), ord("z") + 1)]
+ + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
+ + [chr(i) for i in range(ord("0"), ord("9") + 1)]
+ + ["."]
+ )
+ accepted = frozenset(accepted)
+ pattern = re.compile(r"_+")
+ text = "".join([c if c in accepted else "_" for c in text.lower()])
+ text = pattern.sub("_", text).strip("_")
+ return text
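+ # Illustrative example (not part of the original file): characters outside [a-z0-9.] are replaced by "_",
+ # runs of "_" are collapsed and leading/trailing "_" are stripped, e.g.
+ # _normalize("Alan Jackson!") -> "alan_jackson"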
+
+ def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
+ return " ".join(lyrics)
+
+ def convert_to_tensors(
+ self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
+ ):
+ """
+ Convert the inner content to tensors.
+
+ Args:
+ tensor_type (`str` or [`~utils.TensorType`], *optional*):
+ The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
+ unset, no modification is done.
+ prepend_batch_axis (`bool`, *optional*, defaults to `False`):
+ Whether or not to add the batch dimension during the conversion.
+ """
+ # Convert to TensorType
+ if not isinstance(tensor_type, TensorType):
+ tensor_type = TensorType(tensor_type)
+
+ # Get a function reference for the correct framework
+ if tensor_type == TensorType.TENSORFLOW:
+ if not is_tf_available():
+ raise ImportError(
+ "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
+ )
+ import tensorflow as tf
+
+ as_tensor = tf.constant
+ is_tensor = tf.is_tensor
+ elif tensor_type == TensorType.PYTORCH:
+ if not is_torch_available():
+ raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
+ import torch
+
+ as_tensor = torch.tensor
+ is_tensor = torch.is_tensor
+ elif tensor_type == TensorType.JAX:
+ if not is_flax_available():
+ raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
+ import jax.numpy as jnp # noqa: F811
+
+ as_tensor = jnp.array
+ is_tensor = _is_jax
+ else:
+ as_tensor = np.asarray
+ is_tensor = _is_numpy
+
+ # Do the tensor conversion in batch
+
+ try:
+ if prepend_batch_axis:
+ inputs = [inputs]
+
+ if not is_tensor(inputs):
+ inputs = as_tensor(inputs)
+ except: # noqa E722
+ raise ValueError(
+ "Unable to create tensor, you should probably activate truncation and/or padding "
+ "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
+ )
+
+ return inputs
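+ # Illustrative usage (an assumption, not part of the original file; requires torch to be installed):
+ # convert_to_tensors([[0, 1, 2]], tensor_type="pt") returns a torch tensor of shape (1, 3), while
+ # tensor_type="np" returns a numpy array of the same shape.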
+
+ def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
+ """Convert the raw string to a list of token ids
+
+ Args:
+ artist (`str`):
+ Name of the artist.
+ genres (`str`):
+ List of genres that will be mixed to condition the audio.
+ lyrics (`str`, *optional*, defaults to `""`):
+ Lyrics used to condition the generation.
+ """
+ input_ids = [0, 0, 0]
+ artist = [artist] * len(self.version)
+ genres = [genres] * len(self.version)
+
+ artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
+ artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
+
+ attention_masks = [-INFINITY] * len(full_tokens[-1])
+ input_ids = [
+ self.convert_to_tensors(
+ [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
+ )
+ for i in range(len(self.version))
+ ]
+ return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
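+ # Illustrative end-to-end usage (mirroring the class docstring above, shown here only as an example):
+ # tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
+ # encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
+ # encoding["input_ids"] is a list with one tensor per prior, i.e. len(self.version) == 3 by default.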
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Saves the tokenizer's vocabulary dictionary to the provided save_directory.
+
+ Args:
+ save_directory (`str`):
+ A path to the directory where the vocabulary will be saved. The directory must already exist.
+
+ filename_prefix (`Optional[str]`, *optional*):
+ A prefix to add to the names of the files saved by the tokenizer.
+
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+
+ artists_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
+ )
+ with open(artists_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
+
+ genres_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
+ )
+ with open(genres_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
+
+ lyrics_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
+ )
+ with open(lyrics_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
+
+ return (artists_file, genres_file, lyrics_file)
+
+ def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
+ """
+ Converts an index (integer) into a token (str) using the vocab.
+
+ Args:
+ artists_index (`int`):
+ Index of the artist in its corresponding dictionary.
+ genres_index (`Union[List[int], int]`):
+ Index of the genre in its corresponding dictionary.
+ lyric_index (`List[int]`):
+ List of character indices, which each correspond to a character.
+ """
+ artist = self.artists_decoder.get(artists_index)
+ genres = [self.genres_decoder.get(genre) for genre in genres_index]
+ lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
+ return artist, genres, lyrics
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e47f84734ea6acb7c43896706893929e818ba8c2
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bc4cfa13200e366acd82c4200960a2c3b4befe8
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfa64a313fab12556cc20f9f51fb709c36c74b00
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..215a3c37daa17084d3a55d44ee9f6eef94c8b501
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..47c5315457b4fa52d40b3d1da10020e4e74f5508
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py
@@ -0,0 +1,1176 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for LayoutXLM model."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...tokenization_utils_base import (
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
+from ..xlm_roberta.tokenization_xlm_roberta import (
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
+ PRETRAINED_VOCAB_FILES_MAP,
+ SPIECE_UNDERLINE,
+ VOCAB_FILES_NAMES,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ return_token_type_ids (`bool`, *optional*):
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
+ of returning overflowing tokens.
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to return special tokens mask information.
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
+ Whether or not to return `(char_start, char_end)` for each token.
+
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
+ Python's tokenizer, this method will raise `NotImplementedError`.
+ return_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the lengths of the encoded inputs.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
+ **kwargs: passed to the `self.tokenize()` method
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **bbox** -- List of bounding boxes to be fed to a model.
+
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
+ if *"token_type_ids"* is in `self.model_input_names`).
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
+ - **length** -- The length of the inputs (when `return_length=True`).
+"""
+
+
+class LayoutXLMTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [CLS] token.
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
+ The bounding box to use for the special [SEP] token.
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [PAD] token.
+ pad_token_label (`int`, *optional*, defaults to -100):
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
+ CrossEntropyLoss.
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
+ Whether or not to only label the first subword, in case word labels are provided.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="<s>",
+ eos_token="</s>",
+ sep_token="</s>",
+ cls_token="<s>",
+ unk_token="<unk>",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ cls_token_box=[0, 0, 0, 0],
+ sep_token_box=[1000, 1000, 1000, 1000],
+ pad_token_box=[0, 0, 0, 0],
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ # Mask token behaves like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ cls_token_box=cls_token_box,
+ sep_token_box=sep_token_box,
+ pad_token_box=pad_token_box,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
+
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
+
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ # additional properties
+ self.cls_token_box = cls_token_box
+ self.sep_token_box = sep_token_box
+ self.pad_token_box = pad_token_box
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
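+ # Illustrative example (an assumption for illustration, not part of the original file): with the usual
+ # XLM-RoBERTa ids cls_token_id=0 and sep_token_id=2,
+ # build_inputs_with_special_tokens([5, 6], [7]) -> [0, 5, 6, 2, 2, 7, 2]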
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
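+ # Illustrative example (not part of the original file):
+ # get_special_tokens_mask([5, 6], [7]) -> [1, 0, 0, 1, 1, 0, 1], marking <s>, the double </s></s>
+ # separator and the final </s> as special tokens.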
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
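+ # Illustrative example (not part of the original file): for token_ids_0=[5, 6] and token_ids_1=[7],
+ # the returned mask is [0] * 7, since XLM-RoBERTa does not use token type ids.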
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) into an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
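+ # Illustrative example (not part of the original file): a piece with SentencePiece id 3 maps to
+ # 3 + self.fairseq_offset = 4, while an unknown piece (spm id 0) falls back to self.unk_token_id.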
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) into a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) into a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
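+ # Illustrative example (not part of the original file):
+ # convert_tokens_to_string(["▁Hello", "▁world"]) -> "Hello world" (SPIECE_UNDERLINE is replaced by spaces).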
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with word-level normalized bounding boxes and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ words).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = words
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples). ")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "words must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be words
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Words must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ words = text if text_pair is None else text_pair
+ if boxes is None:
+ raise ValueError("You must provide corresponding bounding boxes")
+ if is_batched:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
+ for words_example, boxes_example in zip(words, boxes):
+ if len(words_example) != len(boxes_example):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+ else:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
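+ # Illustrative usage (an assumption, not part of the original file):
+ # words = ["hello", "world"]; boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
+ # encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
+ # The returned keys include "input_ids", "bbox" and "attention_mask".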
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ boxes: Optional[List[List[List[int]]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ batch_outputs = self._batch_prepare_for_model(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=return_tensors,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def _batch_prepare_for_model(
+ self,
+ batch_text_or_text_pairs,
+ is_pair: bool = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
+ manages a moving window (with user defined stride) for overflowing tokens.
+
+ Args:
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
+ """
+
+ batch_outputs = {}
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
+ batch_text_or_text_pair, boxes_example = example
+ outputs = self.prepare_for_model(
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
+ batch_text_or_text_pair[1] if is_pair else None,
+ boxes_example,
+ word_labels=word_labels[idx] if word_labels is not None else None,
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterward
+ return_attention_mask=False, # we pad in batch afterward
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ return self.prepare_for_model(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def prepare_for_model(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
+ (with user defined stride) for overflowing tokens.
+
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
+ labeled with -100, such that they will be ignored by the loss function.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
+ text_pair (`List[str]` or `List[int]`, *optional*):
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
+ list of list of strings (words of a batch of examples).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ tokens = []
+ pair_tokens = []
+ token_boxes = []
+ pair_token_boxes = []
+ labels = []
+
+ if text_pair is None:
+ if word_labels is None:
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
+ for word, box in zip(text, boxes):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ token_boxes.extend([box] * len(word_tokens))
+ else:
+ # CASE 2: token classification (training)
+ for word, box, label in zip(text, boxes, word_labels):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ token_boxes.extend([box] * len(word_tokens))
+ if self.only_label_first_subword:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
+ else:
+ labels.extend([label] * len(word_tokens))
+ else:
+ # CASE 3: document visual question answering (inference)
+ # text = question
+ # text_pair = words
+ tokens = self.tokenize(text)
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
+
+ for word, box in zip(text_pair, boxes):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ pair_tokens.extend(word_tokens)
+ pair_token_boxes.extend([box] * len(word_tokens))
+
+ # Create ids + pair_ids
+ ids = self.convert_tokens_to_ids(tokens)
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
+
+ # Compute the total size of the returned encodings
+ pair = bool(pair_ids is not None)
+ len_ids = len(ids)
+ len_pair_ids = len(pair_ids) if pair else 0
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
+
+ # Truncation: Handle max sequence length
+ overflowing_tokens = []
+ overflowing_token_boxes = []
+ overflowing_labels = []
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
+ (
+ ids,
+ token_boxes,
+ pair_ids,
+ pair_token_boxes,
+ labels,
+ overflowing_tokens,
+ overflowing_token_boxes,
+ overflowing_labels,
+ ) = self.truncate_sequences(
+ ids,
+ token_boxes,
+ pair_ids=pair_ids,
+ pair_token_boxes=pair_token_boxes,
+ labels=labels,
+ num_tokens_to_remove=total_len - max_length,
+ truncation_strategy=truncation_strategy,
+ stride=stride,
+ )
+
+ if return_token_type_ids and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ # Load from model defaults
+ if return_token_type_ids is None:
+ return_token_type_ids = "token_type_ids" in self.model_input_names
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ encoded_inputs = {}
+
+ if return_overflowing_tokens:
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
+ encoded_inputs["overflowing_labels"] = overflowing_labels
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
+
+ # Add special tokens
+ if add_special_tokens:
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
+ if pair_token_boxes:
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
+ if labels:
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
+ else:
+ sequence = ids + pair_ids if pair else ids
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
+
+ # Build output dictionary
+ encoded_inputs["input_ids"] = sequence
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
+ if return_token_type_ids:
+ encoded_inputs["token_type_ids"] = token_type_ids
+ if return_special_tokens_mask:
+ if add_special_tokens:
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
+ else:
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+ if labels:
+ encoded_inputs["labels"] = labels
+
+ # Check lengths
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
+
+ # Padding
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+ encoded_inputs = self.pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding=padding_strategy.value,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if return_length:
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+
+ batch_outputs = BatchEncoding(
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
+ )
+
+ return batch_outputs
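+ # Illustrative behaviour (not part of the original file): for words ["hello", "world"] with boxes
+ # [[1, 2, 3, 4], [5, 6, 7, 8]], each word is split into sub-word tokens and its box is repeated for every
+ # sub-word; with add_special_tokens=True, encoded_inputs["bbox"] starts with cls_token_box and ends with
+ # sep_token_box.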
+
+ def truncate_sequences(
+ self,
+ ids: List[int],
+ token_boxes: List[List[int]],
+ pair_ids: Optional[List[int]] = None,
+ pair_token_boxes: Optional[List[List[int]]] = None,
+ labels: Optional[List[int]] = None,
+ num_tokens_to_remove: int = 0,
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
+ stride: int = 0,
+ ) -> Tuple[List[int], List[int], List[int]]:
+ """
+ Truncates a sequence pair in-place following the strategy.
+
+ Args:
+ ids (`List[int]`):
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+ `convert_tokens_to_ids` methods.
+ token_boxes (`List[List[int]]`):
+ Bounding boxes of the first sequence.
+ pair_ids (`List[int]`, *optional*):
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+ and `convert_tokens_to_ids` methods.
+ pair_token_boxes (`List[List[int]]`, *optional*):
+ Bounding boxes of the second sequence.
+ labels (`List[int]`, *optional*):
+ Labels of the first sequence (for token classification tasks).
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
+ Number of tokens to remove using the truncation strategy.
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ The strategy to follow for truncation. Can be:
+
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
+ batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
+ than the model maximum admissible input size).
+ stride (`int`, *optional*, defaults to 0):
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
+ sequence returned. The value of this argument defines the number of additional tokens.
+
+ Returns:
+ `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
+ overflowing tokens.
+ """
+ if num_tokens_to_remove <= 0:
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
+
+ if not isinstance(truncation_strategy, TruncationStrategy):
+ truncation_strategy = TruncationStrategy(truncation_strategy)
+
+ overflowing_tokens = []
+ overflowing_token_boxes = []
+ overflowing_labels = []
+ if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
+ for _ in range(num_tokens_to_remove):
+ if pair_ids is None or len(ids) > len(pair_ids):
+ if not overflowing_tokens:
+ window_len = min(len(ids), stride + 1)
+ else:
+ window_len = 1
+ overflowing_tokens.extend(ids[-window_len:])
+ overflowing_token_boxes.extend(token_boxes[-window_len:])
+ overflowing_labels.extend(labels[-window_len:])
+ ids = ids[:-1]
+ token_boxes = token_boxes[:-1]
+ labels = labels[:-1]
+ else:
+ if not overflowing_tokens:
+ window_len = min(len(pair_ids), stride + 1)
+ else:
+ window_len = 1
+ overflowing_tokens.extend(pair_ids[-window_len:])
+ overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
+ pair_ids = pair_ids[:-1]
+ pair_token_boxes = pair_token_boxes[:-1]
+ elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
+ if len(ids) > num_tokens_to_remove:
+ window_len = min(len(ids), stride + num_tokens_to_remove)
+ overflowing_tokens = ids[-window_len:]
+ overflowing_token_boxes = token_boxes[-window_len:]
+ overflowing_labels = labels[-window_len:]
+ ids = ids[:-num_tokens_to_remove]
+ token_boxes = token_boxes[:-num_tokens_to_remove]
+ labels = labels[:-num_tokens_to_remove]
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the first sequence has a length {len(ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_second'."
+ )
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
+ if len(pair_ids) > num_tokens_to_remove:
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
+ overflowing_tokens = pair_ids[-window_len:]
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
+ pair_ids = pair_ids[:-num_tokens_to_remove]
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the second sequence has a length {len(pair_ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_first'."
+ )
+
+ return (
+ ids,
+ token_boxes,
+ pair_ids,
+ pair_token_boxes,
+ labels,
+ overflowing_tokens,
+ overflowing_token_boxes,
+ overflowing_labels,
+ )
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
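
The `truncate_sequences` and `_pad` overrides above keep `input_ids`, `bbox` and `labels` aligned while trimming or extending them. As a minimal standalone sketch (illustrative names only, not part of the diffed file), the `'longest_first'` strategy removes one token at a time from whichever sequence is currently longer, and the first removal also captures a `stride`-sized overlap window as overflow:

def longest_first(ids, pair_ids, num_tokens_to_remove, stride=0):
    # Drop tokens one at a time from whichever sequence is currently longer.
    overflowing = []
    for _ in range(num_tokens_to_remove):
        if pair_ids is None or len(ids) > len(pair_ids):
            window = min(len(ids), stride + 1) if not overflowing else 1
            overflowing.extend(ids[-window:])  # first removal keeps a stride-sized overlap
            ids = ids[:-1]
        else:
            window = min(len(pair_ids), stride + 1) if not overflowing else 1
            overflowing.extend(pair_ids[-window:])
            pair_ids = pair_ids[:-1]
    return ids, pair_ids, overflowing

ids, pair_ids, overflow = longest_first([1, 2, 3, 4, 5], [6, 7, 8], num_tokens_to_remove=3, stride=1)
assert ids == [1, 2, 3] and pair_ids == [6, 7] and overflow == [4, 5, 4, 8]

In the real method the same window is applied to `token_boxes` and `labels` so every parallel list stays the same length.
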
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..322239192740d0e106b81da213c16e4dd8821ec0
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py
@@ -0,0 +1,801 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for LayoutXLM model."""
+
+
+import os
+from shutil import copyfile
+from typing import Dict, List, Optional, Tuple, Union
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_base import (
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
+from ..xlm_roberta.tokenization_xlm_roberta_fast import (
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
+ PRETRAINED_VOCAB_FILES_MAP,
+ VOCAB_FILES_NAMES,
+)
+
+
+if is_sentencepiece_available():
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
+else:
+ LayoutXLMTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ return_token_type_ids (`bool`, *optional*):
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
+ of returning overflowing tokens.
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to return special tokens mask information.
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
+ Whether or not to return `(char_start, char_end)` for each token.
+
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
+ Python's tokenizer, this method will raise `NotImplementedError`.
+ return_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the lengths of the encoded inputs.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
+ **kwargs: passed to the `self.tokenize()` method
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **bbox** -- List of bounding boxes to be fed to a model.
+
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
+ if *"token_type_ids"* is in `self.model_input_names`).
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **labels** -- List of labels to be fed to a model (when `word_labels` is specified).
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
+ - **length** -- The length of the inputs (when `return_length=True`).
+"""
+
+
+class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [CLS] token.
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
+ The bounding box to use for the special [SEP] token.
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [PAD] token.
+ pad_token_label (`int`, *optional*, defaults to -100):
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
+ CrossEntropyLoss.
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
+ Whether or not to only label the first subword, in case word labels are provided.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = LayoutXLMTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ cls_token_box=[0, 0, 0, 0],
+ sep_token_box=[1000, 1000, 1000, 1000],
+ pad_token_box=[0, 0, 0, 0],
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ cls_token_box=cls_token_box,
+ sep_token_box=sep_token_box,
+ pad_token_box=pad_token_box,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+ self.can_save_slow_tokenizer = False if not self.vocab_file else True
+
+ # additional properties
+ self.cls_token_box = cls_token_box
+ self.sep_token_box = sep_token_box
+ self.pad_token_box = pad_token_box
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with word-level normalized bounding boxes and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ words).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = words
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "words must of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be words
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Words must of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ words = text if text_pair is None else text_pair
+ if boxes is None:
+ raise ValueError("You must provide corresponding bounding boxes")
+ if is_batched:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
+ for words_example, boxes_example in zip(words, boxes):
+ if len(words_example) != len(boxes_example):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+ else:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
+ batched_input = [(text, pair)] if pair else [text]
+ encodings = self._tokenizer.encode_batch(
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
+ )
+
+ return encodings[0].tokens
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ boxes: Optional[List[List[List[int]]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if not isinstance(batch_text_or_text_pairs, list):
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
+
+ # Set the truncation and padding strategy and restore the initial configuration
+ self.set_truncation_and_padding(
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ )
+
+ if is_pair:
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
+
+ encodings = self._tokenizer.encode_batch(
+ batch_text_or_text_pairs,
+ add_special_tokens=add_special_tokens,
+ is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
+ )
+
+ # Convert encoding to dict
+ # `Tokens` has type: Tuple[
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
+ # List[EncodingFast]
+ # ]
+ # with nested dimensions corresponding to batch, overflows, sequence length
+ tokens_and_encodings = [
+ self._convert_encoding(
+ encoding=encoding,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=True
+ if word_labels is not None
+ else return_offsets_mapping, # we use offsets to create the labels
+ return_length=return_length,
+ verbose=verbose,
+ )
+ for encoding in encodings
+ ]
+
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
+ # (we say ~ because the number of overflow varies with the example in the batch)
+ #
+ # To match each overflowing sample with the original sample in the batch
+ # we add an overflow_to_sample_mapping array (see below)
+ sanitized_tokens = {}
+ for key in tokens_and_encodings[0][0].keys():
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
+ sanitized_tokens[key] = stack
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
+
+ # If returning overflowing tokens, we need to return a mapping
+ # from the batch idx to the original sample
+ if return_overflowing_tokens:
+ overflow_to_sample_mapping = []
+ for i, (toks, _) in enumerate(tokens_and_encodings):
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
+
+ for input_ids in sanitized_tokens["input_ids"]:
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
+
+ # create the token boxes
+ token_boxes = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ token_boxes_example = []
+ for id, sequence_id, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_encodings[batch_index].sequence_ids,
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if is_pair and sequence_id == 0:
+ token_boxes_example.append(self.pad_token_box)
+ else:
+ token_boxes_example.append(boxes[original_index][word_id])
+ else:
+ if id == self.cls_token_id:
+ token_boxes_example.append(self.cls_token_box)
+ elif id == self.sep_token_id:
+ token_boxes_example.append(self.sep_token_box)
+ elif id == self.pad_token_id:
+ token_boxes_example.append(self.pad_token_box)
+ else:
+ raise ValueError("Id not recognized")
+ token_boxes.append(token_boxes_example)
+
+ sanitized_tokens["bbox"] = token_boxes
+
+ # optionally, create the labels
+ if word_labels is not None:
+ labels = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ labels_example = []
+ for id, offset, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_tokens["offset_mapping"][batch_index],
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if self.only_label_first_subword:
+ if offset[0] == 0:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels_example.append(word_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ else:
+ labels_example.append(word_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ labels.append(labels_example)
+
+ sanitized_tokens["labels"] = labels
+ # finally, remove offsets if the user didn't want them
+ if not return_offsets_mapping:
+ del sanitized_tokens["offset_mapping"]
+
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[bool] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # make it a batched input
+ # 2 options:
+ # 1) only text, in case text must be a list of str
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
+ batched_input = [(text, text_pair)] if text_pair else [text]
+ batched_boxes = [boxes]
+ batched_word_labels = [word_labels] if word_labels is not None else None
+ batched_output = self._batch_encode_plus(
+ batched_input,
+ is_pair=bool(text_pair is not None),
+ boxes=batched_boxes,
+ word_labels=batched_word_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ # If return_tensors is None, we can remove the leading batch axis
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
+ if return_tensors is None and not return_overflowing_tokens:
+ batched_output = BatchEncoding(
+ {
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
+ for key, value in batched_output.items()
+ },
+ batched_output.encodings,
+ )
+
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
+
+ return batched_output
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
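
For context, a hedged usage sketch of the fast tokenizer added above; the checkpoint name and sample words, boxes and labels are assumptions for illustration, not taken from this diff:

from transformers import LayoutXLMTokenizerFast

tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
words = ["hello", "world"]
boxes = [[37, 42, 81, 56], [90, 42, 160, 58]]  # word-level boxes, normalized to a 0-1000 scale
word_labels = [0, 1]

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    max_length=16,
    truncation=True,
    return_tensors="pt",
)
# bbox and labels are padded in step with input_ids by the _pad override above
print(encoding["input_ids"].shape)  # torch.Size([1, 16])
print(encoding["bbox"].shape)       # torch.Size([1, 16, 4])
print(encoding["labels"].shape)     # torch.Size([1, 16])
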
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22aaccb7fc14f84bc1111675b8dbcceb870bd803
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/configuration_luke.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/configuration_luke.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02e3a2dad02eab9a2400bd51c37981a8b6d5b00d
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/configuration_luke.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/convert_luke_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/convert_luke_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abf9fae9f2332cb48a8e891468e55fe9bf2207c0
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/convert_luke_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/modeling_luke.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/modeling_luke.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2bf4ee3413008d128385d7a2bada19d2b6c1d51
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/modeling_luke.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/tokenization_luke.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/tokenization_luke.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b29760302b87050ac6845a29e0b1146d0aa40a14
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/__pycache__/tokenization_luke.cpython-310.pyc differ
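
The file added below defines the LUKE model together with the output dataclasses documented in its docstrings (for example `entity_last_hidden_state`). A brief hedged usage sketch, with the checkpoint name and example text taken from common LUKE usage rather than from this diff:

from transformers import LukeTokenizer, LukeModel

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")

text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7), (17, 28)]  # character spans of "Beyoncé" and "Los Angeles"
inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
outputs = model(**inputs)

print(outputs.last_hidden_state.shape)         # word token representations
print(outputs.entity_last_hidden_state.shape)  # entity representations (see BaseLukeModelOutputWithPooling)
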
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/luke/modeling_luke.py b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/modeling_luke.py
new file mode 100644
index 0000000000000000000000000000000000000000..52b947e8097793537f46978270a827737ae560e6
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/luke/modeling_luke.py
@@ -0,0 +1,2244 @@
+# coding=utf-8
+# Copyright Studio Ousia and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch LUKE model."""
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_luke import LukeConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LukeConfig"
+_CHECKPOINT_FOR_DOC = "studio-ousia/luke-base"
+
+LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "studio-ousia/luke-base",
+ "studio-ousia/luke-large",
+ # See all LUKE models at https://huggingface.co/models?filter=luke
+]
+
+
+@dataclass
+class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling):
+ """
+ Base class for outputs of the LUKE model.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
+ Sequence of entity hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
+ Linear layer and a Tanh activation function.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length +
+ entity_length, sequence_length + entity_length)`. Attentions weights after the attention softmax, used to
+ compute the weighted average in the self-attention heads.
+ """
+
+ entity_last_hidden_state: torch.FloatTensor = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class BaseLukeModelOutput(BaseModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
+ Sequence of entity hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ entity_last_hidden_state: torch.FloatTensor = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LukeMaskedLMOutput(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ The sum of masked language modeling (MLM) loss and entity prediction loss.
+ mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked entity prediction (MEP) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ mlm_loss: Optional[torch.FloatTensor] = None
+ mep_loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ entity_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class EntityClassificationOutput(ModelOutput):
+ """
+ Outputs of entity classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class EntityPairClassificationOutput(ModelOutput):
+ """
+ Outputs of entity pair classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class EntitySpanClassificationOutput(ModelOutput):
+ """
+ Outputs of entity span classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LukeSequenceClassifierOutput(ModelOutput):
+ """
+ Outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LukeTokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LukeQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Outputs of question answering models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LukeMultipleChoiceModelOutput(ModelOutput):
+ """
+ Outputs of multiple choice models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
+ layer plus the initial entity embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class LukeEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # End copy
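+ # Re-create the position embeddings with a padding index so that padding tokens keep a fixed
+ # position embedding; real tokens receive positions starting at padding_idx + 1 (see
+ # create_position_ids_from_input_ids below).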
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ def forward(
+ self,
+ input_ids=None,
+ token_type_ids=None,
+ position_ids=None,
+ inputs_embeds=None,
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
+class LukeEntityEmbeddings(nn.Module):
+ def __init__(self, config: LukeConfig):
+ super().__init__()
+ self.config = config
+
+ self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
+ if config.entity_emb_size != config.hidden_size:
+ self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
+
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None
+ ):
+ if token_type_ids is None:
+ token_type_ids = torch.zeros_like(entity_ids)
+
+ entity_embeddings = self.entity_embeddings(entity_ids)
+ if self.config.entity_emb_size != self.config.hidden_size:
+ entity_embeddings = self.entity_embedding_dense(entity_embeddings)
+
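+ # Each entity may span several word positions; `position_ids` holds those positions padded with -1.
+ # Look up an embedding for every valid position and average them to obtain a single position
+ # embedding per entity (the clamp keeps the -1 padding entries from indexing the embedding table).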
+ position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
+ position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
+ position_embeddings = position_embeddings * position_embedding_mask
+ position_embeddings = torch.sum(position_embeddings, dim=-2)
+ position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)
+
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = entity_embeddings + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+class LukeSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.use_entity_aware_attention = config.use_entity_aware_attention
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ if self.use_entity_aware_attention:
+ self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ word_size = word_hidden_states.size(1)
+
+ if entity_hidden_states is None:
+ concat_hidden_states = word_hidden_states
+ else:
+ concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
+
+ key_layer = self.transpose_for_scores(self.key(concat_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(concat_hidden_states))
+
+ if self.use_entity_aware_attention and entity_hidden_states is not None:
+ # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e)
+ # query layers
+ w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states))
+ w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states))
+ e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states))
+ e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states))
+
+ # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above
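+ # (the key projection is shared: its word slice serves both w2w and e2w attention, and its
+ # entity slice serves both w2e and e2e; only the query projections above are type-specific)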
+ w2w_key_layer = key_layer[:, :, :word_size, :]
+ e2w_key_layer = key_layer[:, :, :word_size, :]
+ w2e_key_layer = key_layer[:, :, word_size:, :]
+ e2e_key_layer = key_layer[:, :, word_size:, :]
+
+ # compute attention scores based on the dot product between the query and key vectors
+ w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2))
+ w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2))
+ e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2))
+ e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2))
+
+ # combine attention scores to create the final attention score matrix
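+ # of shape (batch_size, num_heads, word_size + entity_size, word_size + entity_size)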
+ word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3)
+ entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3)
+ attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2)
+
+ else:
+ query_layer = self.transpose_for_scores(self.query(concat_hidden_states))
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the LukeModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ output_word_hidden_states = context_layer[:, :word_size, :]
+ if entity_hidden_states is None:
+ output_entity_hidden_states = None
+ else:
+ output_entity_hidden_states = context_layer[:, word_size:, :]
+
+ if output_attentions:
+ outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs)
+ else:
+ outputs = (output_word_hidden_states, output_entity_hidden_states)
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class LukeSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LukeAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = LukeSelfAttention(config)
+ self.output = LukeSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ raise NotImplementedError("LUKE does not support the pruning of attention heads")
+
+ def forward(
+ self,
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ word_size = word_hidden_states.size(1)
+ self_outputs = self.self(
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ )
+ if entity_hidden_states is None:
+ concat_self_outputs = self_outputs[0]
+ concat_hidden_states = word_hidden_states
+ else:
+ concat_self_outputs = torch.cat(self_outputs[:2], dim=1)
+ concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
+
+ attention_output = self.output(concat_self_outputs, concat_hidden_states)
+
+ word_attention_output = attention_output[:, :word_size, :]
+ if entity_hidden_states is None:
+ entity_attention_output = None
+ else:
+ entity_attention_output = attention_output[:, word_size:, :]
+
+ # add attentions if we output them
+ outputs = (word_attention_output, entity_attention_output) + self_outputs[2:]
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class LukeIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class LukeOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LukeLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = LukeAttention(config)
+ self.intermediate = LukeIntermediate(config)
+ self.output = LukeOutput(config)
+
+ def forward(
+ self,
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ word_size = word_hidden_states.size(1)
+
+ self_attention_outputs = self.attention(
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ if entity_hidden_states is None:
+ concat_attention_output = self_attention_outputs[0]
+ else:
+ concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1)
+
+ outputs = self_attention_outputs[2:] # add self attentions if we output attention weights
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output
+ )
+ word_layer_output = layer_output[:, :word_size, :]
+ if entity_hidden_states is None:
+ entity_layer_output = None
+ else:
+ entity_layer_output = layer_output[:, word_size:, :]
+
+ outputs = (word_layer_output, entity_layer_output) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class LukeEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_word_hidden_states = () if output_hidden_states else None
+ all_entity_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
+ all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs, output_attentions)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(layer_module),
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask,
+ layer_head_mask,
+ )
+ else:
+ layer_outputs = layer_module(
+ word_hidden_states,
+ entity_hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions,
+ )
+
+ word_hidden_states = layer_outputs[0]
+
+ if entity_hidden_states is not None:
+ entity_hidden_states = layer_outputs[1]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
+ all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ word_hidden_states,
+ all_word_hidden_states,
+ all_self_attentions,
+ entity_hidden_states,
+ all_entity_hidden_states,
+ ]
+ if v is not None
+ )
+ return BaseLukeModelOutput(
+ last_hidden_state=word_hidden_states,
+ hidden_states=all_word_hidden_states,
+ attentions=all_self_attentions,
+ entity_last_hidden_state=entity_hidden_states,
+ entity_hidden_states=all_entity_hidden_states,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class LukePooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class EntityPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class EntityPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.transform = EntityPredictionHeadTransform(config)
+ self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
+ self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states) + self.bias
+
+ return hidden_states
+
+
+class LukePreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LukeConfig
+ base_model_prefix = "luke"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LukeAttention", "LukeEntityEmbeddings"]
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ if module.embedding_dim == 1: # embedding for bias parameters
+ module.weight.data.zero_()
+ else:
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, LukeEncoder):
+ module.gradient_checkpointing = value
+
+
+LUKE_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LukeConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LUKE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+
+ entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
+ Indices of entity tokens in the entity vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
+ Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for entity tokens that are **not masked**,
+ - 0 for entity tokens that are **masked**.
+
+ entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the entity token inputs. Indices are
+ selected in `[0, 1]`:
+
+ - 0 corresponds to a *portion A* entity token,
+ - 1 corresponds to a *portion B* entity token.
+
+ entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
+ Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any"
+ " specific head on top.",
+ LUKE_START_DOCSTRING,
+)
+class LukeModel(LukePreTrainedModel):
+ def __init__(self, config: LukeConfig, add_pooling_layer: bool = True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = LukeEmbeddings(config)
+ self.entity_embeddings = LukeEntityEmbeddings(config)
+ self.encoder = LukeEncoder(config)
+
+ self.pooler = LukePooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def get_entity_embeddings(self):
+ return self.entity_embeddings.entity_embeddings
+
+ def set_entity_embeddings(self, value):
+ self.entity_embeddings.entity_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError("LUKE does not support the pruning of attention heads")
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseLukeModelOutputWithPooling]:
+ r"""
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LukeModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
+ >>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
+ >>> # Compute the contextualized entity representation corresponding to the entity mention "Beyoncé"
+
+ >>> text = "Beyoncé lives in Los Angeles."
+ >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
+
+ >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
+ >>> outputs = model(**encoding)
+ >>> word_last_hidden_state = outputs.last_hidden_state
+ >>> entity_last_hidden_state = outputs.entity_last_hidden_state
+ >>> # Input Wikipedia entities to obtain enriched contextualized representations of word tokens
+
+ >>> text = "Beyoncé lives in Los Angeles."
+ >>> entities = [
+ ... "Beyoncé",
+ ... "Los Angeles",
+ ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
+ >>> entity_spans = [
+ ... (0, 7),
+ ... (17, 28),
+ ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+
+ >>> encoding = tokenizer(
+ ... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt"
+ ... )
+ >>> outputs = model(**encoding)
+ >>> word_last_hidden_state = outputs.last_hidden_state
+ >>> entity_last_hidden_state = outputs.entity_last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length), device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+ if entity_ids is not None:
+ entity_seq_length = entity_ids.size(1)
+ if entity_attention_mask is None:
+ entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device)
+ if entity_token_type_ids is None:
+ entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ # First, compute word embeddings
+ word_embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ )
+
+ # Second, compute extended attention mask
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask)
+
+ # Third, compute entity embeddings and concatenate with word embeddings
+ if entity_ids is None:
+ entity_embedding_output = None
+ else:
+ entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids)
+
+ # Fourth, send embeddings through the model
+ encoder_outputs = self.encoder(
+ word_embedding_output,
+ entity_embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ # Fifth, get the output. LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size)
+ sequence_output = encoder_outputs[0]
+
+ # Sixth, compute the pooled_output based on the sequence_output
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseLukeModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ entity_last_hidden_state=encoder_outputs.entity_last_hidden_state,
+ entity_hidden_states=encoder_outputs.entity_hidden_states,
+ )
+
+ def get_extended_attention_mask(
+ self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]
+ ):
+ """
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
+
+ Arguments:
+ word_attention_mask (`torch.LongTensor`):
+ Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
+ entity_attention_mask (`torch.LongTensor`, *optional*):
+ Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
+
+ Returns:
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
+ """
+ attention_mask = word_attention_mask
+ if entity_attention_mask is not None:
+ attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)
+
+ if attention_mask.dim() == 3:
+ extended_attention_mask = attention_mask[:, None, :, :]
+ elif attention_mask.dim() == 2:
+ extended_attention_mask = attention_mask[:, None, None, :]
+ else:
+ raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})")
+
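+ # Convert the 1/0 mask into an additive bias: positions to attend to become 0.0 and masked
+ # positions become the most negative representable value, which removes them after the softmax.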
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
+ return extended_attention_mask
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
+ return incremental_indices.long() + padding_idx
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead
+class LukeLMHead(nn.Module):
+ """Roberta Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ # For accelerate compatibility and to not break backward compatibility
+ if self.decoder.bias.device.type == "meta":
+ self.decoder.bias = self.bias
+ else:
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """
+ The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and
+ masked entity prediction.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForMaskedLM(LukePreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.luke = LukeModel(config)
+
+ self.lm_head = LukeLMHead(config)
+ self.entity_predictions = EntityPredictionHead(config)
+
+ self.loss_fn = nn.CrossEntropyLoss()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ super().tie_weights()
+ self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings)
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.LongTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ entity_labels: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LukeMaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
+ Labels for computing the masked entity prediction loss. Indices should be in `[-100, 0, ...,
+ config.entity_vocab_size]` (see `entity_ids` docstring). Entities with indices set to `-100` are ignored
+ (masked); the loss is only computed for the entities with labels in `[0, ..., config.entity_vocab_size]`
+
+ Returns:
+
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
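+ # The overall loss is the sum of the masked language modeling (MLM) loss over word tokens and the
+ # masked entity prediction (MEP) loss over entity tokens, whichever of the two are available.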
+ loss = None
+
+ mlm_loss = None
+ logits = self.lm_head(outputs.last_hidden_state)
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
+ if loss is None:
+ loss = mlm_loss
+
+ mep_loss = None
+ entity_logits = None
+ if outputs.entity_last_hidden_state is not None:
+ entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
+ if entity_labels is not None:
+ mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
+ if loss is None:
+ loss = mep_loss
+ else:
+ loss = loss + mep_loss
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ loss,
+ mlm_loss,
+ mep_loss,
+ logits,
+ entity_logits,
+ outputs.hidden_states,
+ outputs.entity_hidden_states,
+ outputs.attentions,
+ ]
+ if v is not None
+ )
+
+ return LukeMaskedLMOutput(
+ loss=loss,
+ mlm_loss=mlm_loss,
+ mep_loss=mep_loss,
+ logits=logits,
+ entity_logits=entity_logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
+ token) for entity classification tasks, such as Open Entity.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForEntityClassification(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.luke = LukeModel(config)
+
+ self.num_labels = config.num_labels
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, EntityClassificationOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
+ Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
+ used for the single-label classification. In this case, labels should contain the indices that should be in
+ `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
+ loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
+ and 1 indicate false and true, respectively.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LukeForEntityClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
+ >>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
+
+ >>> text = "Beyoncé lives in Los Angeles."
+ >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
+ >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: person
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
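+ # The classification feature is the hidden state of the first entity token, which corresponds to
+ # the target entity mention.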
+ feature_vector = outputs.entity_last_hidden_state[:, 0, :]
+ feature_vector = self.dropout(feature_vector)
+ logits = self.classifier(feature_vector)
+
+ loss = None
+ if labels is not None:
+ # When `labels` has one dimension, cross entropy is used as the loss function; binary cross
+ # entropy is used otherwise.
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if labels.ndim == 1:
+ loss = nn.functional.cross_entropy(logits, labels)
+ else:
+ loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
+ if v is not None
+ )
+
+ return EntityClassificationOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity
+ tokens) for entity pair classification tasks, such as TACRED.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForEntityPairClassification(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.luke = LukeModel(config)
+
+ self.num_labels = config.num_labels
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, EntityPairClassificationOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
+ Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
+ used for the single-label classification. In this case, labels should contain the indices that should be in
+ `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
+ loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
+ and 1 indicate false and true, respectively.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LukeForEntityPairClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
+ >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
+
+ >>> text = "Beyoncé lives in Los Angeles."
+ >>> entity_spans = [
+ ... (0, 7),
+ ... (17, 28),
+ ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+ >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: per:cities_of_residence
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
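+ # The classification feature is the concatenation of the hidden states of the first and second
+ # entity tokens, giving a vector of size 2 * hidden_size (matching the classifier defined above).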
+ feature_vector = torch.cat(
+ [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1
+ )
+ feature_vector = self.dropout(feature_vector)
+ logits = self.classifier(feature_vector)
+
+ loss = None
+ if labels is not None:
+ # When `labels` has one dimension, cross entropy is used as the loss function; binary cross
+ # entropy is used otherwise.
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if labels.ndim == 1:
+ loss = nn.functional.cross_entropy(logits, labels)
+ else:
+ loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
+ if v is not None
+ )
+
+ return EntityPairClassificationOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks
+ such as named entity recognition.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForEntitySpanClassification(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.luke = LukeModel(config)
+
+ self.num_labels = config.num_labels
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.LongTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ entity_start_positions: Optional[torch.LongTensor] = None,
+ entity_end_positions: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, EntitySpanClassificationOutput]:
+ r"""
+ entity_start_positions (`torch.LongTensor`):
+ The start positions of entities in the word token sequence.
+
+ entity_end_positions (`torch.LongTensor`):
+ The end positions of entities in the word token sequence.
+
+ labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
+ Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
+ entropy loss is used for the single-label classification. In this case, labels should contain the indices
+ that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
+ num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
+ labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LukeForEntitySpanClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
+ >>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
+
+ >>> text = "Beyoncé lives in Los Angeles"
+ >>> # List all possible entity spans in the text
+
+ >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens
+ >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens
+ >>> entity_spans = []
+ >>> for i, start_pos in enumerate(word_start_positions):
+ ... for end_pos in word_end_positions[i:]:
+ ... entity_spans.append((start_pos, end_pos))
+
+ >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
+ >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
+ ... if predicted_class_idx != 0:
+ ... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
+ Beyoncé PER
+ Los Angeles LOC
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+ hidden_size = outputs.last_hidden_state.size(-1)
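+ # each candidate span is represented by the concatenation [start-token state; end-token state;
+ # entity-token state], which is why the classifier input size is `hidden_size * 3`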
+
+ entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
+ if entity_start_positions.device != outputs.last_hidden_state.device:
+ entity_start_positions = entity_start_positions.to(outputs.last_hidden_state.device)
+ start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)
+
+ entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
+ if entity_end_positions.device != outputs.last_hidden_state.device:
+ entity_end_positions = entity_end_positions.to(outputs.last_hidden_state.device)
+ end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)
+
+ feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)
+
+ feature_vector = self.dropout(feature_vector)
+ logits = self.classifier(feature_vector)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ # When `labels` has 2 dimensions, cross entropy is used as the loss function;
+ # binary cross entropy is used otherwise.
+ if labels.ndim == 2:
+ loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
+ else:
+ loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
+ if v is not None
+ )
+
+ return EntitySpanClassificationOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForSequenceClassification(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.luke = LukeModel(config)
+ self.dropout = nn.Dropout(
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LukeSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LukeSequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
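+
+ Example (a minimal sketch; `studio-ousia/luke-base` is assumed as the checkpoint, so the classification
+ head is freshly initialized):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, LukeForSequenceClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
+ >>> model = LukeForSequenceClassification.from_pretrained("studio-ousia/luke-base", num_labels=2)
+
+ >>> inputs = tokenizer("Beyoncé lives in Los Angeles", return_tensors="pt")
+ >>> outputs = model(**inputs, labels=torch.tensor([1]))
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```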
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ pooled_output = outputs.pooler_output
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
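+ # infer `problem_type` once from `num_labels` and the label dtype: a single output is treated as
+ # regression, integer labels as single-label classification, and float labels as multi-label classification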
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
+ if v is not None
+ )
+
+ return LukeSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE Model with a token classification head on top (a linear layer on top of the hidden-states output). To
+ solve a Named-Entity Recognition (NER) task with LUKE, `LukeForEntitySpanClassification` is more suitable than
+ this class.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForTokenClassification(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.luke = LukeModel(config, add_pooling_layer=False)
+ self.dropout = nn.Dropout(
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LukeTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LukeTokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
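+
+ Example (a minimal sketch; `studio-ousia/luke-base` is assumed as the checkpoint, so the token
+ classification head is freshly initialized):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, LukeForTokenClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
+ >>> model = LukeForTokenClassification.from_pretrained("studio-ousia/luke-base", num_labels=5)
+
+ >>> inputs = tokenizer("Beyoncé lives in Los Angeles", return_tensors="pt")
+ >>> labels = torch.zeros_like(inputs["input_ids"])  # one label id per word token
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```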
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ sequence_output = outputs.last_hidden_state
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
+ if v is not None
+ )
+
+ return LukeTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForQuestionAnswering(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+
+ self.luke = LukeModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LukeQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LukeQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
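+
+ Example (a minimal sketch; `studio-ousia/luke-base` is assumed as the checkpoint, so the span head is
+ freshly initialized and the positions below are only illustrative):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, LukeForQuestionAnswering
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
+ >>> model = LukeForQuestionAnswering.from_pretrained("studio-ousia/luke-base")
+
+ >>> question, context = "Where does Beyoncé live?", "Beyoncé lives in Los Angeles."
+ >>> inputs = tokenizer(question, context, return_tensors="pt")
+ >>> outputs = model(**inputs, start_positions=torch.tensor([1]), end_positions=torch.tensor([3]))
+ >>> start_logits, end_logits = outputs.start_logits, outputs.end_logits
+ ```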
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ sequence_output = outputs.last_hidden_state
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions.clamp_(0, ignored_index)
+ end_positions.clamp_(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ total_loss,
+ start_logits,
+ end_logits,
+ outputs.hidden_states,
+ outputs.entity_hidden_states,
+ outputs.attentions,
+ ]
+ if v is not None
+ )
+
+ return LukeQuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The LUKE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ LUKE_START_DOCSTRING,
+)
+class LukeForMultipleChoice(LukePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.luke = LukeModel(config)
+ self.dropout = nn.Dropout(
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LukeMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ entity_ids: Optional[torch.LongTensor] = None,
+ entity_attention_mask: Optional[torch.FloatTensor] = None,
+ entity_token_type_ids: Optional[torch.LongTensor] = None,
+ entity_position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LukeMultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
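+
+ Example (a minimal sketch; `studio-ousia/luke-base` is assumed as the checkpoint, and the tokenizer is
+ assumed to accept sentence pairs the same way as the RoBERTa tokenizer):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, LukeForMultipleChoice
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
+ >>> model = LukeForMultipleChoice.from_pretrained("studio-ousia/luke-base")
+
+ >>> prompt = "Beyoncé lives in"
+ >>> choices = ["Los Angeles.", "a violin case."]
+ >>> encoding = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
+ >>> inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # batch size 1, num_choices 2
+ >>> outputs = model(**inputs, labels=torch.tensor([0]))
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```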
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ entity_ids = entity_ids.view(-1, entity_ids.size(-1)) if entity_ids is not None else None
+ entity_attention_mask = (
+ entity_attention_mask.view(-1, entity_attention_mask.size(-1))
+ if entity_attention_mask is not None
+ else None
+ )
+ entity_token_type_ids = (
+ entity_token_type_ids.view(-1, entity_token_type_ids.size(-1))
+ if entity_token_type_ids is not None
+ else None
+ )
+ entity_position_ids = (
+ entity_position_ids.view(-1, entity_position_ids.size(-2), entity_position_ids.size(-1))
+ if entity_position_ids is not None
+ else None
+ )
+
+ outputs = self.luke(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ entity_ids=entity_ids,
+ entity_attention_mask=entity_attention_mask,
+ entity_token_type_ids=entity_token_type_ids,
+ entity_position_ids=entity_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ pooled_output = outputs.pooler_output
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(reshaped_logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ loss,
+ reshaped_logits,
+ outputs.hidden_states,
+ outputs.entity_hidden_states,
+ outputs.attentions,
+ ]
+ if v is not None
+ )
+
+ return LukeMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ entity_hidden_states=outputs.entity_hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/sew_d/modeling_sew_d.py b/openflamingo/lib/python3.10/site-packages/transformers/models/sew_d/modeling_sew_d.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ae717d9a28afd8f359ed8043deef0c179be2730
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/sew_d/modeling_sew_d.py
@@ -0,0 +1,1783 @@
+# coding=utf-8
+# Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch SEW model."""
+
+import math
+import warnings
+from collections.abc import Sequence
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss, LayerNorm
+
+from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import softmax_backward_data
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_sew_d import SEWDConfig
+
+
+logger = logging.get_logger(__name__)
+
+_HIDDEN_STATES_START_POSITION = 1
+
+
+# General docstring
+_CONFIG_FOR_DOC = "SEWDConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "asapp/sew-d-tiny-100k-ft-ls100h"
+_EXPECTED_OUTPUT_SHAPE = [1, 292, 384]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTIL OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
+_CTC_EXPECTED_LOSS = 0.21
+
+# Audio class docstring
+_SEQ_CLASS_CHECKPOINT = "anton-l/sew-d-mid-400k-ft-keyword-spotting"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
+_SEQ_CLASS_EXPECTED_LOSS = 3.16
+
+SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "asapp/sew-d-tiny-100k",
+ "asapp/sew-d-small-100k",
+ "asapp/sew-d-mid-100k",
+ "asapp/sew-d-mid-k127-100k",
+ "asapp/sew-d-base-100k",
+ "asapp/sew-d-base-plus-100k",
+ "asapp/sew-d-mid-400k",
+ "asapp/sew-d-mid-k127-400k",
+ "asapp/sew-d-base-plus-400k",
+ # See all SEW models at https://huggingface.co/models?filter=sew-d
+]
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[torch.LongTensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
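+
+ Example (a minimal sketch):
+
+ ```python
+ >>> mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.1, mask_length=8)
+ >>> mask.shape  # boolean array, True at masked positions
+ (2, 100)
+ ```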
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+ # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ batch_size, max_num_masked_span * mask_length
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
+
+
+# Copied from transformers.models.deberta_v2.modeling_deberta_v2.make_log_bucket_position
+def make_log_bucket_position(relative_pos, bucket_size, max_position):
+ sign = torch.sign(relative_pos)
+ mid = bucket_size // 2
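+ # relative positions inside (-mid, mid) keep their exact offset; positions outside are squashed
+ # logarithmically into the remaining buckets, preserving the sign of the offset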
+ abs_pos = torch.where(
+ (relative_pos < mid) & (relative_pos > -mid),
+ torch.tensor(mid - 1).type_as(relative_pos),
+ torch.abs(relative_pos),
+ )
+ log_pos = (
+ torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid
+ )
+ bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign)
+ return bucket_pos
+
+
+# Copied from transformers.models.deberta_v2.modeling_deberta_v2.build_relative_position
+def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):
+ """
+ Build relative position according to the query and key
+
+ We assume the absolute position of query \\(P_q\\) ranges from (0, query_size) and the absolute position of key
+ \\(P_k\\) ranges from (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
+ P_k\\)
+
+ Args:
+ query_size (int): the length of query
+ key_size (int): the length of key
+ bucket_size (int): the size of position bucket
+ max_position (int): the maximum allowed absolute position
+ device (`torch.device`): the device on which tensors will be created.
+
+ Return:
+ `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
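+
+ Example (a minimal sketch):
+
+ ```python
+ >>> rel_pos = build_relative_position(3, 3)  # rel_pos[0, i, j] == i - j when no bucketing is used
+ >>> rel_pos.shape
+ torch.Size([1, 3, 3])
+ ```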
+ """
+
+ q_ids = torch.arange(0, query_size, device=device)
+ k_ids = torch.arange(0, key_size, device=device)
+ rel_pos_ids = q_ids[:, None] - k_ids[None, :]
+ if bucket_size > 0 and max_position > 0:
+ rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
+ rel_pos_ids = rel_pos_ids.to(torch.long)
+ rel_pos_ids = rel_pos_ids[:query_size, :]
+ rel_pos_ids = rel_pos_ids.unsqueeze(0)
+ return rel_pos_ids
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
+def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
+def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
+def pos_dynamic_expand(pos_index, p2c_att, key_layer):
+ return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
+
+
+# Copied from transformers.models.deberta.modeling_deberta.get_mask
+def get_mask(input, local_context):
+ if not isinstance(local_context, DropoutContext):
+ dropout = local_context
+ mask = None
+ else:
+ dropout = local_context.dropout
+ dropout *= local_context.scale
+ mask = local_context.mask if local_context.reuse_mask else None
+
+ if dropout > 0 and mask is None:
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
+
+ if isinstance(local_context, DropoutContext):
+ if local_context.mask is None:
+ local_context.mask = mask
+
+ return mask, dropout
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SEWD
+class SEWDNoLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SEWD
+class SEWDLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+
+ hidden_states = hidden_states.transpose(-2, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(-2, -1)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SEWD
+class SEWDGroupNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.sew.modeling_sew.SEWPositionalConvEmbedding with SEW->SEWD
+class SEWDPositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ stride=config.squeeze_factor,
+ )
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = SEWDSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SEWD
+class SEWDSamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+# Copied from transformers.models.sew.modeling_sew.SEWUpsampling with SEW->SEWD
+class SEWDUpsampling(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
+ self.activation = ACT2FN[config.feat_extract_activation]
+ self.squeeze_factor = config.squeeze_factor
+
+ def forward(self, hidden_states):
+ hidden_states = self.projection(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ if self.squeeze_factor > 1:
+ # transform embedding channels to sequence length
+ bsz, src_len, src_embed_dim = hidden_states.size()
+ tgt_len = src_len * self.squeeze_factor
+ tgt_embed_dim = src_embed_dim // self.squeeze_factor
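+ # e.g. with squeeze_factor=2: (bsz, src_len, 2 * d) -> (bsz, src_len, 2, d) -> (bsz, 2 * src_len, d)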
+ hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim)
+ hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SEWD
+class SEWDFeatureEncoder(nn.Module):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [SEWDGroupNormConvLayer(config, layer_id=0)] + [
+ SEWDNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [SEWDLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.ModuleList(conv_layers)
+ self.gradient_checkpointing = False
+ self._requires_grad = True
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+
+ # make sure hidden_states require grad for gradient_checkpointing
+ if self._requires_grad and self.training:
+ hidden_states.requires_grad = True
+
+ for conv_layer in self.conv_layers:
+ if self._requires_grad and self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(conv_layer),
+ hidden_states,
+ )
+ else:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+class SEWDFeatureExtractor(SEWDFeatureEncoder):
+ def __init__(self, config):
+ super().__init__(config)
+ warnings.warn(
+ f"The class `{self.__class__.__name__}` has been depreciated "
+ "and will be removed in Transformers v5. "
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
+ FutureWarning,
+ )
+
+
+# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
+class ContextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
+ self.dropout = StableDropout(config.pooler_dropout)
+ self.config = config
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+
+ context_token = hidden_states[:, 0]
+ context_token = self.dropout(context_token)
+ pooled_output = self.dense(context_token)
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
+ return pooled_output
+
+ @property
+ def output_dim(self):
+ return self.config.hidden_size
+
+
+# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
+class XSoftmax(torch.autograd.Function):
+ """
+ Masked Softmax which is optimized for saving memory
+
+ Args:
+ input (`torch.tensor`): The input tensor that will apply softmax.
+ mask (`torch.IntTensor`):
+ The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
+ dim (int): The dimension that will apply softmax
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
+
+ >>> # Make a tensor
+ >>> x = torch.randn([4, 20, 100])
+
+ >>> # Create a mask
+ >>> mask = (x > 0).int()
+
+ >>> # Specify the dimension to apply softmax
+ >>> dim = -1
+
+ >>> y = XSoftmax.apply(x, mask, dim)
+ ```"""
+
+ @staticmethod
+ def forward(self, input, mask, dim):
+ self.dim = dim
+ rmask = ~(mask.to(torch.bool))
+
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
+ output = torch.softmax(output, self.dim)
+ output.masked_fill_(rmask, 0)
+ self.save_for_backward(output)
+ return output
+
+ @staticmethod
+ def backward(self, grad_output):
+ (output,) = self.saved_tensors
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
+ return inputGrad, None, None
+
+ @staticmethod
+ def symbolic(g, self, mask, dim):
+ import torch.onnx.symbolic_helper as sym_help
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
+
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
+ r_mask = g.op(
+ "Cast",
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
+ )
+ output = masked_fill(
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
+ )
+ output = softmax(g, output, dim)
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
+class DropoutContext(object):
+ def __init__(self):
+ self.dropout = 0
+ self.mask = None
+ self.scale = 1
+ self.reuse_mask = True
+
+
+# Copied from transformers.models.deberta.modeling_deberta.XDropout
+class XDropout(torch.autograd.Function):
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
+
+ @staticmethod
+ def forward(ctx, input, local_ctx):
+ mask, dropout = get_mask(input, local_ctx)
+ ctx.scale = 1.0 / (1 - dropout)
+ if dropout > 0:
+ ctx.save_for_backward(mask)
+ return input.masked_fill(mask, 0) * ctx.scale
+ else:
+ return input
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ if ctx.scale > 1:
+ (mask,) = ctx.saved_tensors
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
+ else:
+ return grad_output, None
+
+ @staticmethod
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
+ from torch.onnx import symbolic_opset12
+
+ dropout_p = local_ctx
+ if isinstance(local_ctx, DropoutContext):
+ dropout_p = local_ctx.dropout
+ # StableDropout only calls this function when training.
+ train = True
+ # TODO: We should check if the opset_version being used to export
+ # is > 12 here, but there's no good way to do that. As-is, if the
+ # opset_version < 12, export will fail with a CheckerError.
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
+ # if opset_version < 12:
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
+
+
+# Copied from transformers.models.deberta.modeling_deberta.StableDropout
+class StableDropout(nn.Module):
+ """
+ Optimized dropout module for stabilizing the training
+
+ Args:
+ drop_prob (float): the dropout probability
+ """
+
+ def __init__(self, drop_prob):
+ super().__init__()
+ self.drop_prob = drop_prob
+ self.count = 0
+ self.context_stack = None
+
+ def forward(self, x):
+ """
+ Call the module
+
+ Args:
+ x (`torch.tensor`): The input tensor to apply dropout
+ """
+ if self.training and self.drop_prob > 0:
+ return XDropout.apply(x, self.get_context())
+ return x
+
+ def clear_context(self):
+ self.count = 0
+ self.context_stack = None
+
+ def init_context(self, reuse_mask=True, scale=1):
+ if self.context_stack is None:
+ self.context_stack = []
+ self.count = 0
+ for c in self.context_stack:
+ c.reuse_mask = reuse_mask
+ c.scale = scale
+
+ def get_context(self):
+ if self.context_stack is not None:
+ if self.count >= len(self.context_stack):
+ self.context_stack.append(DropoutContext())
+ ctx = self.context_stack[self.count]
+ ctx.dropout = self.drop_prob
+ self.count += 1
+ return ctx
+ else:
+ return self.drop_prob
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaV2->SEWD, DebertaLayerNorm->LayerNorm, hidden_dropout_prob->activation_dropout
+class SEWDSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.activation_dropout)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta_v2.modeling_deberta_v2.DisentangledSelfAttention with attention_probs_dropout_prob->attention_dropout, hidden_dropout_prob->activation_dropout
+class DisentangledSelfAttention(nn.Module):
+ """
+ Disentangled self-attention module
+
+ Parameters:
+ config (`DebertaV2Config`):
+ A model config class instance with the configuration to build a new model. The schema is similar to
+ *BertConfig*, for more details, please refer [`DebertaV2Config`]
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ _attention_head_size = config.hidden_size // config.num_attention_heads
+ self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+
+ self.share_att_key = getattr(config, "share_att_key", False)
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
+ self.relative_attention = getattr(config, "relative_attention", False)
+
+ if self.relative_attention:
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.pos_ebd_size = self.max_relative_positions
+ if self.position_buckets > 0:
+ self.pos_ebd_size = self.position_buckets
+
+ self.pos_dropout = StableDropout(config.activation_dropout)
+
+ if not self.share_att_key:
+ if "c2p" in self.pos_att_type:
+ self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ if "p2c" in self.pos_att_type:
+ self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = StableDropout(config.attention_dropout)
+
+ def transpose_for_scores(self, x, attention_heads):
+ new_x_shape = x.size()[:-1] + (attention_heads, -1)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ """
+ Call the module
+
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ Input states to the module usually the output from previous layer, it will be the Q,K and V in
+ *Attention(Q,K,V)*
+
+ attention_mask (`torch.BoolTensor`):
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
+ sequence length, and element [i,j] = *1* means the *i*-th token in the input can attend to the *j*-th
+ token.
+
+ output_attentions (`bool`, optional):
+ Whether to return the attention matrix.
+
+ query_states (`torch.FloatTensor`, optional):
+ The *Q* state in *Attention(Q,K,V)*.
+
+ relative_pos (`torch.LongTensor`):
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
+
+ rel_embeddings (`torch.FloatTensor`):
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
+ \\text{max_relative_positions}\\), *hidden_size*].
+
+
+ """
+ if query_states is None:
+ query_states = hidden_states
+ query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
+ key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
+ value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
+
+ rel_att = None
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ scale_factor = 1
+ if "c2p" in self.pos_att_type:
+ scale_factor += 1
+ if "p2c" in self.pos_att_type:
+ scale_factor += 1
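+ # the attention score is a sum of up to three terms (content-to-content, content-to-position,
+ # position-to-content), so the softmax scale grows with the number of enabled terms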
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
+ attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))
+ if self.relative_attention:
+ rel_embeddings = self.pos_dropout(rel_embeddings)
+ rel_att = self.disentangled_attention_bias(
+ query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
+ )
+
+ if rel_att is not None:
+ attention_scores = attention_scores + rel_att
+ attention_scores = attention_scores.view(
+ -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
+ )
+
+ # bsz x height x length x dimension
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
+ attention_probs = self.dropout(attention_probs)
+ context_layer = torch.bmm(
+ attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
+ )
+ context_layer = (
+ context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ )
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
+ context_layer = context_layer.view(new_context_layer_shape)
+ if output_attentions:
+ return (context_layer, attention_probs)
+ else:
+ return context_layer
+
+ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
+ if relative_pos is None:
+ q = query_layer.size(-2)
+ relative_pos = build_relative_position(
+ q,
+ key_layer.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=query_layer.device,
+ )
+ if relative_pos.dim() == 2:
+ relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
+ elif relative_pos.dim() == 3:
+ relative_pos = relative_pos.unsqueeze(1)
+ # bsz x height x query x key
+ elif relative_pos.dim() != 4:
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
+
+ att_span = self.pos_ebd_size
+ relative_pos = relative_pos.long().to(query_layer.device)
+
+ rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
+ if self.share_att_key:
+ pos_query_layer = self.transpose_for_scores(
+ self.query_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
+ pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
+ query_layer.size(0) // self.num_attention_heads, 1, 1
+ )
+ else:
+ if "c2p" in self.pos_att_type:
+ pos_key_layer = self.transpose_for_scores(
+ self.pos_key_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(
+ query_layer.size(0) // self.num_attention_heads, 1, 1
+ ) # .split(self.all_head_size, dim=-1)
+ if "p2c" in self.pos_att_type:
+ pos_query_layer = self.transpose_for_scores(
+ self.pos_query_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(
+ query_layer.size(0) // self.num_attention_heads, 1, 1
+ ) # .split(self.all_head_size, dim=-1)
+
+ score = 0
+ # content->position
+ if "c2p" in self.pos_att_type:
+ scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor)
+ c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
+ c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
+ c2p_att = torch.gather(
+ c2p_att,
+ dim=-1,
+ index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
+ )
+ score += c2p_att / scale.to(dtype=c2p_att.dtype)
+
+ # position->content
+ if "p2c" in self.pos_att_type:
+ scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
+ if key_layer.size(-2) != query_layer.size(-2):
+ r_pos = build_relative_position(
+ key_layer.size(-2),
+ key_layer.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=query_layer.device,
+ )
+ r_pos = r_pos.unsqueeze(0)
+ else:
+ r_pos = relative_pos
+
+ p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
+ p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
+ p2c_att = torch.gather(
+ p2c_att,
+ dim=-1,
+ index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
+ ).transpose(-1, -2)
+ score += p2c_att / scale.to(dtype=p2c_att.dtype)
+
+ return score
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->SEWD
+class SEWDAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = DisentangledSelfAttention(config)
+ self.output = SEWDSelfOutput(config)
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ self_output = self.self(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ self_output, att_matrix = self_output
+ if query_states is None:
+ query_states = hidden_states
+ attention_output = self.output(self_output, query_states)
+
+ if output_attentions:
+ return (attention_output, att_matrix)
+ else:
+ return attention_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->SEWD
+class SEWDIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm, hidden_dropout_prob->activation_dropout
+class SEWDOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.activation_dropout)
+ self.config = config
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->SEWD
+class SEWDLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = SEWDAttention(config)
+ self.intermediate = SEWDIntermediate(config)
+ self.output = SEWDOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ output_attentions=False,
+ ):
+ attention_output = self.attention(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ attention_output, att_matrix = attention_output
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ if output_attentions:
+ return (layer_output, att_matrix)
+ else:
+ return layer_output
+
+
+# Copied from transformers.models.deberta_v2.modeling_deberta_v2.ConvLayer
+class ConvLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ kernel_size = getattr(config, "conv_kernel_size", 3)
+ groups = getattr(config, "conv_groups", 1)
+ self.conv_act = getattr(config, "conv_act", "tanh")
+ self.conv = nn.Conv1d(
+ config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
+ )
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def forward(self, hidden_states, residual_states, input_mask):
+ out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
+ rmask = (1 - input_mask).bool()
+ out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
+ out = ACT2FN[self.conv_act](self.dropout(out))
+
+ layer_norm_input = residual_states + out
+ output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
+
+ if input_mask is None:
+ output_states = output
+ else:
+ if input_mask.dim() != layer_norm_input.dim():
+ if input_mask.dim() == 4:
+ input_mask = input_mask.squeeze(1).squeeze(1)
+ input_mask = input_mask.unsqueeze(2)
+
+ input_mask = input_mask.to(output.dtype)
+ output_states = output * input_mask
+
+ return output_states
+
+
+# Copied from transformers.models.deberta_v2.modeling_deberta_v2.DebertaV2Encoder with DebertaV2->SEWD
+class SEWDTransformerEncoder(nn.Module):
+ """Modified BertEncoder with relative position bias support"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.layer = nn.ModuleList([SEWDLayer(config) for _ in range(config.num_hidden_layers)])
+ self.relative_attention = getattr(config, "relative_attention", False)
+
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ pos_ebd_size = self.max_relative_positions * 2
+
+ if self.position_buckets > 0:
+ pos_ebd_size = self.position_buckets * 2
+
+ self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
+
+ self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
+
+ if "layer_norm" in self.norm_rel_ebd:
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
+
+ self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
+ self.gradient_checkpointing = False
+
+ def get_rel_embedding(self):
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
+ if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
+ rel_embeddings = self.LayerNorm(rel_embeddings)
+ return rel_embeddings
+
+ def get_attention_mask(self, attention_mask):
+ if attention_mask.dim() <= 2:
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
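+ # the outer product of the 1-D padding mask with itself produces a
+ # (batch, 1, seq_len, seq_len) mask: position i may attend to position j
+ # only when both are real (non-padding) tokens.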
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
+ elif attention_mask.dim() == 3:
+ attention_mask = attention_mask.unsqueeze(1)
+
+ return attention_mask
+
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
+ if self.relative_attention and relative_pos is None:
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
+ relative_pos = build_relative_position(
+ q,
+ hidden_states.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=hidden_states.device,
+ )
+ return relative_pos
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_hidden_states=True,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ return_dict=True,
+ ):
+ if attention_mask.dim() <= 2:
+ input_mask = attention_mask
+ else:
+ input_mask = attention_mask.sum(-2) > 0
+ attention_mask = self.get_attention_mask(attention_mask)
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
+
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[0]
+ else:
+ next_kv = hidden_states
+ rel_embeddings = self.get_rel_embedding()
+ output_states = next_kv
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs, output_attentions)
+
+ return custom_forward
+
+ output_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(layer_module),
+ next_kv,
+ attention_mask,
+ query_states,
+ relative_pos,
+ rel_embeddings,
+ )
+ else:
+ output_states = layer_module(
+ next_kv,
+ attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ output_states, att_m = output_states
+
+ if i == 0 and self.conv is not None:
+ output_states = self.conv(hidden_states, output_states, input_mask)
+
+ if query_states is not None:
+ query_states = output_states
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
+ else:
+ next_kv = output_states
+
+ if output_attentions:
+ all_attentions = all_attentions + (att_m,)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ if not return_dict:
+ return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class SEWDEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = SEWDPositionalConvEmbedding(config)
+ self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
+ self.encoder = SEWDTransformerEncoder(config)
+ self.upsample = SEWDUpsampling(config)
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (hidden_states.shape[0], max_encoder_length), dtype=torch.long, device=hidden_states.device
+ )
+ else:
+ # make sure padded tokens output 0
+ hidden_states[~attention_mask.bool()] = 0.0
+
+ input_lengths = (attention_mask.long()).sum(-1)
+ # apply pooling formula to get real output_lengths
+ output_lengths = input_lengths // self.config.squeeze_factor
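+ # e.g. with squeeze_factor=2 and input_lengths=[100, 60], the pooled sequences
+ # are 50 and 30 frames long; the mask built below marks exactly those leading
+ # positions for each batch entry.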
+ attention_ids = (
+ torch.arange(0, max_encoder_length, device=output_lengths.device)
+ .view(1, -1)
+ .expand(output_lengths.shape[0], -1)
+ )
+ attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()
+
+ n_input_timesteps = hidden_states.shape[1]
+
+ hidden_states = hidden_states.transpose(1, 2)
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ pooled_hidden_states = self.pool(hidden_states)
+ min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
+ hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
+ hidden_states = hidden_states.transpose(1, 2)
+
+ encoder_outputs = self.encoder(hidden_states, attention_mask, output_hidden_states, output_attentions)
+
+ hidden_states = self.upsample(encoder_outputs.last_hidden_state)
+ if hidden_states.shape[1] < n_input_timesteps:
+ hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, encoder_outputs.hidden_states, encoder_outputs.attentions] if v is not None
+ )
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class SEWDPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SEWDConfig
+ base_model_prefix = "sew-d"
+ main_input_name = "input_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, SEWDPositionalConvEmbedding):
+ nn.init.normal_(
+ module.conv.weight,
+ mean=0,
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
+ )
+ nn.init.constant_(module.conv.bias, 0)
+ elif isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
+ with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ nn.init.kaiming_normal_(module.weight.data)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
+ module.bias.data.zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
+ output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+ batch_size = attention_mask.shape[0]
+
+ attention_mask = torch.zeros(
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+ # these two operations make sure that all values before the output length indices are attended to
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
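+ # e.g. with feature_vector_length=5 and output_lengths=[2, 4], the ones placed at
+ # indices 1 and 3 are spread backwards by the flip/cumsum/flip below, giving masks
+ # [1, 1, 0, 0, 0] and [1, 1, 1, 1, 0].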
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
+ return attention_mask
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, SEWDTransformerEncoder):
+ module.gradient_checkpointing = value
+
+
+SEWD_START_DOCSTRING = r"""
+ SEW-D was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech
+ Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger,
+ Yoav Artzi.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, etc.).
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`SEWDConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SEWD_INPUTS_DOCSTRING = r"""
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare SEW-D Model transformer outputting raw hidden-states without any specific head on top.",
+ SEWD_START_DOCSTRING,
+)
+# Copied from transformers.models.sew.modeling_sew.SEWModel with SEW->SEWD, layer_norm_eps->feature_layer_norm_eps
+class SEWDModel(SEWDPreTrainedModel):
+ def __init__(self, config: SEWDConfig):
+ super().__init__(config)
+ self.config = config
+ self.feature_extractor = SEWDFeatureEncoder(config)
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.feature_layer_norm_eps)
+
+ self.project_features = config.conv_dim[-1] != config.hidden_size
+ if self.project_features:
+ self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
+
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
+
+ self.encoder = SEWDEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
+ def _mask_hidden_states(
+ self,
+ hidden_states: torch.FloatTensor,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
+ @add_start_docstrings_to_model_forward(SEWD_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose(1, 2)
+ extract_features = self.layer_norm(extract_features)
+
+ if self.project_features:
+ extract_features = self.feature_projection(extract_features)
+ hidden_states = self.feature_dropout(extract_features)
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """SEW-D Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ SEWD_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEWD, wav2vec2->sew_d, WAV_2_VEC_2->SEWD
+class SEWDForCTC(SEWDPreTrainedModel):
+ def __init__(self, config, target_lang: Optional[str] = None):
+ super().__init__(config)
+
+ self.sew_d = SEWDModel(config)
+ self.dropout = nn.Dropout(config.final_dropout)
+
+ self.target_lang = target_lang
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `SEWDForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = (
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
+ )
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
+ passing `target_lang=...` to `from_pretrained(...)`.
+
+ This method is **not** supposed to be called by the user and is prone to be changed in the future.
+ """
+
+ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
+ # correctly load adapter layers for SEWD so that we do not have to introduce a new API to
+ # [`PreTrainedModel`]. While slightly hacky, SEWD never has to tie input and output embeddings, so it is
+ # ok to repurpose this function here.
+ target_lang = self.target_lang
+
+ if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
+ raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
+ elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
+ logger.info("By default `target_lang` is set to 'eng'.")
+ elif target_lang is not None:
+ self.load_adapter(target_lang, force_load=True)
+
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.sew_d.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.sew_d.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(SEWD_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.sew_d(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states)
+
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
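+ # e.g. labels [[5, 7, -100], [2, -100, -100]] yield target_lengths [2, 1] and
+ # flattened_targets [5, 7, 2], the 1-D concatenated layout expected by ctc_loss.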
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ SEWD Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB
+ Keyword Spotting.
+ """,
+ SEWD_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->SEWD, wav2vec2->sew_d, WAV_2_VEC_2->SEWD
+class SEWDForSequenceClassification(SEWDPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, "add_adapter") and config.add_adapter:
+ raise ValueError(
+ "Sequence classification does not support the use of SEWD adapters (config.add_adapter=True)"
+ )
+ self.sew_d = SEWDModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.sew_d.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.sew_d.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(SEWD_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.sew_d(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
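+ # the stacked hidden states have shape (batch, num_layers, seq_len, hidden_size);
+ # the softmax weights form a learned convex combination over the layer axis.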
+ else:
+ hidden_states = outputs[0]
+
+ hidden_states = self.projector(hidden_states)
+ if attention_mask is None:
+ pooled_output = hidden_states.mean(dim=1)
+ else:
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+ hidden_states[~padding_mask] = 0.0
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/image_processing_swin2sr.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/image_processing_swin2sr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd309709a83a1c13acce2a4e6c9706cd9cbd4976
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/swin2sr/__pycache__/image_processing_swin2sr.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3b1ffa79a8a00dce8f044101a521466ed2caad7
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/configuration_swinv2.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/configuration_swinv2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33af8b02d5d43c36181b55d83bc9fb0a4f19b76d
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/configuration_swinv2.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/convert_swinv2_timm_to_pytorch.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/convert_swinv2_timm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a18ac9a49167ce1b8ac714341f2d7a29caddad4
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/convert_swinv2_timm_to_pytorch.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/modeling_swinv2.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/modeling_swinv2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45152b71e7284bec7e7bda71ef8e73e3c14f8d7c
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/__pycache__/modeling_swinv2.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/configuration_swinv2.py b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/configuration_swinv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..96e5711465dd68392a493f2d2f92b64035b48470
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/configuration_swinv2.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Swinv2 Transformer model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "microsoft/swinv2-tiny-patch4-window8-256": (
+ "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
+ ),
+}
+
+
+class Swinv2Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin
+ Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
+ [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 4):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ embed_dim (`int`, *optional*, defaults to 96):
+ Dimensionality of patch embedding.
+ depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
+ Depth of each layer in the Transformer encoder.
+ num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
+ Number of attention heads in each layer of the Transformer encoder.
+ window_size (`int`, *optional*, defaults to 7):
+ Size of windows.
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not a learnable bias should be added to the queries, keys and values.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ Stochastic depth rate.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether or not to add absolute position embeddings to the patch embeddings.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ encoder_stride (`int`, *optional*, defaults to 32):
+ Factor to increase the spatial resolution by in the decoder head for masked image modeling.
+
+ Example:
+
+ ```python
+ >>> from transformers import Swinv2Config, Swinv2Model
+
+ >>> # Initializing a Swinv2 microsoft/swinv2-tiny-patch4-window8-256 style configuration
+ >>> configuration = Swinv2Config()
+
+ >>> # Initializing a model (with random weights) from the microsoft/swinv2-tiny-patch4-window8-256 style configuration
+ >>> model = Swinv2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+ model_type = "swinv2"
+
+ attribute_map = {
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ }
+
+ def __init__(
+ self,
+ image_size=224,
+ patch_size=4,
+ num_channels=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ drop_path_rate=0.1,
+ hidden_act="gelu",
+ use_absolute_embeddings=False,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ encoder_stride=32,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.embed_dim = embed_dim
+ self.depths = depths
+ self.num_layers = len(depths)
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.mlp_ratio = mlp_ratio
+ self.qkv_bias = qkv_bias
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.drop_path_rate = drop_path_rate
+ self.hidden_act = hidden_act
+ self.use_absolute_embeddings = use_absolute_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ self.encoder_stride = encoder_stride
+ # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
+ # this indicates the channel dimension after the last stage of the model
+ self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
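+ # e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2], this gives 96 * 2**3 = 768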
+ self.pretrained_window_sizes = (0, 0, 0, 0)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..21deda864c6dd59dd28c3079872f059b2de73d30
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Swinv2 checkpoints from the timm library."""
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import timm
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
+
+
+def get_swinv2_config(swinv2_name):
+ config = Swinv2Config()
+ name_split = swinv2_name.split("_")
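+ # The indexing below assumes timm-style names such as "swinv2_tiny_window8_256" or
+ # "swinv2_base_window12to16_192to256_22kft1k": the window token is expected at
+ # position 2 and the resolution token at position 3 of the underscore split.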
+
+ model_size = name_split[1]
+ if "to" in name_split[3]:
+ img_size = int(name_split[3][-3:])
+ else:
+ img_size = int(name_split[3])
+ if "to" in name_split[2]:
+ window_size = int(name_split[2][-2:])
+ else:
+ window_size = int(name_split[2][6:])
+
+ if model_size == "tiny":
+ embed_dim = 96
+ depths = (2, 2, 6, 2)
+ num_heads = (3, 6, 12, 24)
+ elif model_size == "small":
+ embed_dim = 96
+ depths = (2, 2, 18, 2)
+ num_heads = (3, 6, 12, 24)
+ elif model_size == "base":
+ embed_dim = 128
+ depths = (2, 2, 18, 2)
+ num_heads = (4, 8, 16, 32)
+ else:
+ embed_dim = 192
+ depths = (2, 2, 18, 2)
+ num_heads = (6, 12, 24, 48)
+
+ if "to" in swinv2_name:
+ config.pretrained_window_sizes = (12, 12, 12, 6)
+
+ if ("22k" in swinv2_name) and ("to" not in swinv2_name):
+ num_classes = 21841
+ repo_id = "huggingface/label-files"
+ filename = "imagenet-22k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ else:
+ num_classes = 1000
+ repo_id = "huggingface/label-files"
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ config.image_size = img_size
+ config.num_labels = num_classes
+ config.embed_dim = embed_dim
+ config.depths = depths
+ config.num_heads = num_heads
+ config.window_size = window_size
+
+ return config
+
+
+def rename_key(name):
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "embeddings.norm")
+ if "layers" in name:
+ name = "encoder." + name
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name:
+ name = name.replace("attn", "attention.self")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+ if "q_bias" in name:
+ name = name.replace("q_bias", "query.bias")
+ if "k_bias" in name:
+ name = name.replace("k_bias", "key.bias")
+ if "v_bias" in name:
+ name = name.replace("v_bias", "value.bias")
+ if "cpb_mlp" in name:
+ name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
+ if name == "norm.weight":
+ name = "layernorm.weight"
+ if name == "norm.bias":
+ name = "layernorm.bias"
+
+ if "head" in name:
+ name = name.replace("head", "classifier")
+ else:
+ name = "swinv2." + name
+
+ return name
+
+
+def convert_state_dict(orig_state_dict, model):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if "mask" in key:
+ continue
+ elif "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[1])
+ block_num = int(key_split[3])
+ dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
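+ # timm stores query, key and value as one fused projection of shape (3 * dim, ...);
+ # the slices below split it into the separate HF query/key/value parameters.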
+
+ if "weight" in key:
+ orig_state_dict[
+ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
+ ] = val[:dim, :]
+ orig_state_dict[
+ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
+ ] = val[dim : dim * 2, :]
+ orig_state_dict[
+ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
+ ] = val[-dim:, :]
+ else:
+ orig_state_dict[
+ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
+ ] = val[:dim]
+ orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
+ dim : dim * 2
+ ]
+ orig_state_dict[
+ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
+ ] = val[-dim:]
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
+ timm_model = timm.create_model(swinv2_name, pretrained=True)
+ timm_model.eval()
+
+ config = get_swinv2_config(swinv2_name)
+ model = Swinv2ForImageClassification(config)
+ model.eval()
+
+ new_state_dict = convert_state_dict(timm_model.state_dict(), model)
+ model.load_state_dict(new_state_dict)
+
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+
+ image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
+ image = Image.open(requests.get(url, stream=True).raw)
+ inputs = image_processor(images=image, return_tensors="pt")
+
+ timm_outs = timm_model(inputs["pixel_values"])
+ hf_outs = model(**inputs).logits
+
+ assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
+
+ print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ model.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
+ organization="nandwalritik",
+ commit_message="Add model",
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--swinv2_name",
+ default="swinv2_tiny_patch4_window8_256",
+ type=str,
+ help="Name of the Swinv2 timm model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/modeling_swinv2.py b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/modeling_swinv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e05643a63583e146c5225559b1ce6f1b66a7de62
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/swinv2/modeling_swinv2.py
@@ -0,0 +1,1331 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Swinv2 Transformer model."""
+
+
+import collections.abc
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_swinv2 import Swinv2Config
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "Swinv2Config"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "microsoft/swinv2-tiny-patch4-window8-256"
+_EXPECTED_OUTPUT_SHAPE = [1, 64, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "microsoft/swinv2-tiny-patch4-window8-256"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"
+
+
+SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "microsoft/swinv2-tiny-patch4-window8-256",
+ # See all Swinv2 models at https://huggingface.co/models?filter=swinv2
+]
+
+
+# drop_path, Swinv2PatchEmbeddings, Swinv2PatchMerging and Swinv2DropPath are from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/swin_transformer_v2.py.
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->Swinv2
+class Swinv2EncoderOutput(ModelOutput):
+ """
+ Swinv2 encoder's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->Swinv2
+class Swinv2ModelOutput(ModelOutput):
+ """
+ Swinv2 model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
+ Average pooling of the last layer hidden-state.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinMaskedImageModelingOutput with Swin->Swinv2
+class Swinv2MaskedImageModelingOutput(ModelOutput):
+ """
+ Swinv2 masked image model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
+ Masked image modeling (MLM) loss.
+ reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Reconstructed pixel values.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ reconstruction: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def logits(self):
+ warnings.warn(
+ "logits attribute is deprecated and will be removed in version 5 of Transformers."
+ " Please use the reconstruction attribute to retrieve the final output instead.",
+ FutureWarning,
+ )
+ return self.reconstruction
+
+
+@dataclass
+# Copied from transformers.models.swin.modeling_swin.SwinImageClassifierOutput with Swin->Swinv2
+class Swinv2ImageClassifierOutput(ModelOutput):
+ """
+ Swinv2 outputs for image classification.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+# Copied from transformers.models.swin.modeling_swin.window_partition
+def window_partition(input_feature, window_size):
+ """
+ Partitions the given input into windows.
+ """
+ batch_size, height, width, num_channels = input_feature.shape
+ input_feature = input_feature.view(
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
+ )
+ windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
+ return windows
+
+
+# Copied from transformers.models.swin.modeling_swin.window_reverse
+def window_reverse(windows, window_size, height, width):
+ """
+ Merges windows to produce higher resolution features.
+ """
+ num_channels = windows.shape[-1]
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
+ return windows
+
+
+# Copied from transformers.models.swin.modeling_swin.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swinv2
+class Swinv2DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->Swinv2
+class Swinv2Embeddings(nn.Module):
+ """
+ Construct the patch and position embeddings. Optionally, also the mask token.
+ """
+
+ def __init__(self, config, use_mask_token=False):
+ super().__init__()
+
+ self.patch_embeddings = Swinv2PatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.patch_grid = self.patch_embeddings.grid_size
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
+
+ if config.use_absolute_embeddings:
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
+ else:
+ self.position_embeddings = None
+
+ self.norm = nn.LayerNorm(config.embed_dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
+ ) -> Tuple[torch.Tensor]:
+ embeddings, output_dimensions = self.patch_embeddings(pixel_values)
+ embeddings = self.norm(embeddings)
+ batch_size, seq_len, _ = embeddings.size()
+
+ if bool_masked_pos is not None:
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
+ # replace the masked visual tokens by mask_tokens
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ if self.position_embeddings is not None:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings, output_dimensions
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings with Swin->Swinv2
+class Swinv2PatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.embed_dim
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def maybe_pad(self, pixel_values, height, width):
+ if width % self.patch_size[1] != 0:
+ pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ if height % self.patch_size[0] != 0:
+ pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
+ pixel_values = nn.functional.pad(pixel_values, pad_values)
+ return pixel_values
+
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
+ _, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ # pad the input to be divisible by self.patch_size, if needed
+ pixel_values = self.maybe_pad(pixel_values, height, width)
+ embeddings = self.projection(pixel_values)
+ _, _, height, width = embeddings.shape
+ output_dimensions = (height, width)
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+
+ return embeddings, output_dimensions
+
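+# Shape sketch (illustrative) for the patch embeddings above, assuming the defaults of the
+# swinv2-tiny-patch4-window8-256 checkpoint (image_size=256, patch_size=4, num_channels=3,
+# embed_dim=96): the Conv2d projection maps (batch, 3, 256, 256) to (batch, 96, 64, 64),
+# which is flattened and transposed to embeddings of shape (batch, 64 * 64, 96), returned
+# together with output_dimensions = (64, 64).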
+
+class Swinv2PatchMerging(nn.Module):
+ """
+ Patch Merging Layer.
+
+ Args:
+ input_resolution (`Tuple[int]`):
+ Resolution of input feature.
+ dim (`int`):
+ Number of input channels.
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
+ Normalization layer class.
+ """
+
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
+ super().__init__()
+ self.input_resolution = input_resolution
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(2 * dim)
+
+ def maybe_pad(self, input_feature, height, width):
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
+ if should_pad:
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
+ input_feature = nn.functional.pad(input_feature, pad_values)
+
+ return input_feature
+
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
+ height, width = input_dimensions
+ # `dim` is height * width
+ batch_size, dim, num_channels = input_feature.shape
+
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
+ # pad input so that height and width are divisible by 2, if needed
+ input_feature = self.maybe_pad(input_feature, height, width)
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
+ # [batch_size, height/2 * width/2, 4*num_channels]
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*C]
+
+ input_feature = self.reduction(input_feature)
+ input_feature = self.norm(input_feature)
+
+ return input_feature
+
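+# Shape sketch (illustrative) for the patch merging above, assuming input_dimensions=(64, 64)
+# and num_channels=96: the four strided slices each have shape (batch, 32, 32, 96); after
+# concatenation and flattening the tensor is (batch, 32 * 32, 384), and the linear reduction
+# maps it to (batch, 1024, 192) before the LayerNorm. Note that Swinv2 reduces before
+# normalizing, whereas Swin v1 normalizes the 4*dim features first.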
+
+class Swinv2SelfAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
+ super().__init__()
+ if dim % num_heads != 0:
+ raise ValueError(
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
+ )
+
+ self.num_attention_heads = num_heads
+ self.attention_head_size = int(dim / num_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.window_size = (
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
+ )
+ self.pretrained_window_size = pretrained_window_size
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
+ # mlp to generate continuous relative position bias
+ self.continuous_position_bias_mlp = nn.Sequential(
+ nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)
+ )
+
+ # get relative_coords_table
+ relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
+ relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
+ relative_coords_table = (
+ torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij"))
+ .permute(1, 2, 0)
+ .contiguous()
+ .unsqueeze(0)
+ ) # [1, 2*window_height - 1, 2*window_width - 1, 2]
+ if pretrained_window_size[0] > 0:
+ relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
+ relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
+ else:
+ relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
+ relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
+ relative_coords_table *= 8 # normalize to -8, 8
+ relative_coords_table = (
+ torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
+ )
+ self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
+ coords_flatten = torch.flatten(coords, 1)
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+ relative_coords[:, :, 0] += self.window_size[0] - 1
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1)
+ self.register_buffer("relative_position_index", relative_position_index, persistent=False)
+
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ batch_size, dim, num_channels = hidden_states.shape
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # cosine attention
+ attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(
+ key_layer, dim=-1
+ ).transpose(-2, -1)
+ logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
+ attention_scores = attention_scores * logit_scale
+ relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(
+ -1, self.num_attention_heads
+ )
+ # [window_height*window_width,window_height*window_width,num_attention_heads]
+ relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
+ )
+ # [num_attention_heads,window_height*window_width,window_height*window_width]
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+ relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in Swinv2Model forward() function)
+ mask_shape = attention_mask.shape[0]
+ attention_scores = attention_scores.view(
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
+ )
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
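+# Note on the attention above: instead of scaled dot-product attention, Swinv2 uses cosine
+# attention. Queries and keys are L2-normalized, so each raw score is a cosine similarity,
+# multiplied by a learned per-head temperature exp(clamp(logit_scale, max=log(100))). The
+# relative position bias is not a learned lookup table as in Swin v1; it is produced by a
+# small MLP over log-spaced relative coordinates (continuous position bias) and squashed to
+# the range (0, 16) via 16 * sigmoid(...) before being added to the attention scores.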
+
+# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swinv2
+class Swinv2SelfOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, dim)
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class Swinv2Attention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
+ super().__init__()
+ self.self = Swinv2SelfAttention(
+ config=config,
+ dim=dim,
+ num_heads=num_heads,
+ window_size=window_size,
+ pretrained_window_size=pretrained_window_size
+ if isinstance(pretrained_window_size, collections.abc.Iterable)
+ else (pretrained_window_size, pretrained_window_size),
+ )
+ self.output = Swinv2SelfOutput(config, dim)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->Swinv2
+class Swinv2Intermediate(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->Swinv2
+class Swinv2Output(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class Swinv2Layer(nn.Module):
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0, pretrained_window_size=0):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.shift_size = shift_size
+ self.window_size = config.window_size
+ self.input_resolution = input_resolution
+ self.set_shift_and_window_size(input_resolution)
+ self.attention = Swinv2Attention(
+ config=config,
+ dim=dim,
+ num_heads=num_heads,
+ window_size=self.window_size,
+ pretrained_window_size=pretrained_window_size
+ if isinstance(pretrained_window_size, collections.abc.Iterable)
+ else (pretrained_window_size, pretrained_window_size),
+ )
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.drop_path = Swinv2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+ self.intermediate = Swinv2Intermediate(config, dim)
+ self.output = Swinv2Output(config, dim)
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+
+ def set_shift_and_window_size(self, input_resolution):
+ target_window_size = (
+ self.window_size
+ if isinstance(self.window_size, collections.abc.Iterable)
+ else (self.window_size, self.window_size)
+ )
+ target_shift_size = (
+ self.shift_size
+ if isinstance(self.shift_size, collections.abc.Iterable)
+ else (self.shift_size, self.shift_size)
+ )
+ window_dim = input_resolution[0].item() if torch.is_tensor(input_resolution[0]) else input_resolution[0]
+ self.window_size = window_dim if window_dim <= target_window_size[0] else target_window_size[0]
+ self.shift_size = (
+ 0
+ if input_resolution
+ <= (
+ self.window_size
+ if isinstance(self.window_size, collections.abc.Iterable)
+ else (self.window_size, self.window_size)
+ )
+ else target_shift_size[0]
+ )
+
+ def get_attn_mask(self, height, width, dtype):
+ if self.shift_size > 0:
+ # calculate attention mask for shifted window multihead self attention
+ img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
+ height_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ width_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ count = 0
+ for height_slice in height_slices:
+ for width_slice in width_slices:
+ img_mask[:, height_slice, width_slice, :] = count
+ count += 1
+
+ mask_windows = window_partition(img_mask, self.window_size)
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+ else:
+ attn_mask = None
+ return attn_mask
+
+ def maybe_pad(self, hidden_states, height, width):
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
+ pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
+ return hidden_states, pad_values
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ if not always_partition:
+ self.set_shift_and_window_size(input_dimensions)
+ else:
+ pass
+ height, width = input_dimensions
+ batch_size, _, channels = hidden_states.size()
+ shortcut = hidden_states
+
+ # pad hidden_states to multiples of window size
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
+ _, height_pad, width_pad, _ = hidden_states.shape
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ else:
+ shifted_hidden_states = hidden_states
+
+ # partition windows
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
+ attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
+ if attn_mask is not None:
+ attn_mask = attn_mask.to(hidden_states_windows.device)
+
+ attention_outputs = self.attention(
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
+ )
+
+ attention_output = attention_outputs[0]
+
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
+ shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ attention_windows = shifted_windows
+
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
+ if was_padded:
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
+
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
+ hidden_states = self.layernorm_before(attention_windows)
+ hidden_states = shortcut + self.drop_path(hidden_states)
+
+ layer_output = self.intermediate(hidden_states)
+ layer_output = self.output(layer_output)
+ layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))
+
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
+ return layer_outputs
+
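+# Processing order in Swinv2Layer.forward above: the sequence is reshaped to a 2D map and
+# padded to a multiple of window_size; for shifted blocks a cyclic roll of shift_size is
+# applied; windows are partitioned, attended to (with the shifted-window mask), merged back,
+# un-shifted, and the padding is cropped. Swinv2 then uses the "res-post-norm" scheme:
+# LayerNorm is applied to the attention / MLP output *before* it is added to the residual,
+# rather than to the block input as in Swin v1.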
+
+class Swinv2Stage(nn.Module):
+ def __init__(
+ self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0
+ ):
+ super().__init__()
+ self.config = config
+ self.dim = dim
+ self.blocks = nn.ModuleList(
+ [
+ Swinv2Layer(
+ config=config,
+ dim=dim,
+ input_resolution=input_resolution,
+ num_heads=num_heads,
+ shift_size=0 if (i % 2 == 0) else config.window_size // 2,
+ pretrained_window_size=pretrained_window_size,
+ )
+ for i in range(depth)
+ ]
+ )
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
+ else:
+ self.downsample = None
+
+ self.pointing = False
+
+ # Copied from transformers.models.swin.modeling_swin.SwinStage.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ height, width = input_dimensions
+ for i, layer_module in enumerate(self.blocks):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+
+ hidden_states_before_downsampling = hidden_states
+ if self.downsample is not None:
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
+ hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
+ else:
+ output_dimensions = (height, width, height, width)
+
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
+
+ if output_attentions:
+ stage_outputs += layer_outputs[1:]
+ return stage_outputs
+
+
+class Swinv2Encoder(nn.Module):
+ def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)):
+ super().__init__()
+ self.num_layers = len(config.depths)
+ self.config = config
+ if self.config.pretrained_window_sizes is not None:
+ pretrained_window_sizes = config.pretrained_window_sizes
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+ self.layers = nn.ModuleList(
+ [
+ Swinv2Stage(
+ config=config,
+ dim=int(config.embed_dim * 2**i_layer),
+ input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
+ depth=config.depths[i_layer],
+ num_heads=config.num_heads[i_layer],
+ drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
+ downsample=Swinv2PatchMerging if (i_layer < self.num_layers - 1) else None,
+ pretrained_window_size=pretrained_window_sizes[i_layer],
+ )
+ for i_layer in range(self.num_layers)
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ # Copied from transformers.models.swin.modeling_swin.SwinEncoder.forward with SwinEncoderOutput->Swinv2EncoderOutput
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ output_hidden_states_before_downsampling: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, Swinv2EncoderOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_reshaped_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if output_hidden_states:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ for i, layer_module in enumerate(self.layers):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs, output_attentions)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+ hidden_states_before_downsampling = layer_outputs[1]
+ output_dimensions = layer_outputs[2]
+
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
+
+ if output_hidden_states and output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
+ # rearrange b (h w) c -> b c h w
+ # here we use the original (not downsampled) height and width
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
+ )
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states_before_downsampling,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange b (h w) c -> b c h w
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ if output_attentions:
+ all_self_attentions += layer_outputs[3:]
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+
+ return Swinv2EncoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ reshaped_hidden_states=all_reshaped_hidden_states,
+ )
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->Swinv2,swin->swinv2
+class Swinv2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Swinv2Config
+ base_model_prefix = "swinv2"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, Swinv2Encoder):
+ module.gradient_checkpointing = value
+
+
+SWINV2_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`Swinv2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SWINV2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
+ for details.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Swinv2 Model transformer outputting raw hidden-states without any specific head on top.",
+ SWINV2_START_DOCSTRING,
+)
+# Copied from transformers.models.swin.modeling_swin.SwinModel with SWIN->SWINV2,Swin->Swinv2
+class Swinv2Model(Swinv2PreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
+ super().__init__(config)
+ self.config = config
+ self.num_layers = len(config.depths)
+ self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
+
+ self.embeddings = Swinv2Embeddings(config, use_mask_token=use_mask_token)
+ self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid)
+
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layers[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Swinv2ModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Swinv2ModelOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, len(self.config.depths))
+
+ embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ input_dimensions,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+
+ pooled_output = None
+ if self.pooler is not None:
+ pooled_output = self.pooler(sequence_output.transpose(1, 2))
+ pooled_output = torch.flatten(pooled_output, 1)
+
+ if not return_dict:
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return output
+
+ return Swinv2ModelOutput(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """Swinv2 Model with a decoder on top for masked image modeling, as proposed in
+ [SimMIM](https://arxiv.org/abs/2111.09886).
+
+ Note that we provide a script to pre-train this model on custom data in our [examples
+ directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
+ """,
+ SWINV2_START_DOCSTRING,
+)
+# Copied from transformers.models.swin.modeling_swin.SwinForMaskedImageModeling with swin->swinv2, base-simmim-window6-192->tiny-patch4-window8-256,SWIN->SWINV2,Swin->Swinv2,192->256
+class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.swinv2 = Swinv2Model(config, add_pooling_layer=False, use_mask_token=True)
+
+ num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
+ self.decoder = nn.Sequential(
+ nn.Conv2d(
+ in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
+ ),
+ nn.PixelShuffle(config.encoder_stride),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Swinv2MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Swinv2MaskedImageModelingOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
+ >>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
+
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> # create random boolean mask of shape (batch_size, num_patches)
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
+ >>> list(reconstructed_pixel_values.shape)
+ [1, 3, 256, 256]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.swinv2(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ # Reshape to (batch_size, num_channels, height, width)
+ sequence_output = sequence_output.transpose(1, 2)
+ batch_size, num_channels, sequence_length = sequence_output.shape
+ height = width = math.floor(sequence_length**0.5)
+ sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
+
+ # Reconstruct pixel values
+ reconstructed_pixel_values = self.decoder(sequence_output)
+
+ masked_im_loss = None
+ if bool_masked_pos is not None:
+ size = self.config.image_size // self.config.patch_size
+ bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
+ mask = (
+ bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
+ .repeat_interleave(self.config.patch_size, 2)
+ .unsqueeze(1)
+ .contiguous()
+ )
+ reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
+ masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
+
+ if not return_dict:
+ output = (reconstructed_pixel_values,) + outputs[2:]
+ return ((masked_im_loss,) + output) if masked_im_loss is not None else output
+
+ return Swinv2MaskedImageModelingOutput(
+ loss=masked_im_loss,
+ reconstruction=reconstructed_pixel_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
+ )
+
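+# Loss sketch (illustrative) for the masked-image-modeling head above, assuming
+# image_size=256 and patch_size=4: bool_masked_pos of shape (batch, 4096) is reshaped to
+# (batch, 64, 64) and expanded with repeat_interleave to a pixel-level mask of shape
+# (batch, 1, 256, 256); the L1 reconstruction loss is then averaged only over the masked
+# pixels (division by mask.sum()) and over the number of channels.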
+
+@add_start_docstrings(
+ """
+ Swinv2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
+ of the [CLS] token) e.g. for ImageNet.
+ """,
+ SWINV2_START_DOCSTRING,
+)
+# Copied from transformers.models.swin.modeling_swin.SwinForImageClassification with SWIN->SWINV2,Swin->Swinv2,swin->swinv2
+class Swinv2ForImageClassification(Swinv2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.swinv2 = Swinv2Model(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(self.swinv2.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(SWINV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=Swinv2ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Swinv2ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.swinv2(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Swinv2ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
+ )
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c09b683a3462564069a62157cd92fa674ae4ccd
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_time_series_transformer": [
+ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TimeSeriesTransformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_time_series_transformer"] = [
+ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TimeSeriesTransformerForPrediction",
+ "TimeSeriesTransformerModel",
+ "TimeSeriesTransformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_time_series_transformer import (
+ TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TimeSeriesTransformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_time_series_transformer import (
+ TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TimeSeriesTransformerForPrediction,
+ TimeSeriesTransformerModel,
+ TimeSeriesTransformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbc4eb3e1d3a7aa0a8b991459ec2ce58dadba22c
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..161168fdb1231bea5cca19449dfa7d308456570d
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..515c49783b7021dd6b27f8ab6d1804042b4b87dc
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9676b50ed0b954c2555b1c9e04bd504c1906a941
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py
@@ -0,0 +1,232 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Time Series Transformer model configuration"""
+
+from typing import List, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "huggingface/time-series-transformer-tourism-monthly": (
+ "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
+ ),
+ # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
+}
+
+
+class TimeSeriesTransformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to
+ instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series
+ Transformer
+ [huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ prediction_length (`int`):
+ The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
+ typically dictated by the dataset, and we recommend setting it appropriately.
+ context_length (`int`, *optional*, defaults to `prediction_length`):
+ The context length for the encoder. If `None`, the context length will be the same as the
+ `prediction_length`.
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
+ loss (`string`, *optional*, defaults to `"nll"`):
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
+ distributions it is the negative log likelihood (nll) - which currently is the only supported one.
+ input_size (`int`, *optional*, defaults to 1):
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
+ multivariate targets.
+ scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
+ Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
+ scaler is set to "mean".
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+ The lags of the input time series as covariates, often dictated by the frequency of the data. Defaults to
+ `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it appropriately for the dataset.
+ num_time_features (`int`, *optional*, defaults to 0):
+ The number of time features in the input time series.
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
+ The number of dynamic real valued features.
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
+ The number of static categorical features.
+ num_static_real_features (`int`, *optional*, defaults to 0):
+ The number of static real valued features.
+ cardinality (`list[int]`, *optional*):
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ embedding_dimension (`list[int]`, *optional*):
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ d_model (`int`, *optional*, defaults to 64):
+ Dimensionality of the transformer layers.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 2):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+ `"relu"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the encoder, and decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each encoder layer.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each decoder layer.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability used between the two layers of the feed-forward networks.
+ num_parallel_samples (`int`, *optional*, defaults to 100):
+ The number of samples to generate in parallel for each time step of inference.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated normal weight initialization distribution.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+
+ Example:
+
+ ```python
+ >>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
+
+ >>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
+ >>> configuration = TimeSeriesTransformerConfig(prediction_length=12)
+
+ >>> # Randomly initializing a model (with random weights) from the configuration
+ >>> model = TimeSeriesTransformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+ model_type = "time_series_transformer"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ "num_hidden_layers": "encoder_layers",
+ }
+
+ def __init__(
+ self,
+ prediction_length: Optional[int] = None,
+ context_length: Optional[int] = None,
+ distribution_output: str = "student_t",
+ loss: str = "nll",
+ input_size: int = 1,
+ lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
+ scaling: Optional[Union[str, bool]] = "mean",
+ num_dynamic_real_features: int = 0,
+ num_static_categorical_features: int = 0,
+ num_static_real_features: int = 0,
+ num_time_features: int = 0,
+ cardinality: Optional[List[int]] = None,
+ embedding_dimension: Optional[List[int]] = None,
+ encoder_ffn_dim: int = 32,
+ decoder_ffn_dim: int = 32,
+ encoder_attention_heads: int = 2,
+ decoder_attention_heads: int = 2,
+ encoder_layers: int = 2,
+ decoder_layers: int = 2,
+ is_encoder_decoder: bool = True,
+ activation_function: str = "gelu",
+ d_model: int = 64,
+ dropout: float = 0.1,
+ encoder_layerdrop: float = 0.1,
+ decoder_layerdrop: float = 0.1,
+ attention_dropout: float = 0.1,
+ activation_dropout: float = 0.1,
+ num_parallel_samples: int = 100,
+ init_std: float = 0.02,
+ use_cache=True,
+ **kwargs,
+ ):
+ # time series specific configuration
+ self.prediction_length = prediction_length
+ self.context_length = context_length or prediction_length
+ self.distribution_output = distribution_output
+ self.loss = loss
+ self.input_size = input_size
+ self.num_time_features = num_time_features
+ self.lags_sequence = lags_sequence
+ self.scaling = scaling
+ self.num_dynamic_real_features = num_dynamic_real_features
+ self.num_static_real_features = num_static_real_features
+ self.num_static_categorical_features = num_static_categorical_features
+ if cardinality and num_static_categorical_features > 0:
+ if len(cardinality) != num_static_categorical_features:
+ raise ValueError(
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.cardinality = cardinality
+ else:
+ self.cardinality = [0]
+ if embedding_dimension and num_static_categorical_features > 0:
+ if len(embedding_dimension) != num_static_categorical_features:
+ raise ValueError(
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.embedding_dimension = embedding_dimension
+ else:
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+ self.num_parallel_samples = num_parallel_samples
+
+ # Transformer architecture configuration
+ self.feature_size = input_size * len(lags_sequence) + self._number_of_features
+ self.d_model = d_model
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_attention_heads = decoder_attention_heads
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.decoder_layers = decoder_layers
+
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+
+ self.activation_function = activation_function
+ self.init_std = init_std
+
+ self.use_cache = use_cache
+
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def _number_of_features(self) -> int:
+ return (
+ sum(self.embedding_dimension)
+ + self.num_dynamic_real_features
+ + self.num_time_features
+ + self.num_static_real_features
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
+ )
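+
+
+# A minimal worked example (illustrative only, assuming the defaults above): with
+# `input_size=1`, `lags_sequence=[1, 2, 3, 4, 5, 6, 7]` and no static or dynamic covariates,
+# `cardinality` falls back to `[0]` and `embedding_dimension` to `[0]`, so
+# `_number_of_features` is 0 + 0 + 0 + 0 + 1 * 2 = 2 and
+# `feature_size` is `input_size * len(lags_sequence) + 2` = 1 * 7 + 2 = 9:
+#
+#   config = TimeSeriesTransformerConfig(prediction_length=24)
+#   assert config.feature_size == 9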
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..2caca5bd1051319d1c164fb846ffca8205524936
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py
@@ -0,0 +1,1834 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Time Series Transformer model."""
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ SampleTSPredictionOutput,
+ Seq2SeqTSModelOutput,
+ Seq2SeqTSPredictionOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_time_series_transformer import TimeSeriesTransformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TimeSeriesTransformerConfig"
+
+
+TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "huggingface/time-series-transformer-tourism-monthly",
+ # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
+]
+
+
+class TimeSeriesFeatureEmbedder(nn.Module):
+ """
+ Embed a sequence of categorical features.
+
+ Args:
+ cardinalities (`list[int]`):
+ List of cardinalities of the categorical features.
+ embedding_dims (`list[int]`):
+ List of embedding dimensions of the categorical features.
+ """
+
+ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
+ super().__init__()
+
+ self.num_features = len(cardinalities)
+ self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ if self.num_features > 1:
+ # we slice the last dimension, giving an array of length
+ # self.num_features with shape (N,T) or (N)
+ cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
+ else:
+ cat_feature_slices = [features]
+
+ return torch.cat(
+ [
+ embed(cat_feature_slice.squeeze(-1))
+ for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
+ ],
+ dim=-1,
+ )
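+
+# A minimal usage sketch (illustrative only; the cardinalities, embedding sizes and batch
+# shape below are arbitrary assumptions, not values from the library): two static
+# categorical features are embedded separately and concatenated along the last dimension.
+#
+#   embedder = TimeSeriesFeatureEmbedder(cardinalities=[5, 10], embedding_dims=[3, 4])
+#   cat_features = torch.randint(0, 5, (8, 2))   # (batch, num_features), indices < cardinality
+#   out = embedder(cat_features)                 # shape: (8, 3 + 4) == (8, 7)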
+
+
+class TimeSeriesStdScaler(nn.Module):
+ """
+    Standardize features by calculating the mean and standard deviation along the given dimension `dim`, and then
+    normalize the data by subtracting the mean and dividing by the standard deviation.
+
+ Args:
+ dim (`int`):
+ Dimension along which to calculate the mean and standard deviation.
+ keepdim (`bool`, *optional*, defaults to `False`):
+ Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.
+ minimum_scale (`float`, *optional*, defaults to 1e-5):
+ Default scale that is used for elements that are constantly zero along dimension `dim`.
+ """
+
+ def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-5):
+ super().__init__()
+ if not dim > 0:
+ raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0")
+ self.dim = dim
+ self.keepdim = keepdim
+ self.minimum_scale = minimum_scale
+
+ @torch.no_grad()
+ def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ denominator = weights.sum(self.dim, keepdim=self.keepdim)
+ denominator = denominator.clamp_min(1.0)
+ loc = (data * weights).sum(self.dim, keepdim=self.keepdim) / denominator
+
+ variance = (((data - loc) * weights) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
+ scale = torch.sqrt(variance + self.minimum_scale)
+ return (data - loc) / scale, loc, scale
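+
+# Illustrative usage sketch (assumed shapes, not from the library): with `dim=1` and
+# `keepdim=True`, as instantiated by `TimeSeriesTransformerModel`, the scaler returns the
+# standardized series plus per-series location and scale of shape (batch, 1, input_size).
+#
+#   scaler = TimeSeriesStdScaler(dim=1, keepdim=True)
+#   data = torch.randn(4, 24, 1)                 # (batch, context_length, input_size)
+#   weights = torch.ones_like(data)              # 1.0 where the value was observed
+#   scaled, loc, scale = scaler(data, weights)   # loc.shape == scale.shape == (4, 1, 1)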
+
+
+class TimeSeriesMeanScaler(nn.Module):
+ """
+ Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data
+ accordingly.
+
+ Args:
+ dim (`int`):
+ Dimension along which to compute the scale.
+ keepdim (`bool`, *optional*, defaults to `False`):
+ Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.
+ default_scale (`float`, *optional*, defaults to `None`):
+ Default scale that is used for elements that are constantly zero. If `None`, we use the scale of the batch.
+ minimum_scale (`float`, *optional*, defaults to 1e-10):
+ Default minimum possible scale that is used for any item.
+ """
+
+ def __init__(
+ self, dim: int = -1, keepdim: bool = True, default_scale: Optional[float] = None, minimum_scale: float = 1e-10
+ ):
+ super().__init__()
+ self.dim = dim
+ self.keepdim = keepdim
+ self.minimum_scale = minimum_scale
+ self.default_scale = default_scale
+
+ @torch.no_grad()
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ # shape: (N, [C], T=1)
+ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
+ num_observed = observed_indicator.sum(self.dim, keepdim=True)
+
+ scale = ts_sum / torch.clamp(num_observed, min=1)
+
+ # If `default_scale` is provided, we use it, otherwise we use the scale
+ # of the batch.
+ if self.default_scale is None:
+ batch_sum = ts_sum.sum(dim=0)
+ batch_observations = torch.clamp(num_observed.sum(0), min=1)
+ default_scale = torch.squeeze(batch_sum / batch_observations)
+ else:
+ default_scale = self.default_scale * torch.ones_like(scale)
+
+ # apply default scale where there are no observations
+ scale = torch.where(num_observed > 0, scale, default_scale)
+
+ # ensure the scale is at least `self.minimum_scale`
+ scale = torch.clamp(scale, min=self.minimum_scale)
+ scaled_data = data / scale
+
+ if not self.keepdim:
+ scale = scale.squeeze(dim=self.dim)
+
+ return scaled_data, torch.zeros_like(scale), scale
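+
+# Illustrative sketch (assumed values, not from the library): the mean scaler divides each
+# series by its mean absolute value over the observed steps and always returns `loc == 0`.
+#
+#   scaler = TimeSeriesMeanScaler(dim=1, keepdim=True)
+#   data = torch.tensor([[[2.0], [4.0], [6.0]]])     # one series of length 3
+#   observed = torch.ones_like(data)
+#   scaled, loc, scale = scaler(data, observed)      # scale == 4.0, scaled == data / 4.0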
+
+
+class TimeSeriesNOPScaler(nn.Module):
+ """
+ Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data.
+
+ Args:
+ dim (`int`):
+ Dimension along which to compute the scale.
+ keepdim (`bool`, *optional*, defaults to `False`):
+ Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it.
+ """
+
+ def __init__(self, dim: int, keepdim: bool = False):
+ super().__init__()
+ self.dim = dim
+ self.keepdim = keepdim
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ return data, loc, scale
+
+
+def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
+ """
+ Computes the negative log likelihood loss from input distribution with respect to target.
+ """
+ return -input.log_prob(target)
+
+
+def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
+ """
+ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
+ meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
+
+ Args:
+ input_tensor (`torch.FloatTensor`):
+ Input tensor, of which the average must be computed.
+ weights (`torch.FloatTensor`, *optional*):
+ Weights tensor, of the same shape as `input_tensor`.
+ dim (`int`, *optional*):
+ The dim along which to average `input_tensor`.
+
+ Returns:
+ `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
+ """
+ if weights is not None:
+ weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
+ sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
+ return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
+ else:
+ return input_tensor.mean(dim=dim)
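+
+# Illustrative sketch (assumed values): entries with zero weight are masked out instead of
+# propagating NaNs, and the divisor is the weight sum clamped to at least 1.
+#
+#   values = torch.tensor([1.0, float("nan"), 3.0])
+#   weights = torch.tensor([1.0, 0.0, 1.0])
+#   weighted_average(values, weights)   # -> tensor(2.), the NaN entry is ignored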
+
+
+# Copied from transformers.models.bart.modeling_bart._make_causal_mask
+def _make_causal_mask(
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+ """
+    Make causal mask used for uni-directional (causal) self-attention.
+ """
+ bsz, tgt_len = input_ids_shape
+ mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+ mask_cond = torch.arange(mask.size(-1), device=device)
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+ mask = mask.to(dtype)
+
+ if past_key_values_length > 0:
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
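+
+# Illustrative sketch (assumed shapes): for a target length of 3 the mask is 0 on and below
+# the diagonal and the most negative finite value above it, so once it is added to the
+# attention scores each position can only attend to itself and to earlier positions.
+#
+#   mask = _make_causal_mask(torch.Size([1, 3]), torch.float32, torch.device("cpu"))
+#   # mask.shape == (1, 1, 3, 3); with m = torch.finfo(torch.float32).min, mask[0, 0] is
+#   #   [[0, m, m],
+#   #    [0, 0, m],
+#   #    [0, 0, 0]]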
+
+
+# Copied from transformers.models.bart.modeling_bart._expand_mask
+def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ bsz, src_len = mask.size()
+ tgt_len = tgt_len if tgt_len is not None else src_len
+
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+ inverted_mask = 1.0 - expanded_mask
+
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
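+
+# Illustrative sketch (assumed shapes): a padding mask of 1s (observed) and 0s (padding) is
+# inverted and broadcast so that padded positions become the most negative finite value when
+# added to the attention scores.
+#
+#   pad_mask = torch.tensor([[1, 1, 0]])              # (bsz=1, src_len=3)
+#   expanded = _expand_mask(pad_mask, torch.float32)  # (1, 1, 3, 3)
+#   # expanded[..., :2] == 0.0, expanded[..., 2] == torch.finfo(torch.float32).min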
+
+
+# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->TimeSeries
+class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+ super().__init__(num_positions, embedding_dim)
+ self.weight = self._init_weight(self.weight)
+
+ @staticmethod
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
+ """
+        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
+        the 2nd half of the vector (`[dim // 2:]`).
+ """
+ n_pos, dim = out.shape
+ position_enc = np.array(
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+ )
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+ return out
+
+ @torch.no_grad()
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
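+
+# Illustrative sketch (assumed sizes): the table is indexed by position rather than token id,
+# so the forward pass only needs the input shape plus an offset for cached decoding.
+#
+#   pos_emb = TimeSeriesSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
+#   emb = pos_emb(torch.Size([4, 10]))                                     # rows 0..9  -> (10, 64)
+#   emb_shifted = pos_emb(torch.Size([4, 10]), past_key_values_length=5)   # rows 5..14 -> (10, 64)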
+
+
+class TimeSeriesValueEmbedding(nn.Module):
+ def __init__(self, feature_size, d_model):
+ super().__init__()
+ self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
+
+ def forward(self, x):
+ return self.value_projection(x)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->TimeSeriesTransformer
+class TimeSeriesTransformerAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->TimeSeriesTransformer
+class TimeSeriesTransformerEncoderLayer(nn.Module):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = TimeSeriesTransformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer
+class TimeSeriesTransformerDecoderLayer(nn.Module):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = TimeSeriesTransformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = TimeSeriesTransformerAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class TimeSeriesTransformerPreTrainedModel(PreTrainedModel):
+ config_class = TimeSeriesTransformerConfig
+ base_model_prefix = "model"
+ main_input_name = "past_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, TimeSeriesSinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (TimeSeriesTransformerDecoder, TimeSeriesTransformerEncoder)):
+ module.gradient_checkpointing = value
+
+
+TIME_SERIES_TRANSFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TimeSeriesTransformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, that serve as context in order to predict the future. The sequence size of
+ this tensor must be larger than the `context_length` of the model, since the model will use the larger size
+ to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
+ context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
+ `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
+ the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
+ `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step. Holiday features are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+            Transformer requires these additional time features to be provided. The Time Series Transformer only learns
+ additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+            must be known at prediction time.
+
+            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to the
+ values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
+ Future values of the time series, that serve as labels for the model. The `future_values` is what the
+ Transformer needs during training to learn to output, given the `past_values`.
+
+ The sequence length here is equal to `prediction_length`.
+
+ See the demo notebook and code snippets for details.
+
+ Optionally, during training any missing values need to be replaced with zeros and indicated via the
+ `future_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to `future_values`.
+ These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
+ Fourier features). These could also be so-called "age" features, which basically help the model know "at
+ which point in life" a time-series is. Age features have small values for distant past time steps and
+ increase monotonically the more we approach the current time step. Holiday features are also a good example
+ of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+            Transformer requires these additional time features to be provided. The Time Series Transformer only learns
+ additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+            must be known at prediction time.
+
+            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ This mask is used to filter out missing values for the final loss calculation.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to
+ make sure the model can only look at previous inputs in order to predict the future.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consisting of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TimeSeriesTransformerEncoderLayer`].
+
+ Args:
+ config: TimeSeriesTransformerConfig
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size())
+
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs, output_attentions)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(encoder_layer),
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
+ [`TimeSeriesTransformerDecoderLayer`]
+
+ Args:
+ config: TimeSeriesTransformerConfig
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+ # create causal mask
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ combined_attention_mask = None
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(
+ input_shape,
+ inputs_embeds.dtype,
+ device=inputs_embeds.device,
+ past_key_values_length=past_key_values_length,
+ )
+
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+ inputs_embeds.device
+ )
+ combined_attention_mask = (
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+ )
+
+ return combined_attention_mask
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = inputs_embeds.size()[:-1]
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ attention_mask = self._prepare_decoder_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+                        f" {attn_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ # None for past_key_value
+ return module(*inputs, output_attentions, use_cache)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(decoder_layer),
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Time Series Transformer Model outputting raw hidden-states without any specific head on top.",
+ TIME_SERIES_TRANSFORMER_START_DOCSTRING,
+)
+class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+
+ if config.scaling == "mean" or config.scaling is True:
+ self.scaler = TimeSeriesMeanScaler(dim=1, keepdim=True)
+ elif config.scaling == "std":
+ self.scaler = TimeSeriesStdScaler(dim=1, keepdim=True)
+ else:
+ self.scaler = TimeSeriesNOPScaler(dim=1, keepdim=True)
+
+ if config.num_static_categorical_features > 0:
+ self.embedder = TimeSeriesFeatureEmbedder(
+ cardinalities=config.cardinality,
+ embedding_dims=config.embedding_dimension,
+ )
+
+ # transformer encoder-decoder and mask initializer
+ self.encoder = TimeSeriesTransformerEncoder(config)
+ self.decoder = TimeSeriesTransformerDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def _past_length(self) -> int:
+ return self.config.context_length + max(self.config.lags_sequence)
+
+ def get_lagged_subsequences(
+ self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
+ ) -> torch.Tensor:
+ """
+ Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),
+ where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,
+ j, :, k] = sequence[i, -indices[k]-S+j, :].
+
+ Args:
+ sequence: Tensor
+ The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
+ subsequences_length : int
+ Length of the subsequences to be extracted.
+ shift: int
+ Shift the lags by this amount back.
+ """
+ sequence_length = sequence.shape[1]
+ indices = [lag - shift for lag in self.config.lags_sequence]
+
+ if max(indices) + subsequences_length > sequence_length:
+ raise ValueError(
+ f"lags cannot go further than history length, found lag {max(indices)} "
+ f"while history length is only {sequence_length}"
+ )
+
+ lagged_values = []
+ for lag_index in indices:
+ begin_index = -lag_index - subsequences_length
+ end_index = -lag_index if lag_index > 0 else None
+ lagged_values.append(sequence[:, begin_index:end_index, ...])
+ return torch.stack(lagged_values, dim=-1)
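+
+    # Illustrative note (assumed numbers, not from the library): with the default
+    # `lags_sequence` of [1, ..., 7], a past window of length 24 + 7 = 31 and
+    # `subsequences_length=24`, each lag k contributes the slice
+    # `sequence[:, -k - 24 : -k]`, and the stacked result has shape
+    # (batch, 24, input_size, 7).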
+
+ def create_network_inputs(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ ):
+ # time feature
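+ # during training, time features for the context window and the prediction window are
+ # concatenated (future covariates are known ahead of time); at inference only the
+ # context-window features are used here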
+ time_feat = (
+ torch.cat(
+ (
+ past_time_features[:, self._past_length - self.config.context_length :, ...],
+ future_time_features,
+ ),
+ dim=1,
+ )
+ if future_values is not None
+ else past_time_features[:, self._past_length - self.config.context_length :, ...]
+ )
+
+ # target
+ if past_observed_mask is None:
+ past_observed_mask = torch.ones_like(past_values)
+
+ context = past_values[:, -self.config.context_length :]
+ observed_context = past_observed_mask[:, -self.config.context_length :]
+ _, loc, scale = self.scaler(context, observed_context)
+
+ inputs = (
+ (torch.cat((past_values, future_values), dim=1) - loc) / scale
+ if future_values is not None
+ else (past_values - loc) / scale
+ )
+
+ # static features
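+ # the scaler statistics (log1p of |loc| and log of scale) are appended as additional
+ # static real features, so the model can still condition on the original magnitude of
+ # the now-normalized target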
+ log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
+ log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
+ static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
+
+ if static_real_features is not None:
+ static_feat = torch.cat((static_real_features, static_feat), dim=1)
+ if static_categorical_features is not None:
+ embedded_cat = self.embedder(static_categorical_features)
+ static_feat = torch.cat((embedded_cat, static_feat), dim=1)
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
+
+ # all features
+ features = torch.cat((expanded_static_feat, time_feat), dim=-1)
+
+ # lagged features
+ subsequences_length = (
+ self.config.context_length + self.config.prediction_length
+ if future_values is not None
+ else self.config.context_length
+ )
+ lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
+ raise ValueError(
+ f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match"
+ )
+
+ # transformer inputs
+ transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1)
+
+ return transformer_inputs, loc, scale, static_feat
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import TimeSeriesTransformerModel
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_inputs, loc, scale, static_feat = self.create_network_inputs(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ )
+
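+ # transformer_inputs covers the context window (plus the prediction window when future
+ # values are given): the first context_length steps feed the encoder, the remainder the decoder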
+ if encoder_outputs is None:
+ enc_input = transformer_inputs[:, : self.config.context_length, ...]
+ encoder_outputs = self.encoder(
+ inputs_embeds=enc_input,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ dec_input = transformer_inputs[:, self.config.context_length :, ...]
+ decoder_outputs = self.decoder(
+ inputs_embeds=dec_input,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
+
+ return Seq2SeqTSModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ loc=loc,
+ scale=scale,
+ static_features=static_feat,
+ )
+
+
+@add_start_docstrings(
+ "The Time Series Transformer Model with a distribution head on top for time-series forecasting.",
+ TIME_SERIES_TRANSFORMER_START_DOCSTRING,
+)
+class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+ self.model = TimeSeriesTransformerModel(config)
+ if config.distribution_output == "student_t":
+ self.distribution_output = StudentTOutput(dim=config.input_size)
+ elif config.distribution_output == "normal":
+ self.distribution_output = NormalOutput(dim=config.input_size)
+ elif config.distribution_output == "negative_binomial":
+ self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
+ else:
+ raise ValueError(f"Unknown distribution output {config.distribution_output}")
+
+ self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
+ self.target_shape = self.distribution_output.event_shape
+
+ if config.loss == "nll":
+ self.loss = nll
+ else:
+ raise ValueError(f"Unknown loss function {config.loss}")
+
+ # Initialize weights of distribution_output and apply final processing
+ self.post_init()
+
+ def output_params(self, dec_output):
+ return self.parameter_projection(dec_output)
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ @torch.jit.ignore
+ def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
+ sliced_params = params
+ if trailing_n is not None:
+ sliced_params = [p[:, -trailing_n:] for p in params]
+ return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
+
+ @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ future_observed_mask: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import TimeSeriesTransformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = TimeSeriesTransformerForPrediction.from_pretrained(
+ ... "huggingface/time-series-transformer-tourism-monthly"
+ ... )
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # during inference, one only provides past values
+ >>> # as well as possible additional features
+ >>> # the model autoregressively generates future values
+ >>> outputs = model.generate(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> mean_prediction = outputs.sequences.mean(dim=1)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
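+ # with teacher forcing (future_values given) the full decoder input is known up front,
+ # so there is nothing to gain from caching past key/values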
+ if future_values is not None:
+ use_cache = False
+
+ outputs = self.model(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ prediction_loss = None
+ params = None
+ if future_values is not None:
+ params = self.output_params(outputs[0]) # outputs.last_hidden_state
+ # loc is 3rd last and scale is 2nd last output
+ distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
+
+ loss = self.loss(distribution, future_values)
+
+ if future_observed_mask is None:
+ future_observed_mask = torch.ones_like(future_values)
+
+ if len(self.target_shape) == 0:
+ loss_weights = future_observed_mask
+ else:
+ loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
+
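+ # average the per-step NLL with the observation mask as weights, so that missing
+ # (unobserved) future values do not contribute to the loss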
+ prediction_loss = weighted_average(loss, weights=loss_weights)
+
+ if not return_dict:
+ outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:]
+ return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
+
+ return Seq2SeqTSPredictionOutput(
+ loss=prediction_loss,
+ params=params,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loc=outputs.loc,
+ scale=outputs.scale,
+ static_features=outputs.static_features,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ future_time_features: torch.Tensor,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> SampleTSPredictionOutput:
+ r"""
+ Greedily generate sequences of sample predictions from a model with a probability distribution head.
+
+ Parameters:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, that serve as context in order to predict the future. The sequence size
+ of this tensor must be larger than the `context_length` of the model, since the model will use the
+ larger size to construct lag features, i.e. additional values from the past which are added in order to
+ serve as "extra context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if
+ no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
+ of the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features,
+ such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Missing values should be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
+ of variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things
+ like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
+ These could also be so-called "age" features, which basically help the model know "at which point in
+ life" a time-series is. Age features have small values for distant past time steps and increase
+ monotonically the more we approach the current time step. Holiday features are also a good example of
+ time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires to provide additional time features. The Time Series Transformer only
+ learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to sampled
+ predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
+ (for instance as Fourier features). These could also be so-called "age" features, which basically help
+ the model know "at which point in life" a time-series is. Age features have small values for distant
+ past time steps and increase monotonically the more we approach the current time step. Holiday features
+ are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires additional time features to be provided. The Time Series Transformer only
+ learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to
+ the values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over
+ time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers.
+
+ Return:
+ [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
+ samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
+ multivariate predictions.
+ """
+ outputs = self(
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ past_time_features=past_time_features,
+ past_values=past_values,
+ past_observed_mask=past_observed_mask,
+ future_time_features=future_time_features,
+ future_values=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ use_cache=True,
+ )
+
+ decoder = self.model.get_decoder()
+ enc_last_hidden = outputs.encoder_last_hidden_state
+ loc = outputs.loc
+ scale = outputs.scale
+ static_feat = outputs.static_features
+
+ num_parallel_samples = self.config.num_parallel_samples
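+ # every tensor is repeated num_parallel_samples times along the batch dimension, so a
+ # single batched pass draws that many Monte Carlo sample paths per time series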
+ repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
+ repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_past_values = (
+ past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
+ ) / repeated_scale
+
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
+ features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
+ repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ future_samples = []
+
+ # greedy decoding
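+ # each iteration appends one sampled step to the (repeated, normalized) history and
+ # re-runs the decoder on the whole prediction prefix, attending to the encoder output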
+ for k in range(self.config.prediction_length):
+ lagged_sequence = self.model.get_lagged_subsequences(
+ sequence=repeated_past_values,
+ subsequences_length=1 + k,
+ shift=1,
+ )
+
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1)
+
+ dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
+ dec_last_hidden = dec_output.last_hidden_state
+
+ params = self.parameter_projection(dec_last_hidden[:, -1:])
+ distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
+ next_sample = distr.sample()
+
+ repeated_past_values = torch.cat(
+ (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1
+ )
+ future_samples.append(next_sample)
+
+ concat_future_samples = torch.cat(future_samples, dim=1)
+
+ return SampleTSPredictionOutput(
+ sequences=concat_future_samples.reshape(
+ (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
+ )
+ )
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__init__.py b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..35081cfcdef97b99e1a3cc29461fa07c80f31ab8
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__init__.py
@@ -0,0 +1,70 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_wav2vec2_conformer": [
+ "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "Wav2Vec2ConformerConfig",
+ ],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_wav2vec2_conformer"] = [
+ "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_wav2vec2_conformer import (
+ WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ Wav2Vec2ConformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_wav2vec2_conformer import (
+ WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ Wav2Vec2ConformerForAudioFrameClassification,
+ Wav2Vec2ConformerForCTC,
+ Wav2Vec2ConformerForPreTraining,
+ Wav2Vec2ConformerForSequenceClassification,
+ Wav2Vec2ConformerForXVector,
+ Wav2Vec2ConformerModel,
+ Wav2Vec2ConformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/configuration_wav2vec2_conformer.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/configuration_wav2vec2_conformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e787136f0286841de21b96378effe8587b283c86
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/configuration_wav2vec2_conformer.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/modeling_wav2vec2_conformer.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/modeling_wav2vec2_conformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a83121c05e48524dec1997f8ab0a4fd1c5bc0c99
Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/__pycache__/modeling_wav2vec2_conformer.cpython-310.pyc differ
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..24b7dca73944d1d7f7e9d609d6ac2a4d7ec6100f
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py
@@ -0,0 +1,360 @@
+# coding=utf-8
+# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Wav2Vec2Conformer model configuration"""
+
+import functools
+import operator
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/wav2vec2-conformer-rel-pos-large": (
+ "https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large/resolve/main/config.json"
+ ),
+}
+
+
+class Wav2Vec2ConformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Wav2Vec2ConformerModel`]. It is used to
+ instantiate a Wav2Vec2Conformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Conformer
+ [facebook/wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*):
+ Vocabulary size of the Wav2Vec2Conformer model. Defines the number of different tokens that can be
+ represented by the `inputs_ids` passed when calling [`Wav2Vec2ConformerModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ final_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the final projection layer of [`Wav2Vec2ConformerForCTC`].
+ layerdrop (`float`, *optional*, defaults to 0.1):
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
+ details.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
+ convolutional layers.
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for output of the feature encoder.
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for quantized feature encoder states.
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
+ *conv_dim*.
+ conv_bias (`bool`, *optional*, defaults to `False`):
+ Whether the 1D convolutional layers have a bias.
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
+ embeddings layer.
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
+ Number of groups of 1D convolutional positional embeddings layer.
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
+ Recognition](https://arxiv.org/abs/1904.08779).
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
+ procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
+ mask_time_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the time axis.
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
+ irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
+ mask_time_min_masks''
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
+ masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks over
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
+ True`.
+ mask_feature_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the feature axis.
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
+ step, irrespectively of `mask_feature_prob`. Only relevant if
+ ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
+ num_codevectors_per_group (`int`, *optional*, defaults to 320):
+ Number of entries in each quantization codebook (group).
+ num_codevector_groups (`int`, *optional*, defaults to 2):
+ Number of codevector groups for product codevector quantization.
+ contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
+ The temperature *kappa* in the contrastive loss.
+ feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for the output of the feature encoder that's used by the quantizer.
+ num_negatives (`int`, *optional*, defaults to 100):
+ Number of negative samples for the contrastive loss.
+ codevector_dim (`int`, *optional*, defaults to 256):
+ Dimensionality of the quantized feature vectors.
+ proj_codevector_dim (`int`, *optional*, defaults to 256):
+ Dimensionality of the final projection of both the quantized and the transformer features.
+ diversity_loss_weight (`float`, *optional*, defaults to 0.1):
+ The weight of the codebook diversity loss component.
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+ instance of [`Wav2Vec2ConformerForCTC`].
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
+ of [`Wav2Vec2ConformerForCTC`].
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
+ instance of [`Wav2Vec2ConformerForSequenceClassification`].
+ classifier_proj_size (`int`, *optional*, defaults to 256):
+ Dimensionality of the projection before token mean-pooling for classification.
+ tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
+ A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
+ module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
+ tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
+ *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
+ tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
+ A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
+ *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
+ xvector_output_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of the *XVector* embedding vectors.
+ add_adapter (`bool`, *optional*, defaults to `False`):
+ Whether a convolutional network should be stacked on top of the Wav2Vec2Conformer Encoder. Can be very
+ useful for warm-starting Wav2Vec2Conformer for SpeechEncoderDecoder models.
+ adapter_kernel_size (`int`, *optional*, defaults to 3):
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
+ adapter_stride (`int`, *optional*, defaults to 2):
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
+ num_adapter_layers (`int`, *optional*, defaults to 3):
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
+ True`.
+ output_hidden_size (`int`, *optional*):
+ Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
+ if `add_adapter is True`.
+ position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
+ Can be set to `"relative"` or `"rotary"` for relative or rotary position embeddings respectively. If left to
+ `None`, no position embeddings are added.
+ rotary_embedding_base (`int`, *optional*, defaults to 10000):
+ If `"rotary"` position embeddings are used, defines the size of the embedding base.
+ max_source_positions (`int`, *optional*, defaults to 5000):
+ if `"relative"` position embeddings are used, defines the maximum source input positions.
+ conv_depthwise_kernel_size (`int`, defaults to 31):
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks.
+ conformer_conv_dropout (`float`, defaults to 0.1):
+ The dropout probability for all convolutional layers in Conformer blocks.
+
+ Example:
+
+ ```python
+ >>> from transformers import Wav2Vec2ConformerConfig, Wav2Vec2ConformerModel
+
+ >>> # Initializing a Wav2Vec2Conformer facebook/wav2vec2-conformer-rel-pos-large style configuration
+ >>> configuration = Wav2Vec2ConformerConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/wav2vec2-conformer-rel-pos-large style configuration
+ >>> model = Wav2Vec2ConformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+ model_type = "wav2vec2-conformer"
+
+ def __init__(
+ self,
+ vocab_size=None,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout=0.1,
+ activation_dropout=0.1,
+ attention_dropout=0.1,
+ feat_proj_dropout=0.0,
+ feat_quantizer_dropout=0.0,
+ final_dropout=0.1,
+ layerdrop=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ feat_extract_norm="group",
+ feat_extract_activation="gelu",
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
+ conv_bias=False,
+ num_conv_pos_embeddings=128,
+ num_conv_pos_embedding_groups=16,
+ apply_spec_augment=True,
+ mask_time_prob=0.05,
+ mask_time_length=10,
+ mask_time_min_masks=2,
+ mask_feature_prob=0.0,
+ mask_feature_length=10,
+ mask_feature_min_masks=0,
+ num_codevectors_per_group=320,
+ num_codevector_groups=2,
+ contrastive_logits_temperature=0.1,
+ num_negatives=100,
+ codevector_dim=256,
+ proj_codevector_dim=256,
+ diversity_loss_weight=0.1,
+ ctc_loss_reduction="sum",
+ ctc_zero_infinity=False,
+ use_weighted_layer_sum=False,
+ classifier_proj_size=256,
+ tdnn_dim=(512, 512, 512, 512, 1500),
+ tdnn_kernel=(5, 3, 3, 1, 1),
+ tdnn_dilation=(1, 2, 3, 1, 1),
+ xvector_output_dim=512,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ add_adapter=False,
+ adapter_kernel_size=3,
+ adapter_stride=2,
+ num_adapter_layers=3,
+ output_hidden_size=None,
+ position_embeddings_type="relative",
+ rotary_embedding_base=10000,
+ max_source_positions=5000,
+ conv_depthwise_kernel_size=31,
+ conformer_conv_dropout=0.1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+ self.hidden_size = hidden_size
+ self.feat_extract_norm = feat_extract_norm
+ self.feat_extract_activation = feat_extract_activation
+ self.conv_dim = list(conv_dim)
+ self.conv_stride = list(conv_stride)
+ self.conv_kernel = list(conv_kernel)
+ self.conv_bias = conv_bias
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.num_attention_heads = num_attention_heads
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.feat_proj_dropout = feat_proj_dropout
+ self.final_dropout = final_dropout
+ self.layerdrop = layerdrop
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ self.vocab_size = vocab_size
+ self.use_weighted_layer_sum = use_weighted_layer_sum
+ self.max_source_positions = max_source_positions
+ self.position_embeddings_type = position_embeddings_type
+ self.rotary_embedding_base = rotary_embedding_base
+
+ if (
+ (len(self.conv_stride) != self.num_feat_extract_layers)
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
+ ):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
+ )
+
+ # Conformer-block related
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
+ self.conformer_conv_dropout = conformer_conv_dropout
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
+ self.apply_spec_augment = apply_spec_augment
+ self.mask_time_prob = mask_time_prob
+ self.mask_time_length = mask_time_length
+ self.mask_time_min_masks = mask_time_min_masks
+ self.mask_feature_prob = mask_feature_prob
+ self.mask_feature_length = mask_feature_length
+ self.mask_feature_min_masks = mask_feature_min_masks
+
+ # parameters for pretraining with codevector quantized representations
+ self.num_codevectors_per_group = num_codevectors_per_group
+ self.num_codevector_groups = num_codevector_groups
+ self.contrastive_logits_temperature = contrastive_logits_temperature
+ self.feat_quantizer_dropout = feat_quantizer_dropout
+ self.num_negatives = num_negatives
+ self.codevector_dim = codevector_dim
+ self.proj_codevector_dim = proj_codevector_dim
+ self.diversity_loss_weight = diversity_loss_weight
+
+ # ctc loss
+ self.ctc_loss_reduction = ctc_loss_reduction
+ self.ctc_zero_infinity = ctc_zero_infinity
+
+ # adapter
+ self.add_adapter = add_adapter
+ self.adapter_kernel_size = adapter_kernel_size
+ self.adapter_stride = adapter_stride
+ self.num_adapter_layers = num_adapter_layers
+ self.output_hidden_size = output_hidden_size or hidden_size
+
+ # SequenceClassification-specific parameter. Feel free to ignore for other classes.
+ self.classifier_proj_size = classifier_proj_size
+
+ # XVector-specific parameters. Feel free to ignore for other classes.
+ self.tdnn_dim = list(tdnn_dim)
+ self.tdnn_kernel = list(tdnn_kernel)
+ self.tdnn_dilation = list(tdnn_dilation)
+ self.xvector_output_dim = xvector_output_dim
+
+ @property
+ def inputs_to_logits_ratio(self):
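+ # overall downsampling factor of the feature encoder: the product of all conv strides,
+ # i.e. how many raw audio samples map to one encoder output frame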
+ return functools.reduce(operator.mul, self.conv_stride, 1)
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a882e95aba533ae1d37497ca74acd232ac39bc5
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,310 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Wav2Vec2Conformer checkpoint."""
+
+
+import argparse
+import json
+import os
+
+import fairseq
+import torch
+from fairseq.data import Dictionary
+
+from transformers import (
+ Wav2Vec2ConformerConfig,
+ Wav2Vec2ConformerForCTC,
+ Wav2Vec2ConformerForPreTraining,
+ Wav2Vec2CTCTokenizer,
+ Wav2Vec2FeatureExtractor,
+ Wav2Vec2Processor,
+ logging,
+)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
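+# MAPPING translates fairseq parameter-name fragments to their HF counterparts; keys whose
+# mapped value contains "*" are expanded with the encoder layer index at load time, and
+# entries listed in TOP_LEVEL_KEYS live outside the wav2vec2_conformer submodule (no prefix added).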
+MAPPING = {
+ "post_extract_proj": "feature_projection.projection",
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
+ "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
+ "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
+ "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
+ "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
+ "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
+ "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
+ "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
+ "self_attn.rotary_emb": "encoder.embed_positions",
+ "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
+ "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
+ "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
+ "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
+ "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
+ "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
+ "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
+ "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
+ "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
+ "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
+ "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
+ "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
+ "encoder.layer_norm": "encoder.layer_norm",
+ "w2v_model.layer_norm": "feature_projection.layer_norm",
+ "quantizer.weight_proj": "quantizer.weight_proj",
+ "quantizer.vars": "quantizer.codevectors",
+ "project_q": "project_q",
+ "final_proj": "project_hid",
+ "w2v_encoder.proj": "lm_head",
+ "mask_emb": "masked_spec_embed",
+}
+TOP_LEVEL_KEYS = [
+ "lm_head",
+ "quantizer.weight_proj",
+ "quantizer.codevectors",
+ "project_q",
+ "project_hid",
+]
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ if hf_shape != value.shape:
+ raise ValueError(
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ elif weight_type == "running_mean":
+ hf_pointer.running_mean.data = value
+ elif weight_type == "running_var":
+ hf_pointer.running_var.data = value
+ elif weight_type == "num_batches_tracked":
+ hf_pointer.num_batches_tracked.data = value
+ elif weight_type == "inv_freq":
+ hf_pointer.inv_freq.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
+
+
+def recursively_load_weights(fairseq_model, hf_model, is_headless):
+ unused_weights = []
+ fairseq_dict = fairseq_model.state_dict()
+
+ feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
+
+ for name, value in fairseq_dict.items():
+ is_used = False
+ if "conv_layers" in name:
+ load_conv_layer(
+ name,
+ value,
+ feature_extractor,
+ unused_weights,
+ hf_model.config.feat_extract_norm == "group",
+ )
+ is_used = True
+ else:
+ for key, mapped_key in MAPPING.items():
+ mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
+ if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "pos_bias_u" in name:
+ weight_type = None
+ elif "pos_bias_v" in name:
+ weight_type = None
+ elif "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "bias" in name:
+ weight_type = "bias"
+ elif "weight" in name:
+ # TODO: don't match quantizer.weight_proj
+ weight_type = "weight"
+ elif "running_mean" in name:
+ weight_type = "running_mean"
+ elif "inv_freq" in name:
+ weight_type = "inv_freq"
+ elif "running_var" in name:
+ weight_type = "running_var"
+ elif "num_batches_tracked" in name:
+ weight_type = "num_batches_tracked"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+# Copied from transformers.models.wav2vec2.convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.load_conv_layer
+def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
+ name = full_name.split("conv_layers.")[-1]
+ items = name.split(".")
+ layer_id = int(items[0])
+ type_id = int(items[1])
+
+ if type_id == 0:
+ if "bias" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
+ if "bias" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ else:
+ unused_weights.append(full_name)
+
+
+@torch.no_grad()
+def convert_wav2vec2_conformer_checkpoint(
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
+):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ if config_path is not None:
+ config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
+ else:
+ config = Wav2Vec2ConformerConfig()
+
+ if "rope" in checkpoint_path:
+ config.position_embeddings_type = "rotary"
+
+ if is_finetuned:
+ if dict_path:
+ target_dict = Dictionary.load(dict_path)
+
+ # important: change the bos & pad token ids, since the CTC symbol is <pad> and
+ # not <s> as in fairseq
+ config.bos_token_id = target_dict.pad_index
+ config.pad_token_id = target_dict.bos_index
+ config.eos_token_id = target_dict.eos_index
+ config.vocab_size = len(target_dict.symbols)
+ vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
+ if not os.path.isdir(pytorch_dump_folder_path):
+ logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
+ return
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ vocab_dict = target_dict.indices
+
+ # fairseq has the <pad> and <s> switched
+ vocab_dict["<pad>"] = 0
+ vocab_dict["<s>"] = 1
+ with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
+ json.dump(vocab_dict, vocab_handle)
+ tokenizer = Wav2Vec2CTCTokenizer(
+ vocab_path,
+ unk_token=target_dict.unk_word,
+ pad_token=target_dict.pad_word,
+ bos_token=target_dict.bos_word,
+ eos_token=target_dict.eos_word,
+ word_delimiter_token="|",
+ do_lower_case=False,
+ )
+ return_attention_mask = True if config.feat_extract_norm == "layer" else False
+ feature_extractor = Wav2Vec2FeatureExtractor(
+ feature_size=1,
+ sampling_rate=16000,
+ padding_value=0,
+ do_normalize=True,
+ return_attention_mask=return_attention_mask,
+ )
+ processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ hf_wav2vec = Wav2Vec2ConformerForCTC(config)
+ else:
+ hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
+
+ if is_finetuned:
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
+ [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
+ )
+ else:
+ task_arg = argparse.Namespace(task="audio_pretraining")
+ task = fairseq.tasks.setup_task(task_arg)
+
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
+
+ model = model[0].eval()
+
+ recursively_load_weights(model, hf_wav2vec, not is_finetuned)
+
+ hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
+ )
+ args = parser.parse_args()
+ convert_wav2vec2_conformer_checkpoint(
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
+ )
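+
+# Example invocation (all paths below are placeholders, not shipped files):
+#
+#   python <path to this conversion script> \
+#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
+#       --dict_path /path/to/fairseq/dict.ltr.txt \
+#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf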
diff --git a/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4392073b9a48bf0ab7147184096f3484bacaa08
--- /dev/null
+++ b/openflamingo/lib/python3.10/site-packages/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
@@ -0,0 +1,2124 @@
+# coding=utf-8
+# Copyright 2022 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Wav2Vec2-Conformer model."""
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ CausalLMOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+ Wav2Vec2BaseModelOutput,
+ XVectorOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_wav2vec2_conformer import Wav2Vec2ConformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_HIDDEN_STATES_START_POSITION = 2
+
+# General docstring
+_CONFIG_FOR_DOC = "Wav2Vec2ConformerConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/wav2vec2-conformer-rope-large-960h-ft"
+_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
+_CTC_EXPECTED_LOSS = 64.21
+
+
+WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/wav2vec2-conformer-rel-pos-large",
+ # See all Wav2Vec2Conformer models at https://huggingface.co/models?filter=wav2vec2-conformer
+]
+
+
+@dataclass
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`Wav2Vec2ConformerForPreTraining`], with potential hidden states and attentions.
+
+ Args:
+ loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
+ paper](https://arxiv.org/pdf/2006.11477.pdf).
+ projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
+ projected quantized states.
+ projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
+ target vectors for contrastive loss.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
+ The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) .
+ diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
+ The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) .
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ projected_states: torch.FloatTensor = None
+ projected_quantized_states: torch.FloatTensor = None
+ codevector_perplexity: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ contrastive_loss: Optional[torch.FloatTensor] = None
+ diversity_loss: Optional[torch.FloatTensor] = None
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[torch.LongTensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+ # this case can only happen if `input_length` is strictly smaller then
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ batch_size, max_num_masked_span * mask_length
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
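+
+
+# A minimal usage sketch (illustrative values only):
+#
+#   mask = _compute_mask_indices(shape=(2, 50), mask_prob=0.1, mask_length=4)
+#   # `mask` is a boolean numpy array of shape (2, 50); on the order of `mask_prob` of each
+#   # row is covered by spans of 4 consecutive True values (less where spans overlap)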
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices
+def _sample_negative_indices(
+ features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
+):
+ """
+ Sample `num_negatives` vectors from feature vectors.
+ """
+ batch_size, sequence_length = features_shape
+
+ # generate indices of the positive vectors themselves, repeat them `num_negatives` times
+ sequence_length_range = np.arange(sequence_length)
+
+ # get `num_negatives` random vector indices from the same utterance
+ sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
+
+ mask_time_indices = (
+ mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
+ )
+
+ for batch_idx in range(batch_size):
+ high = mask_time_indices[batch_idx].sum() - 1
+ mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
+
+ feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
+ sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
+ # avoid sampling the same positive vector, but keep the distribution uniform
+ sampled_indices[sampled_indices >= feature_indices] += 1
+
+ # remap to actual indices
+ sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
+
+ # correct for batch size
+ sampled_negative_indices[batch_idx] += batch_idx * sequence_length
+
+ return sampled_negative_indices
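+
+
+# Usage sketch (illustrative shapes): for features_shape=(2, 4) and num_negatives=2 the returned
+# int array has shape (2, 4, 2); entry [b, t] holds indices into the flattened (batch * time)
+# feature matrix, drawn from other masked time steps of the same utterance b.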
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerNoLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+
+ hidden_states = hidden_states.transpose(-2, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(-2, -1)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerGroupNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerPositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ )
+
+ weight_norm = nn.utils.weight_norm
+ if hasattr(nn.utils.parametrizations, "weight_norm"):
+ weight_norm = nn.utils.parametrizations.weight_norm
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = Wav2Vec2ConformerSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class Wav2Vec2ConformerRotaryPositionalEmbedding(nn.Module):
+ """Rotary positional embedding
+ Reference : https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ dim = config.hidden_size // config.num_attention_heads
+ base = config.rotary_embedding_base
+
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
+ self.register_buffer("inv_freq", inv_freq)
+ self.cached_sequence_length = None
+ self.cached_rotary_positional_embedding = None
+
+ def forward(self, hidden_states):
+ sequence_length = hidden_states.shape[1]
+
+ if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
+ return self.cached_rotary_positional_embedding
+
+ self.cached_sequence_length = sequence_length
+ time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
+ freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
+ embeddings = torch.cat((freqs, freqs), dim=-1)
+
+ cos_embeddings = embeddings.cos()[:, None, None, :]
+ sin_embeddings = embeddings.sin()[:, None, None, :]
+ self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings])
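+ # cached tensor shape: (2, sequence_length, 1, 1, head_dim); index 0 holds cos, index 1 holds sin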
+ return self.cached_rotary_positional_embedding
+
+
+class Wav2Vec2ConformerRelPositionalEmbedding(nn.Module):
+ """Relative positional encoding module."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.max_len = config.max_source_positions
+ self.d_model = config.hidden_size
+ self.pe = None
+ self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
+
+ def extend_pe(self, x):
+ # Reset the positional encodings
+ if self.pe is not None:
+ # self.pe contains both positive and negative parts
+ # the length of self.pe is 2 * input_len - 1
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+ return
+ # Suppose `i` is the position of query vector and `j` is the
+ # position of key vector. We use positive relative positions when keys
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
+ pe_positive = torch.zeros(x.size(1), self.d_model)
+ pe_negative = torch.zeros(x.size(1), self.d_model)
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+ div_term = torch.exp(
+ torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model)
+ )
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+ # Reverse the order of positive indices and concat both positive and
+ # negative indices. This is used to support the shifting trick
+ # as in https://arxiv.org/abs/1901.02860
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+ pe_negative = pe_negative[1:].unsqueeze(0)
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+ def forward(self, hidden_states: torch.Tensor):
+ self.extend_pe(hidden_states)
+ start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
+ end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
+ relative_position_embeddings = self.pe[:, start_idx:end_idx]
+ return relative_position_embeddings
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerSamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerFeatureEncoder(nn.Module):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [Wav2Vec2ConformerGroupNormConvLayer(config, layer_id=0)] + [
+ Wav2Vec2ConformerNoLayerNormConvLayer(config, layer_id=i + 1)
+ for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [
+ Wav2Vec2ConformerLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
+ ]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.ModuleList(conv_layers)
+ self.gradient_checkpointing = False
+ self._requires_grad = True
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+
+ # make sure hidden_states require grad for gradient_checkpointing
+ if self._requires_grad and self.training:
+ hidden_states.requires_grad = True
+
+ for conv_layer in self.conv_layers:
+ if self._requires_grad and self.gradient_checkpointing and self.training:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(conv_layer),
+ hidden_states,
+ )
+ else:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerFeatureProjection(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states, norm_hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerFeedForward(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2ConformerConvolutionModule(nn.Module):
+ """Convolution block used in the conformer block"""
+
+ def __init__(self, config):
+ super().__init__()
+ if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
+ raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+ self.pointwise_conv1 = torch.nn.Conv1d(
+ config.hidden_size,
+ 2 * config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.glu = torch.nn.GLU(dim=1)
+ self.depthwise_conv = torch.nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ config.conv_depthwise_kernel_size,
+ stride=1,
+ padding=(config.conv_depthwise_kernel_size - 1) // 2,
+ groups=config.hidden_size,
+ bias=False,
+ )
+ self.batch_norm = torch.nn.BatchNorm1d(config.hidden_size)
+ self.activation = ACT2FN[config.hidden_act]
+ self.pointwise_conv2 = torch.nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.dropout = torch.nn.Dropout(config.conformer_conv_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.layer_norm(hidden_states)
+ # exchange the temporal dimension and the feature dimension
+ hidden_states = hidden_states.transpose(1, 2)
+
+ # GLU mechanism
+ # => (batch, 2*channel, dim)
+ hidden_states = self.pointwise_conv1(hidden_states)
+ # => (batch, channel, dim)
+ hidden_states = self.glu(hidden_states)
+
+ # 1D Depthwise Conv
+ hidden_states = self.depthwise_conv(hidden_states)
+ hidden_states = self.batch_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = self.pointwise_conv2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class Wav2Vec2ConformerSelfAttention(nn.Module):
+ """Construct an Wav2Vec2ConformerSelfAttention object.
+ Can be enhanced with rotary or relative position embeddings.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.head_size = config.hidden_size // config.num_attention_heads
+ self.num_heads = config.num_attention_heads
+ self.position_embeddings_type = config.position_embeddings_type
+
+ self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
+
+ self.dropout = nn.Dropout(p=config.attention_dropout)
+
+ if self.position_embeddings_type == "relative":
+ # linear transformation for positional encoding
+ self.linear_pos = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+ # these two learnable biases are used in matrix c and matrix d
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
+ self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ relative_position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # self-attention mechanism
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ # make sure query/key states can be != value states
+ query_key_states = hidden_states
+ value_states = hidden_states
+
+ if self.position_embeddings_type == "rotary":
+ if relative_position_embeddings is None:
+ raise ValueError(
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'"
+ )
+ query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
+
+ # project query_key_states and value_states
+ query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
+
+ # => (batch, head, time1, d_k)
+ query = query.transpose(1, 2)
+ key = key.transpose(1, 2)
+ value = value.transpose(1, 2)
+
+ if self.position_embeddings_type == "relative":
+ if relative_position_embeddings is None:
+ raise ValueError(
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type =="
+ " 'relative'"
+ )
+ # apply relative_position_embeddings to qk scores
+ # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860
+ scores = self._apply_relative_embeddings(
+ query=query, key=key, relative_position_embeddings=relative_position_embeddings
+ )
+ else:
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
+
+ # apply attention_mask if necessary
+ if attention_mask is not None:
+ scores = scores + attention_mask
+
+ # => (batch, head, time1, time2)
+ probs = torch.softmax(scores, dim=-1)
+ probs = self.dropout(probs)
+
+ # => (batch, head, time1, d_k)
+ hidden_states = torch.matmul(probs, value)
+
+ # => (batch, time1, hidden_size)
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
+ hidden_states = self.linear_out(hidden_states)
+
+ return hidden_states, probs
+
+ def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
+
+ cos = relative_position_embeddings[0, :sequence_length, ...]
+ sin = relative_position_embeddings[1, :sequence_length, ...]
+
+ # rotate hidden_states with rotary embeddings
+ hidden_states = hidden_states.transpose(0, 1)
+ rotated_states_begin = hidden_states[..., : self.head_size // 2]
+ rotated_states_end = hidden_states[..., self.head_size // 2 :]
+ rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
+ hidden_states = (hidden_states * cos) + (rotated_states * sin)
+ hidden_states = hidden_states.transpose(0, 1)
+
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
+
+ return hidden_states
+
+ def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
+ # 1. project positional embeddings
+ # => (batch, head, 2*time1-1, d_k)
+ proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
+ proj_relative_position_embeddings = proj_relative_position_embeddings.view(
+ relative_position_embeddings.size(0), -1, self.num_heads, self.head_size
+ )
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
+
+ # 2. Add bias to query
+ # => (batch, head, time1, d_k)
+ query = query.transpose(1, 2)
+ q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
+ q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
+
+ # 3. attention score: first compute matrix a and matrix c
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ # => (batch, head, time1, time2)
+ scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
+
+ # 4. then compute matrix b and matrix d
+ # => (batch, head, time1, 2*time1-1)
+ scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
+
+ # 5. shift matrix b and matrix d
+ zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
+ scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
+ scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
+ scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
+ scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
+ scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1]
+
+ # 6. sum matrices
+ # => (batch, head, time1, time2)
+ scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
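+ # In Transformer-XL notation (https://arxiv.org/abs/1901.02860) the resulting score is
+ #     score_ij = ((q_i + u) . k_j + (q_i + v) . r_{i-j}) / sqrt(d_k)
+ # where u/v are pos_bias_u/pos_bias_v and r_{i-j} is the projected relative positional
+ # embedding; scores_ac is the content term and scores_bd the (shifted) positional term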
+
+ return scores
+
+
+class Wav2Vec2ConformerEncoderLayer(nn.Module):
+ """Conformer block based on https://arxiv.org/abs/2005.08100."""
+
+ def __init__(self, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ dropout = config.attention_dropout
+
+ # Feed-forward 1
+ self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn1 = Wav2Vec2ConformerFeedForward(config)
+
+ # Self-Attention
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
+ self.self_attn_dropout = torch.nn.Dropout(dropout)
+ self.self_attn = Wav2Vec2ConformerSelfAttention(config)
+
+ # Conformer Convolution
+ self.conv_module = Wav2Vec2ConformerConvolutionModule(config)
+
+ # Feed-forward 2
+ self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn2 = Wav2Vec2ConformerFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(embed_dim)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask: Optional[torch.Tensor] = None,
+ relative_position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ):
+ hidden_states = hidden_states
+
+ # 1. Feed-Forward 1 layer
+ residual = hidden_states
+ hidden_states = self.ffn1_layer_norm(hidden_states)
+ hidden_states = self.ffn1(hidden_states)
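+ # half-step residual: the 0.5 scaling follows the Macaron-style feed-forward of the Conformer paper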
+ hidden_states = hidden_states * 0.5 + residual
+ residual = hidden_states
+
+ # 2. Self-Attention layer
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ relative_position_embeddings=relative_position_embeddings,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_dropout(hidden_states)
+ hidden_states = hidden_states + residual
+
+ # 3. Convolutional Layer
+ residual = hidden_states
+ hidden_states = self.conv_module(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # 4. Feed-Forward 2 Layer
+ residual = hidden_states
+ hidden_states = self.ffn2_layer_norm(hidden_states)
+ hidden_states = self.ffn2(hidden_states)
+ hidden_states = hidden_states * 0.5 + residual
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return hidden_states, attn_weights
+
+
+class Wav2Vec2ConformerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ if config.position_embeddings_type == "relative":
+ self.embed_positions = Wav2Vec2ConformerRelPositionalEmbedding(config)
+ elif config.position_embeddings_type == "rotary":
+ self.embed_positions = Wav2Vec2ConformerRotaryPositionalEmbedding(config)
+ else:
+ self.embed_positions = None
+
+ self.pos_conv_embed = Wav2Vec2ConformerPositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.ModuleList([Wav2Vec2ConformerEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+ hidden_states[~attention_mask] = 0.0
+
+ # extend attention_mask
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
+
+ hidden_states = self.dropout(hidden_states)
+
+ if self.embed_positions is not None:
+ relative_position_embeddings = self.embed_positions(hidden_states)
+ else:
+ relative_position_embeddings = None
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ # create gradient checkpointing function
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs, output_attentions)
+
+ return custom_forward
+
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(layer),
+ hidden_states,
+ attention_mask,
+ relative_position_embeddings,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ relative_position_embeddings=relative_position_embeddings,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GumbelVectorQuantizer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerGumbelVectorQuantizer(nn.Module):
+ """
+ Vector quantization using Gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
+ GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.num_groups = config.num_codevector_groups
+ self.num_vars = config.num_codevectors_per_group
+
+ if config.codevector_dim % self.num_groups != 0:
+ raise ValueError(
+ f"`config.codevector_dim {config.codevector_dim} must be divisible "
+ f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
+ )
+
+ # storage for codebook variables (codewords)
+ self.codevectors = nn.Parameter(
+ torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
+ )
+ self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
+
+ # can be decayed for training
+ self.temperature = 2
+
+ @staticmethod
+ def _compute_perplexity(probs, mask=None):
+ if mask is not None:
+ mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
+ probs = torch.where(mask_extended, probs, torch.zeros_like(probs))
+ marginal_probs = probs.sum(dim=0) / mask.sum()
+ else:
+ marginal_probs = probs.mean(dim=0)
+
+ perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
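+ # with G codevector groups of V entries each this equals sum_g exp(H(p_g)), which reaches
+ # its maximum G * V when codevector usage within every group is uniform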
+ return perplexity
+
+ def forward(self, hidden_states, mask_time_indices=None):
+ batch_size, sequence_length, hidden_size = hidden_states.shape
+
+ # project to codevector dim
+ hidden_states = self.weight_proj(hidden_states)
+ hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
+
+ if self.training:
+ # sample code vector probs via gumbel in differentiable way
+ codevector_probs = nn.functional.gumbel_softmax(
+ hidden_states.float(), tau=self.temperature, hard=True
+ ).type_as(hidden_states)
+
+ # compute perplexity
+ codevector_soft_dist = torch.softmax(
+ hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
+ )
+ perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
+ else:
+ # take argmax in non-differentiable way
+ # compute hard codevector distribution (one hot)
+ codevector_idx = hidden_states.argmax(dim=-1)
+ codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_(
+ -1, codevector_idx.view(-1, 1), 1.0
+ )
+ codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
+
+ perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)
+
+ codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
+ # use probs to retrieve codevectors
+ codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
+ codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
+ codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
+
+ return codevectors, perplexity
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerAdapter(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ # feature dim might need to be down-projected
+ if config.output_hidden_size != config.hidden_size:
+ self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
+ self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
+ else:
+ self.proj = self.proj_layer_norm = None
+
+ self.layers = nn.ModuleList(Wav2Vec2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers))
+ self.layerdrop = config.layerdrop
+
+ def forward(self, hidden_states):
+ # down project hidden_states if necessary
+ if self.proj is not None and self.proj_layer_norm is not None:
+ hidden_states = self.proj(hidden_states)
+ hidden_states = self.proj_layer_norm(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+
+ for layer in self.layers:
+ layerdrop_prob = np.random.random()
+ if not self.training or (layerdrop_prob > self.layerdrop):
+ hidden_states = layer(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->Wav2Vec2Conformer
+class Wav2Vec2ConformerAdapterLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.output_hidden_size,
+ 2 * config.output_hidden_size,
+ config.adapter_kernel_size,
+ stride=config.adapter_stride,
+ padding=1,
+ )
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
+
+ return hidden_states
+
+
+class Wav2Vec2ConformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Wav2Vec2ConformerConfig
+ base_model_prefix = "wav2vec2_conformer"
+ main_input_name = "input_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ # Wav2Vec2ForPreTraining last 2 linear layers need standard Linear init.
+ if isinstance(module, Wav2Vec2ConformerForPreTraining):
+ module.project_hid.reset_parameters()
+ module.project_q.reset_parameters()
+ module.project_hid._is_hf_initialized = True
+ module.project_q._is_hf_initialized = True
+ # gumbel softmax requires special init
+ elif isinstance(module, Wav2Vec2ConformerGumbelVectorQuantizer):
+ module.weight_proj.weight.data.normal_(mean=0.0, std=1)
+ module.weight_proj.bias.data.zero_()
+ nn.init.uniform_(module.codevectors)
+ elif isinstance(module, Wav2Vec2ConformerSelfAttention):
+ if hasattr(module, "pos_bias_u"):
+ nn.init.xavier_uniform_(module.pos_bias_u)
+ if hasattr(module, "pos_bias_v"):
+ nn.init.xavier_uniform_(module.pos_bias_v)
+ elif isinstance(module, Wav2Vec2ConformerPositionalConvEmbedding):
+ nn.init.normal_(
+ module.conv.weight,
+ mean=0,
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
+ )
+ nn.init.constant_(module.conv.bias, 0)
+ elif isinstance(module, Wav2Vec2ConformerFeatureProjection):
+ k = math.sqrt(1 / module.projection.in_features)
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
+ elif isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.kaiming_normal_(module.weight)
+
+ if module.bias is not None:
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+ nn.init.uniform_(module.bias, a=-k, b=k)
+
+ def _get_feat_extract_output_lengths(
+ self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
+ ):
+ """
+ Computes the output length of the convolutional layers
+ """
+
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
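+
+ # e.g. with the usual wav2vec2-style defaults, kernels (10, 3, 3, 3, 3, 2, 2) and strides
+ # (5, 2, 2, 2, 2, 2, 2), one second of 16 kHz audio (16000 samples) gives
+ # floor((16000 - 10) / 5) + 1 = 3199 frames after the first layer and 49 frames overall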
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
+ if add_adapter:
+ for _ in range(self.config.num_adapter_layers):
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(
+ self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
+ ):
+ # Effectively attention_mask.sum(-1), but not inplace to be able to run
+ # in inference mode.
+ non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
+
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
+ output_lengths = output_lengths.to(torch.long)
+
+ batch_size = attention_mask.shape[0]
+
+ attention_mask = torch.zeros(
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+ # these two operations make sure that all values before the output lengths idxs are attended to
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
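+ # e.g. feature_vector_length=5 and output_lengths=[3]: the scatter above yields [0, 0, 1, 0, 0]
+ # and flip-cumsum-flip turns it into [1, 1, 1, 0, 0]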
+ return attention_mask
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (Wav2Vec2ConformerEncoder, Wav2Vec2ConformerFeatureEncoder)):
+ module.gradient_checkpointing = value
+
+
+WAV2VEC2_CONFORMER_START_DOCSTRING = r"""
+ Wav2Vec2Conformer was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech
+ Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael
+ Auli.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, etc.).
+
+ This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) sub-class. Use it as a
+ regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
+
+ Parameters:
+ config ([`Wav2Vec2ConformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+WAV2VEC2_CONFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+
+
+ `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
+ True`. For all models whose processor has `config.return_attention_mask == False`, such as
+ [wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large),
+ `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For
+ such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware
+ that these models also yield slightly different results depending on whether `input_values` is padded or
+ not.
+
+
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Wav2Vec2Conformer Model transformer outputting raw hidden-states without any specific head on top.",
+ WAV2VEC2_CONFORMER_START_DOCSTRING,
+)
+class Wav2Vec2ConformerModel(Wav2Vec2ConformerPreTrainedModel):
+ def __init__(self, config: Wav2Vec2ConformerConfig):
+ super().__init__(config)
+ self.config = config
+ self.feature_extractor = Wav2Vec2ConformerFeatureEncoder(config)
+ self.feature_projection = Wav2Vec2ConformerFeatureProjection(config)
+
+ # model only needs masking vector if mask prob is > 0.0
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
+
+ self.encoder = Wav2Vec2ConformerEncoder(config)
+
+ self.adapter = Wav2Vec2ConformerAdapter(config) if config.add_adapter else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model.freeze_feature_encoder
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.feature_extractor._freeze_parameters()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
+ def _mask_hidden_states(
+ self,
+ hidden_states: torch.FloatTensor,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Wav2Vec2BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model.forward with wav2vec2->wav2vec2_conformer
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose(1, 2)
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(
+ extract_features.shape[1], attention_mask, add_adapter=False
+ )
+
+ hidden_states, extract_features = self.feature_projection(extract_features)
+ hidden_states = self._mask_hidden_states(
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
+ )
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if self.adapter is not None:
+ hidden_states = self.adapter(hidden_states)
+
+ if not return_dict:
+ return (hidden_states, extract_features) + encoder_outputs[1:]
+
+ return Wav2Vec2BaseModelOutput(
+ last_hidden_state=hidden_states,
+ extract_features=extract_features,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Wav2Vec2Conformer Model with a quantizer and `VQ` head on top.""", WAV2VEC2_CONFORMER_START_DOCSTRING
+)
+class Wav2Vec2ConformerForPreTraining(Wav2Vec2ConformerPreTrainedModel):
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTraining.__init__ with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer
+ def __init__(self, config: Wav2Vec2ConformerConfig):
+ super().__init__(config)
+ self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
+ self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
+
+ self.quantizer = Wav2Vec2ConformerGumbelVectorQuantizer(config)
+
+ self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
+ self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTraining.set_gumbel_temperature
+ def set_gumbel_temperature(self, temperature: int):
+ """
+ Set the Gumbel softmax temperature to a given value. Only necessary for training
+ """
+ self.quantizer.temperature = temperature
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTraining.freeze_feature_encoder with wav2vec2->wav2vec2_conformer
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.wav2vec2_conformer.feature_extractor._freeze_parameters()
+
+ @staticmethod
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTraining.compute_contrastive_logits
+ def compute_contrastive_logits(
+ target_features: torch.FloatTensor,
+ negative_features: torch.FloatTensor,
+ predicted_features: torch.FloatTensor,
+ temperature: float = 0.1,
+ ):
+ """
+ Compute logits for contrastive loss using cosine similarity as the distance measure between
+ `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
+ """
+ target_features = torch.cat([target_features, negative_features], dim=0)
+
+ logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as(
+ target_features
+ )
+
+ # apply temperature
+ logits = logits / temperature
+ return logits
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Wav2Vec2ConformerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTraining.forward with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer,wav2vec2_conformer-base->wav2vec2-conformer-rel-pos-large
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ mask_time_indices: Optional[torch.BoolTensor] = None,
+ sampled_negative_indices: Optional[torch.BoolTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Wav2Vec2ConformerForPreTrainingOutput]:
+ r"""
+ mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices to mask extracted features for contrastive loss. When in training mode, the model learns to predict
+ masked extracted features in *config.proj_codevector_dim* space.
+ sampled_negative_indices (`torch.LongTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
+ Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
+ Required input for pre-training.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoFeatureExtractor, Wav2Vec2ConformerForPreTraining
+ >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import (
+ ... _compute_mask_indices,
+ ... _sample_negative_indices,
+ ... )
+ >>> from datasets import load_dataset
+
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large")
+ >>> model = Wav2Vec2ConformerForPreTraining.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large")
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values # Batch size 1
+
+ >>> # compute masked indices
+ >>> batch_size, raw_sequence_length = input_values.shape
+ >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item()
+ >>> mask_time_indices = _compute_mask_indices(
+ ... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2
+ ... )
+ >>> sampled_negative_indices = _sample_negative_indices(
+ ... features_shape=(batch_size, sequence_length),
+ ... num_negatives=model.config.num_negatives,
+ ... mask_time_indices=mask_time_indices,
+ ... )
+ >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long)
+ >>> sampled_negative_indices = torch.tensor(
+ ... data=sampled_negative_indices, device=input_values.device, dtype=torch.long
+ ... )
+
+ >>> with torch.no_grad():
+ ... outputs = model(input_values, mask_time_indices=mask_time_indices)
+
+ >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)
+ >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)
+
+ >>> # show that cosine similarity is much higher than random
+ >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5
+ tensor(True)
+
+ >>> # for contrastive loss training model should be put into train mode
+ >>> model = model.train()
+ >>> loss = model(
+ ... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices
+ ... ).loss
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if mask_time_indices is not None:
+ mask_time_indices = mask_time_indices.to(torch.bool)
+
+ outputs = self.wav2vec2_conformer(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ mask_time_indices=mask_time_indices,
+ return_dict=return_dict,
+ )
+
+ # 1. project all transformed features (including masked) to final vq dim
+ transformer_features = self.project_hid(outputs[0])
+
+ # 2. quantize all (unmasked) extracted features and project to final vq dim
+ extract_features = self.dropout_features(outputs[1])
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(
+ extract_features.shape[1], attention_mask, add_adapter=False
+ )
+
+ quantized_features, codevector_perplexity = self.quantizer(
+ extract_features, mask_time_indices=mask_time_indices
+ )
+ quantized_features = self.project_q(quantized_features)
+
+ loss = contrastive_loss = diversity_loss = None
+ if sampled_negative_indices is not None:
+ batch_size, sequence_length, hidden_size = quantized_features.shape
+
+ # for training, we sample negatives
+ # 3. sample K negatives (distractors) quantized states for contrastive loss
+ # if attention_mask is passed, make sure that padded feature vectors cannot be sampled
+ # sample negative quantized vectors BTC => (BxT)C
+ negative_quantized_features = quantized_features.view(-1, hidden_size)[
+ sampled_negative_indices.long().view(-1)
+ ]
+ negative_quantized_features = negative_quantized_features.view(
+ batch_size, sequence_length, -1, hidden_size
+ ).permute(2, 0, 1, 3)
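+ # the negatives now have shape (num_negatives, batch, seq_len, dim), so they can be stacked behind the
+ # positives along dim 0 inside `compute_contrastive_logits`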
+
+ # 4. compute logits, corresponding to `logits = sim(c_t, [q_t, \sim{q}_t]) / \kappa`
+ # of equation (3) in https://arxiv.org/pdf/2006.11477.pdf
+ logits = self.compute_contrastive_logits(
+ quantized_features[None, :],
+ negative_quantized_features,
+ transformer_features,
+ self.config.contrastive_logits_temperature,
+ )
+
+ # 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low),
+ # its cosine similarity will be masked
+ neg_is_pos = (quantized_features == negative_quantized_features).all(-1)
+
+ if neg_is_pos.any():
+ logits[1:][neg_is_pos] = float("-inf")
+
+ # 6. compute contrastive loss \mathbf{L}_m = cross_entropy(logits) =
+ # -log(exp(sim(c_t, q_t)/\kappa) / \sum_{\sim{q}} exp(sim(c_t, \sim{q})/\kappa))
+ logits = logits.transpose(0, 2).reshape(-1, logits.size(0))
+ target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten()
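+ # after the reshape above the positive always sits at class index 0, so masked time steps get target 0
+ # while unmasked steps get -100, which `cross_entropy` ignores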
+
+ contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction="sum")
+ # 7. compute diversity loss: \mathbf{L}_d
+ num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups
+ diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum()
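+ # the perplexity approaches `num_codevectors` when all codebook entries are used uniformly, so this term
+ # (scaled by the number of masked time steps) penalizes low codebook utilization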
+
+ # 8. \mathbf{L} = \mathbf{L}_m + \alpha * \mathbf{L}_d
+ loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss
+
+ if not return_dict:
+ if loss is not None:
+ return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
+ return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
+
+ return Wav2Vec2ConformerForPreTrainingOutput(
+ loss=loss,
+ projected_states=transformer_features,
+ projected_quantized_states=quantized_features,
+ codevector_perplexity=codevector_perplexity,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ contrastive_loss=contrastive_loss,
+ diversity_loss=diversity_loss,
+ )
+
+
+@add_start_docstrings(
+ """Wav2Vec2Conformer Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ WAV2VEC2_CONFORMER_START_DOCSTRING,
+)
+class Wav2Vec2ConformerForCTC(Wav2Vec2ConformerPreTrainedModel):
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.__init__ with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer
+ def __init__(self, config, target_lang: Optional[str] = None):
+ super().__init__(config)
+
+ self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
+ self.dropout = nn.Dropout(config.final_dropout)
+
+ self.target_lang = target_lang
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `Wav2Vec2ConformerForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = (
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
+ )
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.freeze_feature_encoder with wav2vec2->wav2vec2_conformer
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.wav2vec2_conformer.feature_extractor._freeze_parameters()
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
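+
+ Example (a minimal usage sketch; it assumes a CTC fine-tuned checkpoint such as
+ `facebook/wav2vec2-conformer-rel-pos-large-960h-ft` together with its matching processor):
+
+ ```python
+ >>> import torch
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoProcessor, Wav2Vec2ConformerForCTC
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft")
+ >>> model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft")
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ...     logits = model(**inputs).logits
+ >>> predicted_ids = torch.argmax(logits, dim=-1)
+ >>> transcription = processor.batch_decode(predicted_ids)
+ ```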
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.wav2vec2_conformer(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states)
+
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
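+ # the transpose gives log_probs the (seq_len, batch, vocab) layout that `nn.functional.ctc_loss` expects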
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ Wav2Vec2Conformer Model with a sequence classification head on top (a linear layer over the pooled output) for
+ tasks like SUPERB Keyword Spotting.
+ """,
+ WAV2VEC2_CONFORMER_START_DOCSTRING,
+)
+class Wav2Vec2ConformerForSequenceClassification(Wav2Vec2ConformerPreTrainedModel):
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.__init__ with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, "add_adapter") and config.add_adapter:
+ raise ValueError(
+ "Sequence classification does not support the use of Wav2Vec2Conformer adapters (config.add_adapter=True)"
+ )
+ self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->wav2vec2_conformer
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.wav2vec2_conformer.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.wav2vec2_conformer.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ )
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer,WAV_2_VEC_2->WAV2VEC2_CONFORMER
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.wav2vec2_conformer(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
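+ # the weighted layer sum combines the hidden states of all layers (plus the input embeddings) with
+ # softmax-normalized learned weights instead of using only the top layer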
+ else:
+ hidden_states = outputs[0]
+
+ hidden_states = self.projector(hidden_states)
+ if attention_mask is None:
+ pooled_output = hidden_states.mean(dim=1)
+ else:
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+ hidden_states[~padding_mask] = 0.0
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
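+ # masked mean pooling: padded frames are zeroed out and each utterance is averaged over its valid length only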
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Wav2Vec2Conformer Model with a frame classification head on top for tasks like Speaker Diarization.
+ """,
+ WAV2VEC2_CONFORMER_START_DOCSTRING,
+)
+class Wav2Vec2ConformerForAudioFrameClassification(Wav2Vec2ConformerPreTrainedModel):
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.__init__ with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer,WAV_2_VEC_2->WAV2VEC2_CONFORMER
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, "add_adapter") and config.add_adapter:
+ raise ValueError(
+ "Audio frame classification does not support the use of Wav2Vec2Conformer adapters (config.add_adapter=True)"
+ )
+ self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+ self.num_labels = config.num_labels
+
+ self.init_weights()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.freeze_feature_encoder with wav2vec2->wav2vec2_conformer
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.wav2vec2_conformer.feature_extractor._freeze_parameters()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.freeze_base_model with wav2vec2->wav2vec2_conformer
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.wav2vec2_conformer.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ )
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.forward with wav2vec2->wav2vec2_conformer
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.wav2vec2_conformer(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
+ else:
+ hidden_states = outputs[0]
+
+ logits = self.classifier(hidden_states)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
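+ # note: as written, `labels` are expected as frame-level one-hot vectors of shape
+ # (batch_size, sequence_length, num_labels); the argmax recovers a class index per frame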
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
+class AMSoftmaxLoss(nn.Module):
+ def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
+ super().__init__()
+ self.scale = scale
+ self.margin = margin
+ self.num_labels = num_labels
+ self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
+ self.loss = nn.CrossEntropyLoss()
+
+ def forward(self, hidden_states, labels):
+ labels = labels.flatten()
+ weight = nn.functional.normalize(self.weight, dim=0)
+ hidden_states = nn.functional.normalize(hidden_states, dim=1)
+ cos_theta = torch.mm(hidden_states, weight)
+ psi = cos_theta - self.margin
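+ # AM-Softmax: the additive margin is subtracted from the cosine logit of the target class only (selected via
+ # the one-hot mask below), and all logits are scaled before the cross-entropy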
+
+ onehot = nn.functional.one_hot(labels, self.num_labels)
+ logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
+ loss = self.loss(logits, labels)
+
+ return loss
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
+class TDNNLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
+ self.out_conv_dim = config.tdnn_dim[layer_id]
+ self.kernel_size = config.tdnn_kernel[layer_id]
+ self.dilation = config.tdnn_dilation[layer_id]
+
+ self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
+ self.activation = nn.ReLU()
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.unsqueeze(1)
+ hidden_states = nn.functional.unfold(
+ hidden_states,
+ (self.kernel_size, self.in_conv_dim),
+ stride=(1, self.in_conv_dim),
+ dilation=(self.dilation, 1),
+ )
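+ # unfold extracts sliding (kernel_size x in_conv_dim) patches over time (with the given dilation), so the
+ # linear layer below acts as a dilated 1D convolution along the time axis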
+ hidden_states = hidden_states.transpose(1, 2)
+ hidden_states = self.kernel(hidden_states)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+@add_start_docstrings(
+ """
+ Wav2Vec2Conformer Model with an XVector feature extraction head on top for tasks like Speaker Verification.
+ """,
+ WAV2VEC2_CONFORMER_START_DOCSTRING,
+)
+class Wav2Vec2ConformerForXVector(Wav2Vec2ConformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
+
+ tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
+ self.tdnn = nn.ModuleList(tdnn_layers)
+
+ self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
+ self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
+
+ self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
+
+ self.init_weights()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector.freeze_feature_encoder with wav2vec2->wav2vec2_conformer
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.wav2vec2_conformer.feature_extractor._freeze_parameters()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector.freeze_base_model with wav2vec2->wav2vec2_conformer
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.wav2vec2_conformer.parameters():
+ param.requires_grad = False
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector._get_tdnn_output_lengths with wav2vec2->wav2vec2_conformer
+ def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the TDNN layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return (input_length - kernel_size) // stride + 1
+
+ for kernel_size in self.config.tdnn_kernel:
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
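+ # with stride 1 each TDNN layer shortens the sequence by (kernel_size - 1); assuming the default
+ # `tdnn_kernel = (5, 3, 3, 1, 1)`, an input of 100 frames comes out as 100 - 4 - 2 - 2 - 0 - 0 = 92 frames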
+
+ return input_lengths
+
+ @add_start_docstrings_to_model_forward(WAV2VEC2_CONFORMER_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XVectorOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ )
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector.forward with Wav2Vec2->Wav2Vec2Conformer,wav2vec2->wav2vec2_conformer,WAV_2_VEC_2->WAV2VEC2_CONFORMER
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, XVectorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.wav2vec2_conformer(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
+ else:
+ hidden_states = outputs[0]
+
+ hidden_states = self.projector(hidden_states)
+
+ for tdnn_layer in self.tdnn:
+ hidden_states = tdnn_layer(hidden_states)
+
+ # Statistic Pooling
+ if attention_mask is None:
+ mean_features = hidden_states.mean(dim=1)
+ std_features = hidden_states.std(dim=1)
+ else:
+ feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
+ tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
+ mean_features = []
+ std_features = []
+ for i, length in enumerate(tdnn_output_lengths):
+ mean_features.append(hidden_states[i, :length].mean(dim=0))
+ std_features.append(hidden_states[i, :length].std(dim=0))
+ mean_features = torch.stack(mean_features)
+ std_features = torch.stack(std_features)
+ statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
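+ # statistic pooling summarizes the variable-length TDNN output as the concatenation of its per-utterance
+ # mean and standard deviation, which is why `feature_extractor` takes `tdnn_dim[-1] * 2` input features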
+
+ output_embeddings = self.feature_extractor(statistic_pooling)
+ logits = self.classifier(output_embeddings)
+
+ loss = None
+ if labels is not None:
+ loss = self.objective(logits, labels)
+
+ if not return_dict:
+ output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XVectorOutput(
+ loss=loss,
+ logits=logits,
+ embeddings=output_embeddings,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )