Upload folder using huggingface_hub
- .gitattributes +45 -35
- .gitignore +37 -0
- LICENSE +201 -0
- README.md +144 -0
- config.yaml +37 -0
- examples/example_usage.py +53 -0
- examples/sample_audio.wav +3 -0
- inference.py +214 -0
- install.sh +94 -0
- models/Qformer.py +1217 -0
- models/__init__.py +18 -0
- models/beats/BEATs.py +180 -0
- models/beats/Tokenizers.py +172 -0
- models/beats/__init__.py +0 -0
- models/beats/backbone.py +783 -0
- models/beats/modules.py +218 -0
- models/beats/quantizer.py +215 -0
- models/modeling_llama.py +754 -0
- models/modeling_whisper.py +1770 -0
- models/salmonn.py +506 -0
- models/utils.py +30 -0
- prompts/test_prompt.json +27 -0
- prompts/train_prompt.json +132 -0
- requirements.txt +14 -0
- server.py +184 -0
- utils.py +158 -0
.gitattributes
CHANGED
@@ -1,35 +1,45 @@
###############################################################################
# Set default behavior to automatically normalize line endings.
###############################################################################
* text=auto

###############################################################################
# behavior for image files
# image files are treated as binary by default.
###############################################################################
*.jpg binary
*.png binary
*.gif binary
*.bmp binary
*.ico binary
*.jpeg binary
*.jfif binary

###############################################################################
# diff behavior for common document formats
# Convert binary document formats to text before diffing them.
###############################################################################
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain

###############################################################################
# Force Windows cmd and batch scripts to always use crlf line endings so that if
# a repo is accessed in Windows via a file share from Unix, the scripts will
# work.
###############################################################################
*.cmd text eol=crlf
*.bat text eol=crlf

###############################################################################
# Force bash scripts to always use lf line endings so that if a repo is accessed
# in Unix via a file share from Windows, the scripts will work.
###############################################################################
*.sh text eol=lf
examples/sample_audio.wav filter=lfs diff=lfs merge=lfs -text

.gitignore
ADDED
@@ -0,0 +1,37 @@
# Checkpoints (download with install.sh)
checkpoints/

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
*.egg-info/
dist/
build/

# Environments
.env
.venv
venv/
ENV/

# IDE
.idea/
.vscode/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db

# Logs
*.log
logs/

# Temp files
*.tmp
*.temp
uploads/

LICENSE
ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright Tsinghua University

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md
ADDED
@@ -0,0 +1,144 @@
# SALMONN Inference Server

A ready-to-use inference server for [SALMONN](https://github.com/bytedance/SALMONN) (Speech Audio Language Music Open Neural Network) - a multimodal LLM that can understand speech, audio events, and music.

## Features

- Audio transcription
- Question answering about audio content
- Audio description and analysis
- FastAPI server with REST API
- Simple Python API

## Requirements

- **GPU**: NVIDIA GPU with 24GB+ VRAM (L4, A100, RTX 4090, etc.)
- **CUDA**: 11.8 or higher
- **Python**: 3.10+
- **Storage**: ~25GB for model checkpoints

## Quick Start

### 1. Clone and Install

```bash
git clone https://huggingface.co/marcosremar2/salmonn-inference
cd salmonn-inference

# Create virtual environment
python -m venv venv
source venv/bin/activate

# Install dependencies
pip install -r requirements.txt
```

### 2. Download Models

```bash
./install.sh
```

This downloads (~20GB):
- Vicuna 7B v1.5 (LLM backbone)
- Whisper Large v2 (speech encoder)
- BEATs (audio encoder)
- SALMONN checkpoint (adapter weights)

### 3. Start Server

```bash
python server.py
```

Server runs at `http://localhost:8000`

## API Usage

### Transcribe Audio

```bash
curl -X POST "http://localhost:8000/transcribe" \
  -F "audio=@your_audio.wav"
```

### Ask Questions

```bash
curl -X POST "http://localhost:8000/chat" \
  -F "audio=@your_audio.wav" \
  -F "question=What is being said in this audio?"
```

### Python API

```python
from inference import SALMONNInference

model = SALMONNInference()
model.load()

# Transcribe
text = model.transcribe("audio.wav")

# Ask questions
answer = model.chat("audio.wav", "What language is being spoken?")

# Describe audio
description = model.describe("audio.wav")
```

## API Endpoints

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/` | GET | API info |
| `/health` | GET | Health check |
| `/transcribe` | POST | Transcribe audio to text |
| `/chat` | POST | Ask questions about audio |
| `/describe` | POST | Get audio description |

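Any HTTP client can call these endpoints. The sketch below uses the third-party `requests` package (an assumption; install it separately if it is not already in your environment). The form field names `audio` and `question` mirror the curl examples above; the exact response schema is defined in `server.py`, so the raw body is printed here.

```python
import requests

BASE = "http://localhost:8000"

# Health check
print(requests.get(f"{BASE}/health").text)

# Transcription: upload the audio file as multipart form data (field "audio")
with open("your_audio.wav", "rb") as f:
    r = requests.post(f"{BASE}/transcribe", files={"audio": f})
print(r.text)

# Question answering: audio file plus a "question" form field
with open("your_audio.wav", "rb") as f:
    r = requests.post(
        f"{BASE}/chat",
        files={"audio": f},
        data={"question": "What is being said in this audio?"},
    )
print(r.text)
```
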
+
## Configuration
|
| 102 |
+
|
| 103 |
+
Edit `config.yaml` to customize:
|
| 104 |
+
|
| 105 |
+
```yaml
|
| 106 |
+
model:
|
| 107 |
+
device: "cuda:0" # GPU device
|
| 108 |
+
|
| 109 |
+
server:
|
| 110 |
+
host: "0.0.0.0"
|
| 111 |
+
port: 8000
|
| 112 |
+
|
| 113 |
+
generation:
|
| 114 |
+
max_new_tokens: 200
|
| 115 |
+
temperature: 1.0
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
## Performance
|
| 119 |
+
|
| 120 |
+
Tested on NVIDIA L4 (24GB):
|
| 121 |
+
|
| 122 |
+
| Metric | Value |
|
| 123 |
+
|--------|-------|
|
| 124 |
+
| Model Load Time | ~20s |
|
| 125 |
+
| Audio Encode | ~250ms |
|
| 126 |
+
| Time to First Token | ~150ms |
|
| 127 |
+
| Tokens/second | ~18 |
|
| 128 |
+
| GPU Memory | ~16GB |
|
| 129 |
+
|
| 130 |
+
## Important Note
|
| 131 |
+
|
| 132 |
+
This repository uses **Vicuna 7B v1.5** (not v1.1). The original SALMONN checkpoint was trained with v1.5, and using v1.1 will result in broken outputs (`<unk>` tokens).
|
| 133 |
+
|
| 134 |
+
## License
|
| 135 |
+
|
| 136 |
+
- SALMONN: Apache 2.0
|
| 137 |
+
- Vicuna: Llama 2 Community License
|
| 138 |
+
- Whisper: MIT
|
| 139 |
+
|
| 140 |
+
## Credits
|
| 141 |
+
|
| 142 |
+
- [SALMONN](https://github.com/bytedance/SALMONN) by Tsinghua University & ByteDance
|
| 143 |
+
- [Vicuna](https://huggingface.co/lmsys/vicuna-7b-v1.5) by LMSYS
|
| 144 |
+
- [Whisper](https://huggingface.co/openai/whisper-large-v2) by OpenAI
|
config.yaml
ADDED
@@ -0,0 +1,37 @@
# SALMONN Configuration

model:
  # Model paths (relative to project root)
  vicuna_path: "checkpoints/vicuna-7b-v1.5"
  whisper_path: "checkpoints/whisper-large-v2"
  beats_path: "checkpoints/BEATs_iter3_plus_AS2M.pt"
  salmonn_ckpt: "checkpoints/salmonn-7b/salmonn_7b_v0.pth"

  # Model settings
  device: "cuda:0"
  low_resource: false  # Set true for 8-bit quantization

  # LoRA settings (do not change)
  lora: true
  lora_rank: 8
  lora_alpha: 32
  lora_dropout: 0.1

  # Prompt settings
  prompt_template: "USER: {}\nASSISTANT:"
  prompt_path: "prompts/train_prompt.json"

server:
  host: "0.0.0.0"
  port: 8000
  reload: false

generation:
  max_new_tokens: 200
  num_beams: 4
  do_sample: false
  min_length: 1
  temperature: 1.0
  top_p: 0.9
  repetition_penalty: 1.0
  length_penalty: 1.0

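The inference code in `inference.py` (further down) loads this file with `yaml.safe_load` and wraps it in an `OmegaConf` object, so every key is available via dot access. If you want to change a setting without editing the file, one option is to load it the same way, override values, and save a local variant; a minimal sketch (the `config.local.yaml` name is just an example):

```python
import yaml
from omegaconf import OmegaConf

# Load config.yaml exactly as inference.py does
with open("config.yaml") as f:
    cfg = OmegaConf.create(yaml.safe_load(f))

# Override a couple of values (examples, not defaults)
cfg.model.device = "cuda:1"   # run on a different GPU
cfg.server.port = 8080        # expose a different port

# Write a local variant and point SALMONNInference at it
OmegaConf.save(cfg, "config.local.yaml")
# SALMONNInference(config_path="config.local.yaml")
```
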
examples/example_usage.py
ADDED
@@ -0,0 +1,53 @@
"""
SALMONN Example Usage

This script demonstrates how to use the SALMONN inference API.
"""

import sys
sys.path.insert(0, '..')

from inference import SALMONNInference

def main():
    # Initialize model
    print("Loading SALMONN model...")
    model = SALMONNInference(config_path="../config.yaml")
    model.load()

    # Example audio file
    audio_file = "sample_audio.wav"

    print("\n" + "="*50)
    print("SALMONN Example Usage")
    print("="*50)

    # 1. Transcribe audio
    print("\n1. Transcription:")
    text = model.transcribe(audio_file)
    print(f" {text}")

    # 2. Ask questions
    print("\n2. Question Answering:")
    questions = [
        "What language is being spoken?",
        "What is the tone of the speaker?",
        "Is this audio about weather?",
    ]

    for q in questions:
        answer = model.chat(audio_file, q)
        print(f" Q: {q}")
        print(f" A: {answer}")
        print()

    # 3. Describe audio
    print("3. Audio Description:")
    description = model.describe(audio_file)
    print(f" {description}")

    print("\n" + "="*50)
    print("Done!")

if __name__ == "__main__":
    main()

examples/sample_audio.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa8e0712e9dd12f400579d2c0d15400259c544b12b18fe0508d3b21c79ab0263
size 142926

inference.py
ADDED
@@ -0,0 +1,214 @@
"""
SALMONN Inference Module
Simple interface for audio understanding and transcription.
"""

import torch
import yaml
import os
from pathlib import Path
from typing import Optional, Generator
from transformers import WhisperFeatureExtractor
from omegaconf import OmegaConf

from models.salmonn import SALMONN


class SALMONNInference:
    """
    Simple inference class for SALMONN model.

    Usage:
        model = SALMONNInference()
        model.load()

        # Transcribe audio
        text = model.transcribe("audio.wav")

        # Ask questions about audio
        answer = model.chat("audio.wav", "What is being said?")
    """

    def __init__(self, config_path: str = "config.yaml"):
        self.config_path = config_path
        self.model = None
        self.wav_processor = None
        self.config = None
        self.device = None
        self._loaded = False

    def load(self) -> None:
        """Load the model and processors."""
        if self._loaded:
            return

        # Load config
        with open(self.config_path, "r") as f:
            self.config = OmegaConf.create(yaml.safe_load(f))

        self.device = self.config.model.device

        print("Loading SALMONN model...")

        # Build model config for SALMONN
        model_config = {
            "llama_path": self.config.model.vicuna_path,
            "whisper_path": self.config.model.whisper_path,
            "beats_path": self.config.model.beats_path,
            "ckpt": self.config.model.salmonn_ckpt,
            "lora": self.config.model.lora,
            "lora_rank": self.config.model.lora_rank,
            "lora_alpha": self.config.model.lora_alpha,
            "lora_dropout": self.config.model.lora_dropout,
            "low_resource": self.config.model.get("low_resource", False),
            "prompt_template": self.config.model.prompt_template,
            "prompt_path": self.config.model.prompt_path,
            "freeze_whisper": True,
            "freeze_beats": True,
            "use_speech_Qformer": True,
            "num_speech_query_token": 1,
            "freeze_speech_QFormer": False,
            "window_level_Qformer": True,
            "second_per_window": 0.333333,
            "second_stride": 0.333333,
            "multi_prompt": True,
        }

        self.model = SALMONN.from_config(model_config)
        self.model.to(self.device)
        self.model.eval()

        self.wav_processor = WhisperFeatureExtractor.from_pretrained(
            self.config.model.whisper_path
        )

        self._loaded = True
        print("Model loaded successfully!")

    def _prepare_audio(self, audio_path: str) -> dict:
        """Prepare audio file for inference."""
        import soundfile as sf

        audio, sr = sf.read(audio_path)

        # Convert to mono if stereo
        if len(audio.shape) > 1:
            audio = audio.mean(axis=1)

        # Resample to 16kHz if needed
        if sr != 16000:
            import librosa
            audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
            sr = 16000

        # Process audio
        spectrogram = self.wav_processor(
            audio,
            sampling_rate=sr,
            return_tensors="pt"
        )["input_features"].to(self.device)

        return {
            "spectrogram": spectrogram,
            "raw_wav": torch.from_numpy(audio).unsqueeze(0).to(self.device),
        }

    def _generate(self, samples: dict, prompt: str) -> str:
        """Generate response from the model."""
        prompt_formatted = self.config.model.prompt_template.format(
            "<Speech><SpeechHere></Speech> " + prompt
        )

        gen_config = {
            "max_new_tokens": self.config.generation.max_new_tokens,
            "num_beams": self.config.generation.num_beams,
            "do_sample": self.config.generation.do_sample,
            "min_length": self.config.generation.min_length,
            "temperature": self.config.generation.temperature,
            "top_p": self.config.generation.top_p,
            "repetition_penalty": self.config.generation.repetition_penalty,
            "length_penalty": self.config.generation.length_penalty,
        }

        with torch.cuda.amp.autocast(dtype=torch.float16):
            output = self.model.generate(
                samples,
                gen_config,
                prompts=[prompt_formatted]
            )[0]

        # Clean output
        return output.replace("<s>", "").replace("</s>", "").strip()

    def transcribe(self, audio_path: str) -> str:
        """
        Transcribe audio file to text.

        Args:
            audio_path: Path to audio file (wav, mp3, etc.)

        Returns:
            Transcribed text
        """
        if not self._loaded:
            self.load()

        samples = self._prepare_audio(audio_path)
        return self._generate(samples, "Transcribe the speech.")

    def chat(self, audio_path: str, question: str) -> str:
        """
        Ask a question about an audio file.

        Args:
            audio_path: Path to audio file
            question: Question about the audio content

        Returns:
            Model response
        """
        if not self._loaded:
            self.load()

        samples = self._prepare_audio(audio_path)
        return self._generate(samples, question)

    def describe(self, audio_path: str) -> str:
        """
        Get a description of the audio content.

        Args:
            audio_path: Path to audio file

        Returns:
            Description of the audio
        """
        if not self._loaded:
            self.load()

        samples = self._prepare_audio(audio_path)
        return self._generate(samples, "Describe the audio content in detail.")


if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("Usage: python inference.py <audio_file> [question]")
        print(" If question is omitted, transcribes the audio.")
        sys.exit(1)

    audio_file = sys.argv[1]
    question = sys.argv[2] if len(sys.argv) > 2 else None

    model = SALMONNInference()
    model.load()

    if question:
        print(f"Question: {question}")
        result = model.chat(audio_file, question)
    else:
        print("Transcribing...")
        result = model.transcribe(audio_file)

    print(f"Result: {result}")

install.sh
ADDED
@@ -0,0 +1,94 @@
#!/bin/bash
# SALMONN Installation Script
# Downloads all required models (~20GB total)

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CHECKPOINTS_DIR="$SCRIPT_DIR/checkpoints"

echo "=============================================="
echo "SALMONN Model Installation"
echo "=============================================="
echo ""
echo "This will download approximately 20GB of model files."
echo "Make sure you have enough disk space and a stable internet connection."
echo ""

# Create checkpoints directory
mkdir -p "$CHECKPOINTS_DIR"
cd "$CHECKPOINTS_DIR"

# Check if git-lfs is installed
if ! command -v git-lfs &> /dev/null; then
    echo "Installing git-lfs..."
    if command -v apt-get &> /dev/null; then
        sudo apt-get update && sudo apt-get install -y git-lfs
    elif command -v yum &> /dev/null; then
        sudo yum install -y git-lfs
    elif command -v brew &> /dev/null; then
        brew install git-lfs
    else
        echo "Please install git-lfs manually: https://git-lfs.github.com/"
        exit 1
    fi
fi
git lfs install

# 1. Download Vicuna 7B v1.5 (~13GB)
echo ""
echo "[1/4] Downloading Vicuna 7B v1.5 (~13GB)..."
if [ -d "vicuna-7b-v1.5" ] && [ -f "vicuna-7b-v1.5/pytorch_model-00001-of-00002.bin" ]; then
    echo " -> Already exists, skipping..."
else
    rm -rf vicuna-7b-v1.5
    git clone https://huggingface.co/lmsys/vicuna-7b-v1.5
    echo " -> Done!"
fi

# 2. Download Whisper Large v2 (~3GB)
echo ""
echo "[2/4] Downloading Whisper Large v2 (~3GB)..."
if [ -d "whisper-large-v2" ] && [ -f "whisper-large-v2/model.safetensors" ]; then
    echo " -> Already exists, skipping..."
else
    rm -rf whisper-large-v2
    git clone https://huggingface.co/openai/whisper-large-v2
    echo " -> Done!"
fi

# 3. Download BEATs (~350MB)
echo ""
echo "[3/4] Downloading BEATs checkpoint (~350MB)..."
if [ -f "BEATs_iter3_plus_AS2M.pt" ]; then
    echo " -> Already exists, skipping..."
else
    wget -q --show-progress https://valle.blob.core.windows.net/share/BEATs/BEATs_iter3_plus_AS2M.pt
    echo " -> Done!"
fi

# 4. Download SALMONN checkpoint (~350MB)
echo ""
echo "[4/4] Downloading SALMONN 7B checkpoint (~350MB)..."
if [ -d "salmonn-7b" ] && [ -f "salmonn-7b/salmonn_7b_v0.pth" ]; then
    echo " -> Already exists, skipping..."
else
    rm -rf salmonn-7b
    mkdir -p salmonn-7b
    cd salmonn-7b
    wget -q --show-progress https://huggingface.co/tsinghua-ee/SALMONN-7B/resolve/main/salmonn_7b_v0.pth
    cd ..
    echo " -> Done!"
fi

echo ""
echo "=============================================="
echo "Installation Complete!"
echo "=============================================="
echo ""
echo "Downloaded models:"
du -sh "$CHECKPOINTS_DIR"/*
echo ""
echo "To start the server, run:"
echo " python server.py"
echo ""

models/Qformer.py
ADDED
|
@@ -0,0 +1,1217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Adapted from salesforce@LAVIS. Below is the original copyright:
|
| 3 |
+
* Copyright (c) 2023, salesforce.com, inc.
|
| 4 |
+
* All rights reserved.
|
| 5 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 6 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 7 |
+
* By Junnan Li
|
| 8 |
+
* Based on huggingface code base
|
| 9 |
+
* https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import math
|
| 13 |
+
import os
|
| 14 |
+
import warnings
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from typing import Optional, Tuple, Dict, Any
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from torch import Tensor, device, dtype, nn
|
| 20 |
+
import torch.utils.checkpoint
|
| 21 |
+
from torch import nn
|
| 22 |
+
from torch.nn import CrossEntropyLoss
|
| 23 |
+
import torch.nn.functional as F
|
| 24 |
+
|
| 25 |
+
from transformers.activations import ACT2FN
|
| 26 |
+
from transformers.file_utils import (
|
| 27 |
+
ModelOutput,
|
| 28 |
+
)
|
| 29 |
+
from transformers.modeling_outputs import (
|
| 30 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
| 31 |
+
BaseModelOutputWithPoolingAndCrossAttentions,
|
| 32 |
+
CausalLMOutputWithCrossAttentions,
|
| 33 |
+
MaskedLMOutput,
|
| 34 |
+
MultipleChoiceModelOutput,
|
| 35 |
+
NextSentencePredictorOutput,
|
| 36 |
+
QuestionAnsweringModelOutput,
|
| 37 |
+
SequenceClassifierOutput,
|
| 38 |
+
TokenClassifierOutput,
|
| 39 |
+
)
|
| 40 |
+
from transformers.modeling_utils import (
|
| 41 |
+
PreTrainedModel,
|
| 42 |
+
apply_chunking_to_forward,
|
| 43 |
+
find_pruneable_heads_and_indices,
|
| 44 |
+
prune_linear_layer,
|
| 45 |
+
)
|
| 46 |
+
from transformers.utils import logging
|
| 47 |
+
from transformers.models.bert.configuration_bert import BertConfig
|
| 48 |
+
|
| 49 |
+
logger = logging.get_logger(__name__)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class BertEmbeddings(nn.Module):
|
| 53 |
+
"""Construct the embeddings from word and position embeddings."""
|
| 54 |
+
|
| 55 |
+
def __init__(self, config):
|
| 56 |
+
super().__init__()
|
| 57 |
+
self.word_embeddings = nn.Embedding(
|
| 58 |
+
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
|
| 59 |
+
)
|
| 60 |
+
self.position_embeddings = nn.Embedding(
|
| 61 |
+
config.max_position_embeddings, config.hidden_size
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
| 65 |
+
# any TensorFlow checkpoint file
|
| 66 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 67 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 68 |
+
|
| 69 |
+
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
| 70 |
+
self.register_buffer(
|
| 71 |
+
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
|
| 72 |
+
)
|
| 73 |
+
self.position_embedding_type = getattr(
|
| 74 |
+
config, "position_embedding_type", "absolute"
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
self.config = config
|
| 78 |
+
|
| 79 |
+
def forward(
|
| 80 |
+
self,
|
| 81 |
+
input_ids=None,
|
| 82 |
+
position_ids=None,
|
| 83 |
+
query_embeds=None,
|
| 84 |
+
past_key_values_length=0,
|
| 85 |
+
):
|
| 86 |
+
if input_ids is not None:
|
| 87 |
+
seq_length = input_ids.size()[1]
|
| 88 |
+
else:
|
| 89 |
+
seq_length = 0
|
| 90 |
+
|
| 91 |
+
if position_ids is None:
|
| 92 |
+
position_ids = self.position_ids[
|
| 93 |
+
:, past_key_values_length : seq_length + past_key_values_length
|
| 94 |
+
].clone()
|
| 95 |
+
|
| 96 |
+
if input_ids is not None:
|
| 97 |
+
embeddings = self.word_embeddings(input_ids)
|
| 98 |
+
if self.position_embedding_type == "absolute":
|
| 99 |
+
position_embeddings = self.position_embeddings(position_ids)
|
| 100 |
+
embeddings = embeddings + position_embeddings
|
| 101 |
+
|
| 102 |
+
if query_embeds is not None:
|
| 103 |
+
embeddings = torch.cat((query_embeds, embeddings), dim=1)
|
| 104 |
+
else:
|
| 105 |
+
embeddings = query_embeds
|
| 106 |
+
|
| 107 |
+
embeddings = self.LayerNorm(embeddings)
|
| 108 |
+
embeddings = self.dropout(embeddings)
|
| 109 |
+
return embeddings
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
class BertSelfAttention(nn.Module):
|
| 113 |
+
def __init__(self, config, is_cross_attention):
|
| 114 |
+
super().__init__()
|
| 115 |
+
self.config = config
|
| 116 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
|
| 117 |
+
config, "embedding_size"
|
| 118 |
+
):
|
| 119 |
+
raise ValueError(
|
| 120 |
+
"The hidden size (%d) is not a multiple of the number of attention "
|
| 121 |
+
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
self.num_attention_heads = config.num_attention_heads
|
| 125 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
| 126 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
| 127 |
+
|
| 128 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size)
|
| 129 |
+
if is_cross_attention:
|
| 130 |
+
self.key = nn.Linear(config.encoder_width, self.all_head_size)
|
| 131 |
+
self.value = nn.Linear(config.encoder_width, self.all_head_size)
|
| 132 |
+
else:
|
| 133 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size)
|
| 134 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size)
|
| 135 |
+
|
| 136 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
| 137 |
+
self.position_embedding_type = getattr(
|
| 138 |
+
config, "position_embedding_type", "absolute"
|
| 139 |
+
)
|
| 140 |
+
if (
|
| 141 |
+
self.position_embedding_type == "relative_key"
|
| 142 |
+
or self.position_embedding_type == "relative_key_query"
|
| 143 |
+
):
|
| 144 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 145 |
+
self.distance_embedding = nn.Embedding(
|
| 146 |
+
2 * config.max_position_embeddings - 1, self.attention_head_size
|
| 147 |
+
)
|
| 148 |
+
self.save_attention = False
|
| 149 |
+
|
| 150 |
+
def save_attn_gradients(self, attn_gradients):
|
| 151 |
+
self.attn_gradients = attn_gradients
|
| 152 |
+
|
| 153 |
+
def get_attn_gradients(self):
|
| 154 |
+
return self.attn_gradients
|
| 155 |
+
|
| 156 |
+
def save_attention_map(self, attention_map):
|
| 157 |
+
self.attention_map = attention_map
|
| 158 |
+
|
| 159 |
+
def get_attention_map(self):
|
| 160 |
+
return self.attention_map
|
| 161 |
+
|
| 162 |
+
def transpose_for_scores(self, x):
|
| 163 |
+
new_x_shape = x.size()[:-1] + (
|
| 164 |
+
self.num_attention_heads,
|
| 165 |
+
self.attention_head_size,
|
| 166 |
+
)
|
| 167 |
+
x = x.view(*new_x_shape)
|
| 168 |
+
return x.permute(0, 2, 1, 3)
|
| 169 |
+
|
| 170 |
+
def forward(
|
| 171 |
+
self,
|
| 172 |
+
hidden_states,
|
| 173 |
+
attention_mask=None,
|
| 174 |
+
head_mask=None,
|
| 175 |
+
encoder_hidden_states=None,
|
| 176 |
+
encoder_attention_mask=None,
|
| 177 |
+
past_key_value=None,
|
| 178 |
+
output_attentions=False,
|
| 179 |
+
):
|
| 180 |
+
|
| 181 |
+
# If this is instantiated as a cross-attention module, the keys
|
| 182 |
+
# and values come from an encoder; the attention mask needs to be
|
| 183 |
+
# such that the encoder's padding tokens are not attended to.
|
| 184 |
+
is_cross_attention = encoder_hidden_states is not None
|
| 185 |
+
|
| 186 |
+
if is_cross_attention:
|
| 187 |
+
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
|
| 188 |
+
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
|
| 189 |
+
attention_mask = encoder_attention_mask
|
| 190 |
+
elif past_key_value is not None:
|
| 191 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
| 192 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
| 193 |
+
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
|
| 194 |
+
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
|
| 195 |
+
else:
|
| 196 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
| 197 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
| 198 |
+
|
| 199 |
+
mixed_query_layer = self.query(hidden_states)
|
| 200 |
+
|
| 201 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
| 202 |
+
|
| 203 |
+
past_key_value = (key_layer, value_layer)
|
| 204 |
+
|
| 205 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
| 206 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
| 207 |
+
|
| 208 |
+
if (
|
| 209 |
+
self.position_embedding_type == "relative_key"
|
| 210 |
+
or self.position_embedding_type == "relative_key_query"
|
| 211 |
+
):
|
| 212 |
+
seq_length = hidden_states.size()[1]
|
| 213 |
+
position_ids_l = torch.arange(
|
| 214 |
+
seq_length, dtype=torch.long, device=hidden_states.device
|
| 215 |
+
).view(-1, 1)
|
| 216 |
+
position_ids_r = torch.arange(
|
| 217 |
+
seq_length, dtype=torch.long, device=hidden_states.device
|
| 218 |
+
).view(1, -1)
|
| 219 |
+
distance = position_ids_l - position_ids_r
|
| 220 |
+
positional_embedding = self.distance_embedding(
|
| 221 |
+
distance + self.max_position_embeddings - 1
|
| 222 |
+
)
|
| 223 |
+
positional_embedding = positional_embedding.to(
|
| 224 |
+
dtype=query_layer.dtype
|
| 225 |
+
) # fp16 compatibility
|
| 226 |
+
|
| 227 |
+
if self.position_embedding_type == "relative_key":
|
| 228 |
+
relative_position_scores = torch.einsum(
|
| 229 |
+
"bhld,lrd->bhlr", query_layer, positional_embedding
|
| 230 |
+
)
|
| 231 |
+
attention_scores = attention_scores + relative_position_scores
|
| 232 |
+
elif self.position_embedding_type == "relative_key_query":
|
| 233 |
+
relative_position_scores_query = torch.einsum(
|
| 234 |
+
"bhld,lrd->bhlr", query_layer, positional_embedding
|
| 235 |
+
)
|
| 236 |
+
relative_position_scores_key = torch.einsum(
|
| 237 |
+
"bhrd,lrd->bhlr", key_layer, positional_embedding
|
| 238 |
+
)
|
| 239 |
+
attention_scores = (
|
| 240 |
+
attention_scores
|
| 241 |
+
+ relative_position_scores_query
|
| 242 |
+
+ relative_position_scores_key
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
| 246 |
+
if attention_mask is not None:
|
| 247 |
+
# Apply the attention mask (precomputed for all layers in BertModel forward() function)
|
| 248 |
+
attention_scores = attention_scores + attention_mask
|
| 249 |
+
|
| 250 |
+
# Normalize the attention scores to probabilities.
|
| 251 |
+
attention_probs = nn.Softmax(dim=-1)(attention_scores)
|
| 252 |
+
|
| 253 |
+
if is_cross_attention and self.save_attention:
|
| 254 |
+
self.save_attention_map(attention_probs)
|
| 255 |
+
attention_probs.register_hook(self.save_attn_gradients)
|
| 256 |
+
|
| 257 |
+
# This is actually dropping out entire tokens to attend to, which might
|
| 258 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
| 259 |
+
attention_probs_dropped = self.dropout(attention_probs)
|
| 260 |
+
|
| 261 |
+
# Mask heads if we want to
|
| 262 |
+
if head_mask is not None:
|
| 263 |
+
attention_probs_dropped = attention_probs_dropped * head_mask
|
| 264 |
+
|
| 265 |
+
context_layer = torch.matmul(attention_probs_dropped, value_layer)
|
| 266 |
+
|
| 267 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
| 268 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
| 269 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
| 270 |
+
|
| 271 |
+
outputs = (
|
| 272 |
+
(context_layer, attention_probs) if output_attentions else (context_layer,)
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
outputs = outputs + (past_key_value,)
|
| 276 |
+
return outputs
|
| 277 |
+
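The relative_key / relative_key_query branch above indexes a learned distance embedding by the shifted query–key offset. A minimal sketch of that indexing, with toy sizes chosen purely for illustration:

import torch

seq_length, max_position_embeddings = 4, 8
position_ids_l = torch.arange(seq_length).view(-1, 1)   # query positions
position_ids_r = torch.arange(seq_length).view(1, -1)   # key positions
distance = position_ids_l - position_ids_r              # offsets in [-(L-1), L-1]
index = distance + max_position_embeddings - 1          # shifted into [0, 2*max-2]
# Row i, column j of `index` selects the embedding for offset (i - j); an
# nn.Embedding(2 * max_position_embeddings - 1, attention_head_size) lookup on
# `index` yields the positional_embedding tensor used in the einsum above.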
|
| 278 |
+
|
| 279 |
+
class BertSelfOutput(nn.Module):
|
| 280 |
+
def __init__(self, config):
|
| 281 |
+
super().__init__()
|
| 282 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 283 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 284 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 285 |
+
|
| 286 |
+
def forward(self, hidden_states, input_tensor):
|
| 287 |
+
hidden_states = self.dense(hidden_states)
|
| 288 |
+
hidden_states = self.dropout(hidden_states)
|
| 289 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 290 |
+
return hidden_states
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class BertAttention(nn.Module):
|
| 294 |
+
def __init__(self, config, is_cross_attention=False):
|
| 295 |
+
super().__init__()
|
| 296 |
+
self.self = BertSelfAttention(config, is_cross_attention)
|
| 297 |
+
self.output = BertSelfOutput(config)
|
| 298 |
+
self.pruned_heads = set()
|
| 299 |
+
|
| 300 |
+
def prune_heads(self, heads):
|
| 301 |
+
if len(heads) == 0:
|
| 302 |
+
return
|
| 303 |
+
heads, index = find_pruneable_heads_and_indices(
|
| 304 |
+
heads,
|
| 305 |
+
self.self.num_attention_heads,
|
| 306 |
+
self.self.attention_head_size,
|
| 307 |
+
self.pruned_heads,
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
# Prune linear layers
|
| 311 |
+
self.self.query = prune_linear_layer(self.self.query, index)
|
| 312 |
+
self.self.key = prune_linear_layer(self.self.key, index)
|
| 313 |
+
self.self.value = prune_linear_layer(self.self.value, index)
|
| 314 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
| 315 |
+
|
| 316 |
+
# Update hyper params and store pruned heads
|
| 317 |
+
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
| 318 |
+
self.self.all_head_size = (
|
| 319 |
+
self.self.attention_head_size * self.self.num_attention_heads
|
| 320 |
+
)
|
| 321 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
| 322 |
+
|
| 323 |
+
def forward(
|
| 324 |
+
self,
|
| 325 |
+
hidden_states,
|
| 326 |
+
attention_mask=None,
|
| 327 |
+
head_mask=None,
|
| 328 |
+
encoder_hidden_states=None,
|
| 329 |
+
encoder_attention_mask=None,
|
| 330 |
+
past_key_value=None,
|
| 331 |
+
output_attentions=False,
|
| 332 |
+
):
|
| 333 |
+
self_outputs = self.self(
|
| 334 |
+
hidden_states,
|
| 335 |
+
attention_mask,
|
| 336 |
+
head_mask,
|
| 337 |
+
encoder_hidden_states,
|
| 338 |
+
encoder_attention_mask,
|
| 339 |
+
past_key_value,
|
| 340 |
+
output_attentions,
|
| 341 |
+
)
|
| 342 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
| 343 |
+
|
| 344 |
+
outputs = (attention_output,) + self_outputs[
|
| 345 |
+
1:
|
| 346 |
+
] # add attentions if we output them
|
| 347 |
+
return outputs
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
class BertIntermediate(nn.Module):
|
| 351 |
+
def __init__(self, config):
|
| 352 |
+
super().__init__()
|
| 353 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
| 354 |
+
if isinstance(config.hidden_act, str):
|
| 355 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
| 356 |
+
else:
|
| 357 |
+
self.intermediate_act_fn = config.hidden_act
|
| 358 |
+
|
| 359 |
+
def forward(self, hidden_states):
|
| 360 |
+
hidden_states = self.dense(hidden_states)
|
| 361 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
| 362 |
+
return hidden_states
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class BertOutput(nn.Module):
|
| 366 |
+
def __init__(self, config):
|
| 367 |
+
super().__init__()
|
| 368 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
| 369 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 370 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
| 371 |
+
|
| 372 |
+
def forward(self, hidden_states, input_tensor):
|
| 373 |
+
hidden_states = self.dense(hidden_states)
|
| 374 |
+
hidden_states = self.dropout(hidden_states)
|
| 375 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
| 376 |
+
return hidden_states
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
class BertLayer(nn.Module):
|
| 380 |
+
def __init__(self, config, layer_num):
|
| 381 |
+
super().__init__()
|
| 382 |
+
self.config = config
|
| 383 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
| 384 |
+
self.seq_len_dim = 1
|
| 385 |
+
self.attention = BertAttention(config)
|
| 386 |
+
self.layer_num = layer_num
|
| 387 |
+
if (
|
| 388 |
+
self.config.add_cross_attention
|
| 389 |
+
and layer_num % self.config.cross_attention_freq == 0
|
| 390 |
+
):
|
| 391 |
+
self.crossattention = BertAttention(
|
| 392 |
+
config, is_cross_attention=self.config.add_cross_attention
|
| 393 |
+
)
|
| 394 |
+
self.has_cross_attention = True
|
| 395 |
+
else:
|
| 396 |
+
self.has_cross_attention = False
|
| 397 |
+
self.intermediate = BertIntermediate(config)
|
| 398 |
+
self.output = BertOutput(config)
|
| 399 |
+
|
| 400 |
+
self.intermediate_query = BertIntermediate(config)
|
| 401 |
+
self.output_query = BertOutput(config)
|
| 402 |
+
|
| 403 |
+
def forward(
|
| 404 |
+
self,
|
| 405 |
+
hidden_states,
|
| 406 |
+
attention_mask=None,
|
| 407 |
+
head_mask=None,
|
| 408 |
+
encoder_hidden_states=None,
|
| 409 |
+
encoder_attention_mask=None,
|
| 410 |
+
past_key_value=None,
|
| 411 |
+
output_attentions=False,
|
| 412 |
+
query_length=0,
|
| 413 |
+
):
|
| 414 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
| 415 |
+
self_attn_past_key_value = (
|
| 416 |
+
past_key_value[:2] if past_key_value is not None else None
|
| 417 |
+
)
|
| 418 |
+
self_attention_outputs = self.attention(
|
| 419 |
+
hidden_states,
|
| 420 |
+
attention_mask,
|
| 421 |
+
head_mask,
|
| 422 |
+
output_attentions=output_attentions,
|
| 423 |
+
past_key_value=self_attn_past_key_value,
|
| 424 |
+
)
|
| 425 |
+
attention_output = self_attention_outputs[0]
|
| 426 |
+
outputs = self_attention_outputs[1:-1]
|
| 427 |
+
|
| 428 |
+
present_key_value = self_attention_outputs[-1]
|
| 429 |
+
|
| 430 |
+
if query_length > 0:
|
| 431 |
+
query_attention_output = attention_output[:, :query_length, :]
|
| 432 |
+
|
| 433 |
+
if self.has_cross_attention:
|
| 434 |
+
assert (
|
| 435 |
+
encoder_hidden_states is not None
|
| 436 |
+
), "encoder_hidden_states must be given for cross-attention layers"
|
| 437 |
+
cross_attention_outputs = self.crossattention(
|
| 438 |
+
query_attention_output,
|
| 439 |
+
attention_mask,
|
| 440 |
+
head_mask,
|
| 441 |
+
encoder_hidden_states,
|
| 442 |
+
encoder_attention_mask,
|
| 443 |
+
output_attentions=output_attentions,
|
| 444 |
+
)
|
| 445 |
+
query_attention_output = cross_attention_outputs[0]
|
| 446 |
+
outputs = (
|
| 447 |
+
outputs + cross_attention_outputs[1:-1]
|
| 448 |
+
) # add cross attentions if we output attention weights
|
| 449 |
+
|
| 450 |
+
layer_output = apply_chunking_to_forward(
|
| 451 |
+
self.feed_forward_chunk_query,
|
| 452 |
+
self.chunk_size_feed_forward,
|
| 453 |
+
self.seq_len_dim,
|
| 454 |
+
query_attention_output,
|
| 455 |
+
)
|
| 456 |
+
if attention_output.shape[1] > query_length:
|
| 457 |
+
layer_output_text = apply_chunking_to_forward(
|
| 458 |
+
self.feed_forward_chunk,
|
| 459 |
+
self.chunk_size_feed_forward,
|
| 460 |
+
self.seq_len_dim,
|
| 461 |
+
attention_output[:, query_length:, :],
|
| 462 |
+
)
|
| 463 |
+
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
|
| 464 |
+
else:
|
| 465 |
+
layer_output = apply_chunking_to_forward(
|
| 466 |
+
self.feed_forward_chunk,
|
| 467 |
+
self.chunk_size_feed_forward,
|
| 468 |
+
self.seq_len_dim,
|
| 469 |
+
attention_output,
|
| 470 |
+
)
|
| 471 |
+
outputs = (layer_output,) + outputs
|
| 472 |
+
|
| 473 |
+
outputs = outputs + (present_key_value,)
|
| 474 |
+
|
| 475 |
+
return outputs
|
| 476 |
+
|
| 477 |
+
def feed_forward_chunk(self, attention_output):
|
| 478 |
+
intermediate_output = self.intermediate(attention_output)
|
| 479 |
+
layer_output = self.output(intermediate_output, attention_output)
|
| 480 |
+
return layer_output
|
| 481 |
+
|
| 482 |
+
def feed_forward_chunk_query(self, attention_output):
|
| 483 |
+
intermediate_output = self.intermediate_query(attention_output)
|
| 484 |
+
layer_output = self.output_query(intermediate_output, attention_output)
|
| 485 |
+
return layer_output
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
class BertEncoder(nn.Module):
|
| 489 |
+
def __init__(self, config):
|
| 490 |
+
super().__init__()
|
| 491 |
+
self.config = config
|
| 492 |
+
self.layer = nn.ModuleList(
|
| 493 |
+
[BertLayer(config, i) for i in range(config.num_hidden_layers)]
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
def forward(
|
| 497 |
+
self,
|
| 498 |
+
hidden_states,
|
| 499 |
+
attention_mask=None,
|
| 500 |
+
head_mask=None,
|
| 501 |
+
encoder_hidden_states=None,
|
| 502 |
+
encoder_attention_mask=None,
|
| 503 |
+
past_key_values=None,
|
| 504 |
+
use_cache=None,
|
| 505 |
+
output_attentions=False,
|
| 506 |
+
output_hidden_states=False,
|
| 507 |
+
return_dict=True,
|
| 508 |
+
query_length=0,
|
| 509 |
+
):
|
| 510 |
+
all_hidden_states = () if output_hidden_states else None
|
| 511 |
+
all_self_attentions = () if output_attentions else None
|
| 512 |
+
all_cross_attentions = (
|
| 513 |
+
() if output_attentions and self.config.add_cross_attention else None
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
next_decoder_cache = () if use_cache else None
|
| 517 |
+
|
| 518 |
+
for i in range(self.config.num_hidden_layers):
|
| 519 |
+
layer_module = self.layer[i]
|
| 520 |
+
if output_hidden_states:
|
| 521 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 522 |
+
|
| 523 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
| 524 |
+
past_key_value = past_key_values[i] if past_key_values is not None else None
|
| 525 |
+
|
| 526 |
+
if getattr(self.config, "gradient_checkpointing", False) and self.training:
|
| 527 |
+
|
| 528 |
+
if use_cache:
|
| 529 |
+
logger.warn(
|
| 530 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 531 |
+
)
|
| 532 |
+
use_cache = False
|
| 533 |
+
|
| 534 |
+
def create_custom_forward(module):
|
| 535 |
+
def custom_forward(*inputs):
|
| 536 |
+
return module(
|
| 537 |
+
*inputs, past_key_value, output_attentions, query_length
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
return custom_forward
|
| 541 |
+
|
| 542 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 543 |
+
create_custom_forward(layer_module),
|
| 544 |
+
hidden_states,
|
| 545 |
+
attention_mask,
|
| 546 |
+
layer_head_mask,
|
| 547 |
+
encoder_hidden_states,
|
| 548 |
+
encoder_attention_mask,
|
| 549 |
+
)
|
| 550 |
+
else:
|
| 551 |
+
layer_outputs = layer_module(
|
| 552 |
+
hidden_states,
|
| 553 |
+
attention_mask,
|
| 554 |
+
layer_head_mask,
|
| 555 |
+
encoder_hidden_states,
|
| 556 |
+
encoder_attention_mask,
|
| 557 |
+
past_key_value,
|
| 558 |
+
output_attentions,
|
| 559 |
+
query_length,
|
| 560 |
+
)
|
| 561 |
+
|
| 562 |
+
hidden_states = layer_outputs[0]
|
| 563 |
+
if use_cache:
|
| 564 |
+
next_decoder_cache += (layer_outputs[-1],)
|
| 565 |
+
if output_attentions:
|
| 566 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
| 567 |
+
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
|
| 568 |
+
|
| 569 |
+
if output_hidden_states:
|
| 570 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 571 |
+
|
| 572 |
+
if not return_dict:
|
| 573 |
+
return tuple(
|
| 574 |
+
v
|
| 575 |
+
for v in [
|
| 576 |
+
hidden_states,
|
| 577 |
+
next_decoder_cache,
|
| 578 |
+
all_hidden_states,
|
| 579 |
+
all_self_attentions,
|
| 580 |
+
all_cross_attentions,
|
| 581 |
+
]
|
| 582 |
+
if v is not None
|
| 583 |
+
)
|
| 584 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
| 585 |
+
last_hidden_state=hidden_states,
|
| 586 |
+
past_key_values=next_decoder_cache,
|
| 587 |
+
hidden_states=all_hidden_states,
|
| 588 |
+
attentions=all_self_attentions,
|
| 589 |
+
cross_attentions=all_cross_attentions,
|
| 590 |
+
)
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
class BertPooler(nn.Module):
|
| 594 |
+
def __init__(self, config):
|
| 595 |
+
super().__init__()
|
| 596 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 597 |
+
self.activation = nn.Tanh()
|
| 598 |
+
|
| 599 |
+
def forward(self, hidden_states):
|
| 600 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
| 601 |
+
# to the first token.
|
| 602 |
+
first_token_tensor = hidden_states[:, 0]
|
| 603 |
+
pooled_output = self.dense(first_token_tensor)
|
| 604 |
+
pooled_output = self.activation(pooled_output)
|
| 605 |
+
return pooled_output
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
class BertPredictionHeadTransform(nn.Module):
|
| 609 |
+
def __init__(self, config):
|
| 610 |
+
super().__init__()
|
| 611 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 612 |
+
if isinstance(config.hidden_act, str):
|
| 613 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
| 614 |
+
else:
|
| 615 |
+
self.transform_act_fn = config.hidden_act
|
| 616 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 617 |
+
|
| 618 |
+
def forward(self, hidden_states):
|
| 619 |
+
hidden_states = self.dense(hidden_states)
|
| 620 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
| 621 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 622 |
+
return hidden_states
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class BertLMPredictionHead(nn.Module):
|
| 626 |
+
def __init__(self, config):
|
| 627 |
+
super().__init__()
|
| 628 |
+
self.transform = BertPredictionHeadTransform(config)
|
| 629 |
+
|
| 630 |
+
# The output weights are the same as the input embeddings, but there is
|
| 631 |
+
# an output-only bias for each token.
|
| 632 |
+
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 633 |
+
|
| 634 |
+
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
| 635 |
+
|
| 636 |
+
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
| 637 |
+
self.decoder.bias = self.bias
|
| 638 |
+
|
| 639 |
+
def forward(self, hidden_states):
|
| 640 |
+
hidden_states = self.transform(hidden_states)
|
| 641 |
+
hidden_states = self.decoder(hidden_states)
|
| 642 |
+
return hidden_states
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
class BertOnlyMLMHead(nn.Module):
|
| 646 |
+
def __init__(self, config):
|
| 647 |
+
super().__init__()
|
| 648 |
+
self.predictions = BertLMPredictionHead(config)
|
| 649 |
+
|
| 650 |
+
def forward(self, sequence_output):
|
| 651 |
+
prediction_scores = self.predictions(sequence_output)
|
| 652 |
+
return prediction_scores
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
class BertPreTrainedModel(PreTrainedModel):
|
| 656 |
+
"""
|
| 657 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 658 |
+
models.
|
| 659 |
+
"""
|
| 660 |
+
|
| 661 |
+
config_class = BertConfig
|
| 662 |
+
base_model_prefix = "bert"
|
| 663 |
+
_keys_to_ignore_on_load_missing = [r"position_ids"]
|
| 664 |
+
|
| 665 |
+
def _init_weights(self, module):
|
| 666 |
+
"""Initialize the weights"""
|
| 667 |
+
if isinstance(module, (nn.Linear, nn.Embedding)):
|
| 668 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
| 669 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
| 670 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
| 671 |
+
elif isinstance(module, nn.LayerNorm):
|
| 672 |
+
module.bias.data.zero_()
|
| 673 |
+
module.weight.data.fill_(1.0)
|
| 674 |
+
if isinstance(module, nn.Linear) and module.bias is not None:
|
| 675 |
+
module.bias.data.zero_()
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
class BertModel(BertPreTrainedModel):
|
| 679 |
+
"""
|
| 680 |
+
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
|
| 681 |
+
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
|
| 682 |
+
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
|
| 683 |
+
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
|
| 684 |
+
To be used as a decoder, the model needs to be initialized with the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
|
| 685 |
+
input to the forward pass.
|
| 686 |
+
"""
|
| 687 |
+
|
| 688 |
+
def __init__(self, config, add_pooling_layer=False):
|
| 689 |
+
super().__init__(config)
|
| 690 |
+
self.config = config
|
| 691 |
+
|
| 692 |
+
self.embeddings = BertEmbeddings(config)
|
| 693 |
+
|
| 694 |
+
self.encoder = BertEncoder(config)
|
| 695 |
+
|
| 696 |
+
self.pooler = BertPooler(config) if add_pooling_layer else None
|
| 697 |
+
|
| 698 |
+
self.init_weights()
|
| 699 |
+
|
| 700 |
+
def get_input_embeddings(self):
|
| 701 |
+
return self.embeddings.word_embeddings
|
| 702 |
+
|
| 703 |
+
def set_input_embeddings(self, value):
|
| 704 |
+
self.embeddings.word_embeddings = value
|
| 705 |
+
|
| 706 |
+
def _prune_heads(self, heads_to_prune):
|
| 707 |
+
"""
|
| 708 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
| 709 |
+
class PreTrainedModel
|
| 710 |
+
"""
|
| 711 |
+
for layer, heads in heads_to_prune.items():
|
| 712 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
| 713 |
+
|
| 714 |
+
def get_extended_attention_mask(
|
| 715 |
+
self,
|
| 716 |
+
attention_mask: Tensor,
|
| 717 |
+
input_shape: Tuple[int],
|
| 718 |
+
device: device,
|
| 719 |
+
is_decoder: bool,
|
| 720 |
+
has_query: bool = False,
|
| 721 |
+
) -> Tensor:
|
| 722 |
+
"""
|
| 723 |
+
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
| 724 |
+
|
| 725 |
+
Arguments:
|
| 726 |
+
attention_mask (:obj:`torch.Tensor`):
|
| 727 |
+
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
| 728 |
+
input_shape (:obj:`Tuple[int]`):
|
| 729 |
+
The shape of the input to the model.
|
| 730 |
+
device: (:obj:`torch.device`):
|
| 731 |
+
The device of the input to the model.
|
| 732 |
+
|
| 733 |
+
Returns:
|
| 734 |
+
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
|
| 735 |
+
"""
|
| 736 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
| 737 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
| 738 |
+
if attention_mask.dim() == 3:
|
| 739 |
+
extended_attention_mask = attention_mask[:, None, :, :]
|
| 740 |
+
elif attention_mask.dim() == 2:
|
| 741 |
+
# Provided a padding mask of dimensions [batch_size, seq_length]
|
| 742 |
+
# - if the model is a decoder, apply a causal mask in addition to the padding mask
|
| 743 |
+
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
| 744 |
+
if is_decoder:
|
| 745 |
+
batch_size, seq_length = input_shape
|
| 746 |
+
|
| 747 |
+
seq_ids = torch.arange(seq_length, device=device)
|
| 748 |
+
causal_mask = (
|
| 749 |
+
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
|
| 750 |
+
<= seq_ids[None, :, None]
|
| 751 |
+
)
|
| 752 |
+
|
| 753 |
+
# add a prefix ones mask to the causal mask
|
| 754 |
+
# causal and attention masks must have same type with pytorch version < 1.3
|
| 755 |
+
causal_mask = causal_mask.to(attention_mask.dtype)
|
| 756 |
+
|
| 757 |
+
if causal_mask.shape[1] < attention_mask.shape[1]:
|
| 758 |
+
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
|
| 759 |
+
if has_query: # UniLM style attention mask
|
| 760 |
+
causal_mask = torch.cat(
|
| 761 |
+
[
|
| 762 |
+
torch.zeros(
|
| 763 |
+
(batch_size, prefix_seq_len, seq_length),
|
| 764 |
+
device=device,
|
| 765 |
+
dtype=causal_mask.dtype,
|
| 766 |
+
),
|
| 767 |
+
causal_mask,
|
| 768 |
+
],
|
| 769 |
+
axis=1,
|
| 770 |
+
)
|
| 771 |
+
causal_mask = torch.cat(
|
| 772 |
+
[
|
| 773 |
+
torch.ones(
|
| 774 |
+
(batch_size, causal_mask.shape[1], prefix_seq_len),
|
| 775 |
+
device=device,
|
| 776 |
+
dtype=causal_mask.dtype,
|
| 777 |
+
),
|
| 778 |
+
causal_mask,
|
| 779 |
+
],
|
| 780 |
+
axis=-1,
|
| 781 |
+
)
|
| 782 |
+
extended_attention_mask = (
|
| 783 |
+
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
|
| 784 |
+
)
|
| 785 |
+
else:
|
| 786 |
+
extended_attention_mask = attention_mask[:, None, None, :]
|
| 787 |
+
else:
|
| 788 |
+
raise ValueError(
|
| 789 |
+
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
|
| 790 |
+
input_shape, attention_mask.shape
|
| 791 |
+
)
|
| 792 |
+
)
|
| 793 |
+
|
| 794 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
| 795 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
| 796 |
+
# positions we want to attend and -10000.0 for masked positions.
|
| 797 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
| 798 |
+
# effectively the same as removing these entirely.
|
| 799 |
+
extended_attention_mask = extended_attention_mask.to(
|
| 800 |
+
dtype=self.dtype
|
| 801 |
+
) # fp16 compatibility
|
| 802 |
+
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
| 803 |
+
return extended_attention_mask
|
| 804 |
+
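For the common 2-D, non-causal case, the conversion at the end of get_extended_attention_mask reduces to broadcasting the padding mask and mapping kept positions to 0 and masked positions to -10000. A small sketch with a toy mask:

import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])          # last token is padding
extended = attention_mask[:, None, None, :].float()    # (batch, 1, 1, seq_len)
extended = (1.0 - extended) * -10000.0
# Kept positions contribute 0 to the attention scores; the padded position
# contributes -10000, which effectively vanishes after the softmax.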
|
| 805 |
+
def forward(
|
| 806 |
+
self,
|
| 807 |
+
input_ids=None,
|
| 808 |
+
attention_mask=None,
|
| 809 |
+
position_ids=None,
|
| 810 |
+
head_mask=None,
|
| 811 |
+
query_embeds=None,
|
| 812 |
+
encoder_hidden_states=None,
|
| 813 |
+
encoder_attention_mask=None,
|
| 814 |
+
past_key_values=None,
|
| 815 |
+
use_cache=None,
|
| 816 |
+
output_attentions=None,
|
| 817 |
+
output_hidden_states=None,
|
| 818 |
+
return_dict=None,
|
| 819 |
+
is_decoder=False,
|
| 820 |
+
):
|
| 821 |
+
r"""
|
| 822 |
+
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
| 823 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
| 824 |
+
the model is configured as a decoder.
|
| 825 |
+
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 826 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
| 827 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
| 828 |
+
- 1 for tokens that are **not masked**,
|
| 829 |
+
- 0 for tokens that are **masked**.
|
| 830 |
+
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
| 831 |
+
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
| 832 |
+
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
| 833 |
+
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
| 834 |
+
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
| 835 |
+
use_cache (:obj:`bool`, `optional`):
|
| 836 |
+
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
| 837 |
+
decoding (see :obj:`past_key_values`).
|
| 838 |
+
"""
|
| 839 |
+
output_attentions = (
|
| 840 |
+
output_attentions
|
| 841 |
+
if output_attentions is not None
|
| 842 |
+
else self.config.output_attentions
|
| 843 |
+
)
|
| 844 |
+
output_hidden_states = (
|
| 845 |
+
output_hidden_states
|
| 846 |
+
if output_hidden_states is not None
|
| 847 |
+
else self.config.output_hidden_states
|
| 848 |
+
)
|
| 849 |
+
return_dict = (
|
| 850 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
# use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 854 |
+
|
| 855 |
+
if input_ids is None:
|
| 856 |
+
assert (
|
| 857 |
+
query_embeds is not None
|
| 858 |
+
), "You have to specify query_embeds when input_ids is None"
|
| 859 |
+
|
| 860 |
+
# past_key_values_length
|
| 861 |
+
past_key_values_length = (
|
| 862 |
+
past_key_values[0][0].shape[2] - self.config.query_length
|
| 863 |
+
if past_key_values is not None
|
| 864 |
+
else 0
|
| 865 |
+
)
|
| 866 |
+
|
| 867 |
+
query_length = query_embeds.shape[1] if query_embeds is not None else 0
|
| 868 |
+
|
| 869 |
+
embedding_output = self.embeddings(
|
| 870 |
+
input_ids=input_ids,
|
| 871 |
+
position_ids=position_ids,
|
| 872 |
+
query_embeds=query_embeds,
|
| 873 |
+
past_key_values_length=past_key_values_length,
|
| 874 |
+
)
|
| 875 |
+
|
| 876 |
+
input_shape = embedding_output.size()[:-1]
|
| 877 |
+
batch_size, seq_length = input_shape
|
| 878 |
+
device = embedding_output.device
|
| 879 |
+
|
| 880 |
+
if attention_mask is None:
|
| 881 |
+
attention_mask = torch.ones(
|
| 882 |
+
((batch_size, seq_length + past_key_values_length)), device=device
|
| 883 |
+
)
|
| 884 |
+
|
| 885 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
| 886 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
| 887 |
+
if is_decoder:
|
| 888 |
+
extended_attention_mask = self.get_extended_attention_mask(
|
| 889 |
+
attention_mask,
|
| 890 |
+
input_ids.shape,
|
| 891 |
+
device,
|
| 892 |
+
is_decoder,
|
| 893 |
+
has_query=(query_embeds is not None),
|
| 894 |
+
)
|
| 895 |
+
else:
|
| 896 |
+
extended_attention_mask = self.get_extended_attention_mask(
|
| 897 |
+
attention_mask, input_shape, device, is_decoder
|
| 898 |
+
)
|
| 899 |
+
|
| 900 |
+
# If a 2D or 3D attention mask is provided for the cross-attention
|
| 901 |
+
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
| 902 |
+
if encoder_hidden_states is not None:
|
| 903 |
+
if type(encoder_hidden_states) == list:
|
| 904 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
|
| 905 |
+
0
|
| 906 |
+
].size()
|
| 907 |
+
else:
|
| 908 |
+
(
|
| 909 |
+
encoder_batch_size,
|
| 910 |
+
encoder_sequence_length,
|
| 911 |
+
_,
|
| 912 |
+
) = encoder_hidden_states.size()
|
| 913 |
+
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
| 914 |
+
|
| 915 |
+
if type(encoder_attention_mask) == list:
|
| 916 |
+
encoder_extended_attention_mask = [
|
| 917 |
+
self.invert_attention_mask(mask) for mask in encoder_attention_mask
|
| 918 |
+
]
|
| 919 |
+
elif encoder_attention_mask is None:
|
| 920 |
+
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
| 921 |
+
encoder_extended_attention_mask = self.invert_attention_mask(
|
| 922 |
+
encoder_attention_mask
|
| 923 |
+
)
|
| 924 |
+
else:
|
| 925 |
+
encoder_extended_attention_mask = self.invert_attention_mask(
|
| 926 |
+
encoder_attention_mask
|
| 927 |
+
)
|
| 928 |
+
else:
|
| 929 |
+
encoder_extended_attention_mask = None
|
| 930 |
+
|
| 931 |
+
# Prepare head mask if needed
|
| 932 |
+
# 1.0 in head_mask indicate we keep the head
|
| 933 |
+
# attention_probs has shape bsz x n_heads x N x N
|
| 934 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
| 935 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
| 936 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
| 937 |
+
|
| 938 |
+
encoder_outputs = self.encoder(
|
| 939 |
+
embedding_output,
|
| 940 |
+
attention_mask=extended_attention_mask,
|
| 941 |
+
head_mask=head_mask,
|
| 942 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 943 |
+
encoder_attention_mask=encoder_extended_attention_mask,
|
| 944 |
+
past_key_values=past_key_values,
|
| 945 |
+
use_cache=use_cache,
|
| 946 |
+
output_attentions=output_attentions,
|
| 947 |
+
output_hidden_states=output_hidden_states,
|
| 948 |
+
return_dict=return_dict,
|
| 949 |
+
query_length=query_length,
|
| 950 |
+
)
|
| 951 |
+
sequence_output = encoder_outputs[0]
|
| 952 |
+
pooled_output = (
|
| 953 |
+
self.pooler(sequence_output) if self.pooler is not None else None
|
| 954 |
+
)
|
| 955 |
+
|
| 956 |
+
if not return_dict:
|
| 957 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
| 958 |
+
|
| 959 |
+
return BaseModelOutputWithPoolingAndCrossAttentions(
|
| 960 |
+
last_hidden_state=sequence_output,
|
| 961 |
+
pooler_output=pooled_output,
|
| 962 |
+
past_key_values=encoder_outputs.past_key_values,
|
| 963 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 964 |
+
attentions=encoder_outputs.attentions,
|
| 965 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
| 966 |
+
)
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
class BertLMHeadModel(BertPreTrainedModel):
|
| 970 |
+
|
| 971 |
+
_keys_to_ignore_on_load_unexpected = [r"pooler"]
|
| 972 |
+
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
|
| 973 |
+
|
| 974 |
+
def __init__(self, config):
|
| 975 |
+
super().__init__(config)
|
| 976 |
+
|
| 977 |
+
self.bert = BertModel(config, add_pooling_layer=False)
|
| 978 |
+
self.cls = BertOnlyMLMHead(config)
|
| 979 |
+
|
| 980 |
+
self.init_weights()
|
| 981 |
+
|
| 982 |
+
def get_output_embeddings(self):
|
| 983 |
+
return self.cls.predictions.decoder
|
| 984 |
+
|
| 985 |
+
def set_output_embeddings(self, new_embeddings):
|
| 986 |
+
self.cls.predictions.decoder = new_embeddings
|
| 987 |
+
|
| 988 |
+
def forward(
|
| 989 |
+
self,
|
| 990 |
+
input_ids=None,
|
| 991 |
+
attention_mask=None,
|
| 992 |
+
position_ids=None,
|
| 993 |
+
head_mask=None,
|
| 994 |
+
query_embeds=None,
|
| 995 |
+
encoder_hidden_states=None,
|
| 996 |
+
encoder_attention_mask=None,
|
| 997 |
+
labels=None,
|
| 998 |
+
past_key_values=None,
|
| 999 |
+
use_cache=True,
|
| 1000 |
+
output_attentions=None,
|
| 1001 |
+
output_hidden_states=None,
|
| 1002 |
+
return_dict=None,
|
| 1003 |
+
return_logits=False,
|
| 1004 |
+
is_decoder=True,
|
| 1005 |
+
reduction="mean",
|
| 1006 |
+
):
|
| 1007 |
+
r"""
|
| 1008 |
+
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
| 1009 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
| 1010 |
+
the model is configured as a decoder.
|
| 1011 |
+
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 1012 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
| 1013 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
| 1014 |
+
- 1 for tokens that are **not masked**,
|
| 1015 |
+
- 0 for tokens that are **masked**.
|
| 1016 |
+
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 1017 |
+
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
|
| 1018 |
+
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
|
| 1019 |
+
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
|
| 1020 |
+
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
| 1021 |
+
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
| 1022 |
+
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
| 1023 |
+
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
| 1024 |
+
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
| 1025 |
+
use_cache (:obj:`bool`, `optional`):
|
| 1026 |
+
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
| 1027 |
+
decoding (see :obj:`past_key_values`).
|
| 1028 |
+
Returns:
|
| 1029 |
+
Example::
|
| 1030 |
+
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
|
| 1031 |
+
>>> import torch
|
| 1032 |
+
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
|
| 1033 |
+
>>> config = BertConfig.from_pretrained("bert-base-cased")
|
| 1034 |
+
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
|
| 1035 |
+
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
|
| 1036 |
+
>>> outputs = model(**inputs)
|
| 1037 |
+
>>> prediction_logits = outputs.logits
|
| 1038 |
+
"""
|
| 1039 |
+
return_dict = (
|
| 1040 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
| 1041 |
+
)
|
| 1042 |
+
if labels is not None:
|
| 1043 |
+
use_cache = False
|
| 1044 |
+
if past_key_values is not None:
|
| 1045 |
+
query_embeds = None
|
| 1046 |
+
|
| 1047 |
+
outputs = self.bert(
|
| 1048 |
+
input_ids,
|
| 1049 |
+
attention_mask=attention_mask,
|
| 1050 |
+
position_ids=position_ids,
|
| 1051 |
+
head_mask=head_mask,
|
| 1052 |
+
query_embeds=query_embeds,
|
| 1053 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 1054 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 1055 |
+
past_key_values=past_key_values,
|
| 1056 |
+
use_cache=use_cache,
|
| 1057 |
+
output_attentions=output_attentions,
|
| 1058 |
+
output_hidden_states=output_hidden_states,
|
| 1059 |
+
return_dict=return_dict,
|
| 1060 |
+
is_decoder=is_decoder,
|
| 1061 |
+
)
|
| 1062 |
+
|
| 1063 |
+
sequence_output = outputs[0]
|
| 1064 |
+
if query_embeds is not None:
|
| 1065 |
+
sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
|
| 1066 |
+
|
| 1067 |
+
prediction_scores = self.cls(sequence_output)
|
| 1068 |
+
|
| 1069 |
+
if return_logits:
|
| 1070 |
+
return prediction_scores[:, :-1, :].contiguous()
|
| 1071 |
+
|
| 1072 |
+
lm_loss = None
|
| 1073 |
+
if labels is not None:
|
| 1074 |
+
# we are doing next-token prediction; shift prediction scores and input ids by one
|
| 1075 |
+
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
|
| 1076 |
+
labels = labels[:, 1:].contiguous()
|
| 1077 |
+
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
|
| 1078 |
+
lm_loss = loss_fct(
|
| 1079 |
+
shifted_prediction_scores.view(-1, self.config.vocab_size),
|
| 1080 |
+
labels.view(-1),
|
| 1081 |
+
)
|
| 1082 |
+
if reduction == "none":
|
| 1083 |
+
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
|
| 1084 |
+
|
| 1085 |
+
if not return_dict:
|
| 1086 |
+
output = (prediction_scores,) + outputs[2:]
|
| 1087 |
+
return ((lm_loss,) + output) if lm_loss is not None else output
|
| 1088 |
+
|
| 1089 |
+
return CausalLMOutputWithCrossAttentions(
|
| 1090 |
+
loss=lm_loss,
|
| 1091 |
+
logits=prediction_scores,
|
| 1092 |
+
past_key_values=outputs.past_key_values,
|
| 1093 |
+
hidden_states=outputs.hidden_states,
|
| 1094 |
+
attentions=outputs.attentions,
|
| 1095 |
+
cross_attentions=outputs.cross_attentions,
|
| 1096 |
+
)
|
| 1097 |
+
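The label handling above implements standard next-token prediction: the logits at position t are scored against the token at position t+1, with label smoothing 0.1. A toy sketch of that shift, with arbitrary sizes:

import torch

vocab_size = 10
prediction_scores = torch.randn(1, 5, vocab_size)            # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (1, 5))

shifted_scores = prediction_scores[:, :-1, :].contiguous()   # predicts positions 1..4
shifted_labels = labels[:, 1:].contiguous()
loss = torch.nn.CrossEntropyLoss(label_smoothing=0.1)(
    shifted_scores.view(-1, vocab_size), shifted_labels.view(-1)
)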
|
| 1098 |
+
def prepare_inputs_for_generation(
|
| 1099 |
+
self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
|
| 1100 |
+
):
|
| 1101 |
+
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
|
| 1102 |
+
if attention_mask is None:
|
| 1103 |
+
attention_mask = input_ids.new_ones(input_ids.shape)
|
| 1104 |
+
query_mask = input_ids.new_ones(query_embeds.shape[:-1])
|
| 1105 |
+
attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
|
| 1106 |
+
|
| 1107 |
+
# cut decoder_input_ids if past is used
|
| 1108 |
+
if past is not None:
|
| 1109 |
+
input_ids = input_ids[:, -1:]
|
| 1110 |
+
|
| 1111 |
+
return {
|
| 1112 |
+
"input_ids": input_ids,
|
| 1113 |
+
"query_embeds": query_embeds,
|
| 1114 |
+
"attention_mask": attention_mask,
|
| 1115 |
+
"past_key_values": past,
|
| 1116 |
+
"encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
|
| 1117 |
+
"encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
|
| 1118 |
+
"is_decoder": True,
|
| 1119 |
+
}
|
| 1120 |
+
|
| 1121 |
+
def _reorder_cache(self, past, beam_idx):
|
| 1122 |
+
reordered_past = ()
|
| 1123 |
+
for layer_past in past:
|
| 1124 |
+
reordered_past += (
|
| 1125 |
+
tuple(
|
| 1126 |
+
past_state.index_select(0, beam_idx) for past_state in layer_past
|
| 1127 |
+
),
|
| 1128 |
+
)
|
| 1129 |
+
return reordered_past
|
| 1130 |
+
|
| 1131 |
+
|
| 1132 |
+
class BertForMaskedLM(BertPreTrainedModel):
|
| 1133 |
+
|
| 1134 |
+
_keys_to_ignore_on_load_unexpected = [r"pooler"]
|
| 1135 |
+
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
|
| 1136 |
+
|
| 1137 |
+
def __init__(self, config):
|
| 1138 |
+
super().__init__(config)
|
| 1139 |
+
|
| 1140 |
+
self.bert = BertModel(config, add_pooling_layer=False)
|
| 1141 |
+
self.cls = BertOnlyMLMHead(config)
|
| 1142 |
+
|
| 1143 |
+
self.init_weights()
|
| 1144 |
+
|
| 1145 |
+
def get_output_embeddings(self):
|
| 1146 |
+
return self.cls.predictions.decoder
|
| 1147 |
+
|
| 1148 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1149 |
+
self.cls.predictions.decoder = new_embeddings
|
| 1150 |
+
|
| 1151 |
+
def forward(
|
| 1152 |
+
self,
|
| 1153 |
+
input_ids=None,
|
| 1154 |
+
attention_mask=None,
|
| 1155 |
+
position_ids=None,
|
| 1156 |
+
head_mask=None,
|
| 1157 |
+
query_embeds=None,
|
| 1158 |
+
encoder_hidden_states=None,
|
| 1159 |
+
encoder_attention_mask=None,
|
| 1160 |
+
labels=None,
|
| 1161 |
+
output_attentions=None,
|
| 1162 |
+
output_hidden_states=None,
|
| 1163 |
+
return_dict=None,
|
| 1164 |
+
return_logits=False,
|
| 1165 |
+
is_decoder=False,
|
| 1166 |
+
):
|
| 1167 |
+
r"""
|
| 1168 |
+
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 1169 |
+
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
|
| 1170 |
+
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
|
| 1171 |
+
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
|
| 1172 |
+
"""
|
| 1173 |
+
|
| 1174 |
+
return_dict = (
|
| 1175 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
| 1176 |
+
)
|
| 1177 |
+
|
| 1178 |
+
outputs = self.bert(
|
| 1179 |
+
input_ids,
|
| 1180 |
+
attention_mask=attention_mask,
|
| 1181 |
+
position_ids=position_ids,
|
| 1182 |
+
head_mask=head_mask,
|
| 1183 |
+
query_embeds=query_embeds,
|
| 1184 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 1185 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 1186 |
+
output_attentions=output_attentions,
|
| 1187 |
+
output_hidden_states=output_hidden_states,
|
| 1188 |
+
return_dict=return_dict,
|
| 1189 |
+
is_decoder=is_decoder,
|
| 1190 |
+
)
|
| 1191 |
+
|
| 1192 |
+
if query_embeds is not None:
|
| 1193 |
+
sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
|
| 1194 |
+
prediction_scores = self.cls(sequence_output)
|
| 1195 |
+
|
| 1196 |
+
if return_logits:
|
| 1197 |
+
return prediction_scores
|
| 1198 |
+
|
| 1199 |
+
masked_lm_loss = None
|
| 1200 |
+
if labels is not None:
|
| 1201 |
+
loss_fct = CrossEntropyLoss() # -100 index = padding token
|
| 1202 |
+
masked_lm_loss = loss_fct(
|
| 1203 |
+
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
|
| 1204 |
+
)
|
| 1205 |
+
|
| 1206 |
+
if not return_dict:
|
| 1207 |
+
output = (prediction_scores,) + outputs[2:]
|
| 1208 |
+
return (
|
| 1209 |
+
((masked_lm_loss,) + output) if masked_lm_loss is not None else output
|
| 1210 |
+
)
|
| 1211 |
+
|
| 1212 |
+
return MaskedLMOutput(
|
| 1213 |
+
loss=masked_lm_loss,
|
| 1214 |
+
logits=prediction_scores,
|
| 1215 |
+
hidden_states=outputs.hidden_states,
|
| 1216 |
+
attentions=outputs.attentions,
|
| 1217 |
+
)
|
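A minimal sketch of driving the Q-Former classes above with learnable query tokens attending to pre-computed encoder features. The tiny BertConfig values, the extra attributes attached to it (including `encoder_width` for the cross-attention key/value projections defined earlier in the file), and the encoder feature shape are illustrative assumptions; the real settings come from config.yaml and models/salmonn.py.

import torch
from transformers import BertConfig
from models.Qformer import BertLMHeadModel   # assumed module path for the classes above

config = BertConfig(hidden_size=64, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=128)
config.add_cross_attention = True    # enable the cross-attention branch in BertLayer
config.cross_attention_freq = 2      # cross-attend in every 2nd layer
config.query_length = 8              # number of learnable query tokens
config.encoder_width = 64            # assumed width expected by the cross-attention K/V projections

qformer = BertLMHeadModel(config)

query_embeds = torch.randn(1, config.query_length, config.hidden_size)
audio_feats = torch.randn(1, 50, config.encoder_width)   # stand-in encoder output

out = qformer.bert(query_embeds=query_embeds,
                   encoder_hidden_states=audio_feats,
                   return_dict=True)
# out.last_hidden_state: (1, 8, 64) — one hidden state per query token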
models/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
| 1 |
+
# Copyright (2024) Tsinghua University, Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from .salmonn import SALMONN
|
| 16 |
+
|
| 17 |
+
def load_model(config):
|
| 18 |
+
return SALMONN.from_config(config)
|
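A minimal usage sketch for the helper above. The OmegaConf loader and the "model" sub-key are assumptions; the keys SALMONN.from_config actually expects live in models/salmonn.py and config.yaml, which are not shown in this part of the diff.

from omegaconf import OmegaConf
from models import load_model

cfg = OmegaConf.load("config.yaml")
model = load_model(cfg.model)   # assumed: the model section of the config is passed in
model.eval()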
models/beats/BEATs.py
ADDED
|
@@ -0,0 +1,180 @@
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
|
| 3 |
+
# Github source: https://github.com/microsoft/unilm/tree/master/beats
|
| 4 |
+
# Copyright (c) 2022 Microsoft
|
| 5 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 6 |
+
# Based on fairseq code bases
|
| 7 |
+
# https://github.com/pytorch/fairseq
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
from torch.nn import LayerNorm
|
| 14 |
+
import torchaudio.compliance.kaldi as ta_kaldi
|
| 15 |
+
|
| 16 |
+
from .backbone import (
|
| 17 |
+
TransformerEncoder,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
import logging
|
| 21 |
+
from typing import Optional
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class BEATsConfig:
|
| 27 |
+
def __init__(self, cfg=None):
|
| 28 |
+
self.input_patch_size: int = -1 # patch size of patch embedding
|
| 29 |
+
self.embed_dim: int = 512 # patch embedding dimension
|
| 30 |
+
self.conv_bias: bool = False # include bias in conv encoder
|
| 31 |
+
|
| 32 |
+
self.encoder_layers: int = 12 # num encoder layers in the transformer
|
| 33 |
+
self.encoder_embed_dim: int = 768 # encoder embedding dimension
|
| 34 |
+
self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
|
| 35 |
+
self.encoder_attention_heads: int = 12 # num encoder attention heads
|
| 36 |
+
self.activation_fn: str = "gelu" # activation function to use
|
| 37 |
+
|
| 38 |
+
self.layer_wise_gradient_decay_ratio: float = 1.0 # ratio for layer-wise gradient decay
|
| 39 |
+
self.layer_norm_first: bool = False # apply layernorm first in the transformer
|
| 40 |
+
self.deep_norm: bool = False # apply deep_norm first in the transformer
|
| 41 |
+
|
| 42 |
+
# dropouts
|
| 43 |
+
self.dropout: float = 0.1 # dropout probability for the transformer
|
| 44 |
+
self.attention_dropout: float = 0.1 # dropout probability for attention weights
|
| 45 |
+
self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
|
| 46 |
+
self.encoder_layerdrop: float = 0.0 # probability of dropping a transformer layer
|
| 47 |
+
self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
|
| 48 |
+
|
| 49 |
+
# positional embeddings
|
| 50 |
+
self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
|
| 51 |
+
self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
|
| 52 |
+
|
| 53 |
+
# relative position embedding
|
| 54 |
+
self.relative_position_embedding: bool = False # apply relative position embedding
|
| 55 |
+
self.num_buckets: int = 320 # number of buckets for relative position embedding
|
| 56 |
+
self.max_distance: int = 1280 # maximum distance for relative position embedding
|
| 57 |
+
self.gru_rel_pos: bool = False # apply gated relative position embedding
|
| 58 |
+
|
| 59 |
+
# label predictor
|
| 60 |
+
self.finetuned_model: bool = False # whether the model is a fine-tuned model.
|
| 61 |
+
self.predictor_dropout: float = 0.1 # dropout probability for the predictor
|
| 62 |
+
self.predictor_class: int = 527 # target class number for the predictor
|
| 63 |
+
|
| 64 |
+
if cfg is not None:
|
| 65 |
+
self.update(cfg)
|
| 66 |
+
|
| 67 |
+
def update(self, cfg: dict):
|
| 68 |
+
self.__dict__.update(cfg)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class BEATs(nn.Module):
|
| 72 |
+
def __init__(
|
| 73 |
+
self,
|
| 74 |
+
cfg: BEATsConfig,
|
| 75 |
+
) -> None:
|
| 76 |
+
super().__init__()
|
| 77 |
+
logger.info(f"BEATs Config: {cfg.__dict__}")
|
| 78 |
+
|
| 79 |
+
self.cfg = cfg
|
| 80 |
+
|
| 81 |
+
self.embed = cfg.embed_dim
|
| 82 |
+
self.post_extract_proj = (
|
| 83 |
+
nn.Linear(self.embed, cfg.encoder_embed_dim)
|
| 84 |
+
if self.embed != cfg.encoder_embed_dim
|
| 85 |
+
else None
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
self.input_patch_size = cfg.input_patch_size
|
| 89 |
+
self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size,
|
| 90 |
+
bias=cfg.conv_bias)
|
| 91 |
+
|
| 92 |
+
self.dropout_input = nn.Dropout(cfg.dropout_input)
|
| 93 |
+
|
| 94 |
+
assert not cfg.deep_norm or not cfg.layer_norm_first
|
| 95 |
+
self.encoder = TransformerEncoder(cfg)
|
| 96 |
+
self.layer_norm = LayerNorm(self.embed)
|
| 97 |
+
|
| 98 |
+
if cfg.finetuned_model:
|
| 99 |
+
self.predictor_dropout = nn.Dropout(cfg.predictor_dropout)
|
| 100 |
+
self.predictor = nn.Linear(cfg.encoder_embed_dim, cfg.predictor_class)
|
| 101 |
+
else:
|
| 102 |
+
self.predictor = None
|
| 103 |
+
|
| 104 |
+
def forward_padding_mask(
|
| 105 |
+
self,
|
| 106 |
+
features: torch.Tensor,
|
| 107 |
+
padding_mask: torch.Tensor,
|
| 108 |
+
) -> torch.Tensor:
|
| 109 |
+
extra = padding_mask.size(1) % features.size(1)
|
| 110 |
+
if extra > 0:
|
| 111 |
+
padding_mask = padding_mask[:, :-extra]
|
| 112 |
+
padding_mask = padding_mask.view(
|
| 113 |
+
padding_mask.size(0), features.size(1), -1
|
| 114 |
+
)
|
| 115 |
+
padding_mask = padding_mask.all(-1)
|
| 116 |
+
return padding_mask
|
| 117 |
+
|
| 118 |
+
def preprocess(
|
| 119 |
+
self,
|
| 120 |
+
source: torch.Tensor,
|
| 121 |
+
fbank_mean: float = 15.41663,
|
| 122 |
+
fbank_std: float = 6.55582,
|
| 123 |
+
) -> torch.Tensor:
|
| 124 |
+
fbanks = []
|
| 125 |
+
for waveform in source:
|
| 126 |
+
waveform = waveform.unsqueeze(0) * 2 ** 15
|
| 127 |
+
fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
|
| 128 |
+
fbanks.append(fbank)
|
| 129 |
+
fbank = torch.stack(fbanks, dim=0)
|
| 130 |
+
fbank = (fbank - fbank_mean) / (2 * fbank_std)
|
| 131 |
+
return fbank
|
| 132 |
+
|
| 133 |
+
def extract_features(
|
| 134 |
+
self,
|
| 135 |
+
source: torch.Tensor,
|
| 136 |
+
padding_mask: Optional[torch.Tensor] = None,
|
| 137 |
+
fbank_mean: float = 15.41663,
|
| 138 |
+
fbank_std: float = 6.55582,
|
| 139 |
+
feature_only=False,
|
| 140 |
+
):
|
| 141 |
+
fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std).to(torch.float32)
|
| 142 |
+
|
| 143 |
+
if padding_mask is not None:
|
| 144 |
+
padding_mask = self.forward_padding_mask(fbank, padding_mask)
|
| 145 |
+
|
| 146 |
+
fbank = fbank.unsqueeze(1)
|
| 147 |
+
features = self.patch_embedding(fbank)
|
| 148 |
+
features = features.reshape(features.shape[0], features.shape[1], -1)
|
| 149 |
+
features = features.transpose(1, 2)
|
| 150 |
+
features = self.layer_norm(features)
|
| 151 |
+
|
| 152 |
+
if padding_mask is not None:
|
| 153 |
+
padding_mask = self.forward_padding_mask(features, padding_mask)
|
| 154 |
+
|
| 155 |
+
if self.post_extract_proj is not None:
|
| 156 |
+
features = self.post_extract_proj(features)
|
| 157 |
+
|
| 158 |
+
x = self.dropout_input(features)
|
| 159 |
+
|
| 160 |
+
x, layer_results = self.encoder(
|
| 161 |
+
x,
|
| 162 |
+
padding_mask=padding_mask,
|
| 163 |
+
)
|
| 164 |
+
|
| 165 |
+
if not feature_only and self.predictor is not None:
|
| 166 |
+
x = self.predictor_dropout(x)
|
| 167 |
+
logits = self.predictor(x)
|
| 168 |
+
|
| 169 |
+
if padding_mask is not None and padding_mask.any():
|
| 170 |
+
logits[padding_mask] = 0
|
| 171 |
+
logits = logits.sum(dim=1)
|
| 172 |
+
logits = logits / (~padding_mask).sum(dim=1).unsqueeze(-1).expand_as(logits)
|
| 173 |
+
else:
|
| 174 |
+
logits = logits.mean(dim=1)
|
| 175 |
+
|
| 176 |
+
lprobs = torch.sigmoid(logits)
|
| 177 |
+
|
| 178 |
+
return lprobs, padding_mask
|
| 179 |
+
else:
|
| 180 |
+
return x, padding_mask
|
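A minimal sketch of using the BEATs module above as an audio feature extractor. The bare config dict and input_patch_size=16 are assumptions made only to keep the example self-contained (the class defaults the patch size to -1, and real checkpoints ship their own cfg dict); it also assumes the BEATsConfig defaults are sufficient for backbone.TransformerEncoder.

import torch
from models.beats.BEATs import BEATs, BEATsConfig

cfg = BEATsConfig({"input_patch_size": 16, "finetuned_model": False})
beats = BEATs(cfg).eval()

waveform = torch.randn(1, 16000)   # one second of 16 kHz audio
with torch.no_grad():
    features, _ = beats.extract_features(waveform, feature_only=True)
# features: (batch, num_patches, encoder_embed_dim),
# e.g. torch.Size([1, 48, 768]) for this one-second input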
models/beats/Tokenizers.py
ADDED
|
@@ -0,0 +1,172 @@
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
|
| 3 |
+
# Github source: https://github.com/microsoft/unilm/tree/master/beats
|
| 4 |
+
# Copyright (c) 2022 Microsoft
|
| 5 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 6 |
+
# Based on fairseq code bases
|
| 7 |
+
# https://github.com/pytorch/fairseq
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
from torch.nn import LayerNorm
|
| 14 |
+
import torchaudio.compliance.kaldi as ta_kaldi
|
| 15 |
+
|
| 16 |
+
from .backbone import (
|
| 17 |
+
TransformerEncoder,
|
| 18 |
+
)
|
| 19 |
+
from .quantizer import (
|
| 20 |
+
NormEMAVectorQuantizer,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
import logging
|
| 24 |
+
from typing import Optional
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class TokenizersConfig:
|
| 30 |
+
def __init__(self, cfg=None):
|
| 31 |
+
self.input_patch_size: int = -1 # patch size of patch embedding
|
| 32 |
+
self.embed_dim: int = 512 # patch embedding dimension
|
| 33 |
+
self.conv_bias: bool = False # include bias in conv encoder
|
| 34 |
+
|
| 35 |
+
self.encoder_layers: int = 12 # num encoder layers in the transformer
|
| 36 |
+
self.encoder_embed_dim: int = 768 # encoder embedding dimension
|
| 37 |
+
self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
|
| 38 |
+
self.encoder_attention_heads: int = 12 # num encoder attention heads
|
| 39 |
+
self.activation_fn: str = "gelu" # activation function to use
|
| 40 |
+
|
| 41 |
+
self.layer_norm_first: bool = False # apply layernorm first in the transformer
|
| 42 |
+
self.deep_norm: bool = False # apply deep_norm first in the transformer
|
| 43 |
+
|
| 44 |
+
# dropouts
|
| 45 |
+
self.dropout: float = 0.1 # dropout probability for the transformer
|
| 46 |
+
self.attention_dropout: float = 0.1 # dropout probability for attention weights
|
| 47 |
+
self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
|
| 48 |
+
self.encoder_layerdrop: float = 0.0 # probability of dropping a transformer layer
|
| 49 |
+
self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
|
| 50 |
+
|
| 51 |
+
# positional embeddings
|
| 52 |
+
self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
|
| 53 |
+
self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
|
| 54 |
+
|
| 55 |
+
# relative position embedding
|
| 56 |
+
self.relative_position_embedding: bool = False # apply relative position embedding
|
| 57 |
+
self.num_buckets: int = 320 # number of buckets for relative position embedding
|
| 58 |
+
self.max_distance: int = 1280 # maximum distance for relative position embedding
|
| 59 |
+
self.gru_rel_pos: bool = False # apply gated relative position embedding
|
| 60 |
+
|
| 61 |
+
# quantizer
|
| 62 |
+
self.quant_n: int = 1024 # codebook number in quantizer
|
| 63 |
+
self.quant_dim: int = 256 # codebook dimension in quantizer
|
| 64 |
+
|
| 65 |
+
if cfg is not None:
|
| 66 |
+
self.update(cfg)
|
| 67 |
+
|
| 68 |
+
def update(self, cfg: dict):
|
| 69 |
+
self.__dict__.update(cfg)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class Tokenizers(nn.Module):
|
| 73 |
+
def __init__(
|
| 74 |
+
self,
|
| 75 |
+
cfg: TokenizersConfig,
|
| 76 |
+
) -> None:
|
| 77 |
+
super().__init__()
|
| 78 |
+
logger.info(f"Tokenizers Config: {cfg.__dict__}")
|
| 79 |
+
|
| 80 |
+
self.cfg = cfg
|
| 81 |
+
|
| 82 |
+
self.embed = cfg.embed_dim
|
| 83 |
+
self.post_extract_proj = (
|
| 84 |
+
nn.Linear(self.embed, cfg.encoder_embed_dim)
|
| 85 |
+
if self.embed != cfg.encoder_embed_dim
|
| 86 |
+
else None
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
self.input_patch_size = cfg.input_patch_size
|
| 90 |
+
self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size,
|
| 91 |
+
bias=cfg.conv_bias)
|
| 92 |
+
|
| 93 |
+
self.dropout_input = nn.Dropout(cfg.dropout_input)
|
| 94 |
+
|
| 95 |
+
assert not cfg.deep_norm or not cfg.layer_norm_first
|
| 96 |
+
self.encoder = TransformerEncoder(cfg)
|
| 97 |
+
self.layer_norm = LayerNorm(self.embed)
|
| 98 |
+
|
| 99 |
+
self.quantize = NormEMAVectorQuantizer(
|
| 100 |
+
n_embed=cfg.quant_n, embedding_dim=cfg.quant_dim, beta=1.0, kmeans_init=True, decay=0.99,
|
| 101 |
+
)
|
| 102 |
+
self.quant_n = cfg.quant_n
|
| 103 |
+
self.quantize_layer = nn.Sequential(
|
| 104 |
+
nn.Linear(cfg.encoder_embed_dim, cfg.encoder_embed_dim),
|
| 105 |
+
nn.Tanh(),
|
| 106 |
+
nn.Linear(cfg.encoder_embed_dim, cfg.quant_dim) # for quantize
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
def forward_padding_mask(
|
| 110 |
+
self,
|
| 111 |
+
features: torch.Tensor,
|
| 112 |
+
padding_mask: torch.Tensor,
|
| 113 |
+
) -> torch.Tensor:
|
| 114 |
+
extra = padding_mask.size(1) % features.size(1)
|
| 115 |
+
if extra > 0:
|
| 116 |
+
padding_mask = padding_mask[:, :-extra]
|
| 117 |
+
padding_mask = padding_mask.view(
|
| 118 |
+
padding_mask.size(0), features.size(1), -1
|
| 119 |
+
)
|
| 120 |
+
padding_mask = padding_mask.all(-1)
|
| 121 |
+
return padding_mask
|
| 122 |
+
|
| 123 |
+
def preprocess(
|
| 124 |
+
self,
|
| 125 |
+
source: torch.Tensor,
|
| 126 |
+
fbank_mean: float = 15.41663,
|
| 127 |
+
fbank_std: float = 6.55582,
|
| 128 |
+
) -> torch.Tensor:
|
| 129 |
+
fbanks = []
|
| 130 |
+
for waveform in source:
|
| 131 |
+
waveform = waveform.unsqueeze(0) * 2 ** 15
|
| 132 |
+
fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
|
| 133 |
+
fbanks.append(fbank)
|
| 134 |
+
fbank = torch.stack(fbanks, dim=0)
|
| 135 |
+
fbank = (fbank - fbank_mean) / (2 * fbank_std)
|
| 136 |
+
return fbank
|
| 137 |
+
|
| 138 |
+
def extract_labels(
|
| 139 |
+
self,
|
| 140 |
+
source: torch.Tensor,
|
| 141 |
+
padding_mask: Optional[torch.Tensor] = None,
|
| 142 |
+
fbank_mean: float = 15.41663,
|
| 143 |
+
fbank_std: float = 6.55582,
|
| 144 |
+
):
|
| 145 |
+
fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std)
|
| 146 |
+
|
| 147 |
+
if padding_mask is not None:
|
| 148 |
+
padding_mask = self.forward_padding_mask(fbank, padding_mask)
|
| 149 |
+
|
| 150 |
+
fbank = fbank.unsqueeze(1)
|
| 151 |
+
features = self.patch_embedding(fbank)
|
| 152 |
+
features = features.reshape(features.shape[0], features.shape[1], -1)
|
| 153 |
+
features = features.transpose(1, 2)
|
| 154 |
+
features = self.layer_norm(features)
|
| 155 |
+
|
| 156 |
+
if padding_mask is not None:
|
| 157 |
+
padding_mask = self.forward_padding_mask(features, padding_mask)
|
| 158 |
+
|
| 159 |
+
if self.post_extract_proj is not None:
|
| 160 |
+
features = self.post_extract_proj(features)
|
| 161 |
+
|
| 162 |
+
x = self.dropout_input(features)
|
| 163 |
+
|
| 164 |
+
x, layer_results = self.encoder(
|
| 165 |
+
x,
|
| 166 |
+
padding_mask=padding_mask,
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
quantize_input = self.quantize_layer(x)
|
| 170 |
+
quantize_feature, embed_loss, embed_ind = self.quantize(quantize_input)
|
| 171 |
+
|
| 172 |
+
return embed_ind
|
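The Tokenizers model above turns a batch of 16 kHz waveforms into 128-bin log-mel fbank features, patch-embeds them with a Conv2d, runs the Transformer encoder, and snaps each frame onto the nearest codebook entry, so extract_labels returns one discrete index per patch. Below is a minimal usage sketch, not part of the upload: the checkpoint path is hypothetical, and the "cfg"/"model" keys are assumed to follow the BEATs tokenizer release format.

import torch
from models.beats.Tokenizers import Tokenizers, TokenizersConfig

# Hypothetical checkpoint file; assumed to carry "cfg" and "model" entries.
ckpt = torch.load("beats_tokenizer.pt", map_location="cpu")
tokenizer = Tokenizers(TokenizersConfig(ckpt["cfg"]))
tokenizer.load_state_dict(ckpt["model"])
tokenizer.eval()

wav = torch.randn(1, 32000)                   # 2 s of 16 kHz audio
padding_mask = torch.zeros(1, 32000).bool()   # True marks padded samples

with torch.no_grad():
    labels = tokenizer.extract_labels(wav, padding_mask=padding_mask)
# labels: one codebook index in [0, quant_n) per patch position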
models/beats/__init__.py
ADDED
|
File without changes
|
models/beats/backbone.py
ADDED
|
@@ -0,0 +1,783 @@
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
|
| 3 |
+
# Github source: https://github.com/microsoft/unilm/tree/master/beats
|
| 4 |
+
# Copyright (c) 2022 Microsoft
|
| 5 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 6 |
+
# Based on fairseq code bases
|
| 7 |
+
# https://github.com/pytorch/fairseq
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
import math
|
| 11 |
+
import numpy as np
|
| 12 |
+
from typing import Dict, Optional, Tuple
|
| 13 |
+
import torch
|
| 14 |
+
from torch import Tensor, nn
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
from torch.nn import LayerNorm, Parameter
|
| 17 |
+
from .modules import (
|
| 18 |
+
GradMultiply,
|
| 19 |
+
SamePad,
|
| 20 |
+
get_activation_fn,
|
| 21 |
+
GLU_Linear,
|
| 22 |
+
quant_noise,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class TransformerEncoder(nn.Module):
|
| 27 |
+
def __init__(self, args):
|
| 28 |
+
super().__init__()
|
| 29 |
+
|
| 30 |
+
self.dropout = args.dropout
|
| 31 |
+
self.embedding_dim = args.encoder_embed_dim
|
| 32 |
+
|
| 33 |
+
self.pos_conv = nn.Conv1d(
|
| 34 |
+
self.embedding_dim,
|
| 35 |
+
self.embedding_dim,
|
| 36 |
+
kernel_size=args.conv_pos,
|
| 37 |
+
padding=args.conv_pos // 2,
|
| 38 |
+
groups=args.conv_pos_groups,
|
| 39 |
+
)
|
| 40 |
+
dropout = 0
|
| 41 |
+
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
|
| 42 |
+
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
|
| 43 |
+
nn.init.constant_(self.pos_conv.bias, 0)
|
| 44 |
+
|
| 45 |
+
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
|
| 46 |
+
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
|
| 47 |
+
|
| 48 |
+
if hasattr(args, "relative_position_embedding"):
|
| 49 |
+
self.relative_position_embedding = args.relative_position_embedding
|
| 50 |
+
self.num_buckets = args.num_buckets
|
| 51 |
+
self.max_distance = args.max_distance
|
| 52 |
+
else:
|
| 53 |
+
self.relative_position_embedding = False
|
| 54 |
+
self.num_buckets = 0
|
| 55 |
+
self.max_distance = 0
|
| 56 |
+
|
| 57 |
+
self.layers = nn.ModuleList(
|
| 58 |
+
[
|
| 59 |
+
TransformerSentenceEncoderLayer(
|
| 60 |
+
embedding_dim=self.embedding_dim,
|
| 61 |
+
ffn_embedding_dim=args.encoder_ffn_embed_dim,
|
| 62 |
+
num_attention_heads=args.encoder_attention_heads,
|
| 63 |
+
dropout=self.dropout,
|
| 64 |
+
attention_dropout=args.attention_dropout,
|
| 65 |
+
activation_dropout=args.activation_dropout,
|
| 66 |
+
activation_fn=args.activation_fn,
|
| 67 |
+
layer_norm_first=args.layer_norm_first,
|
| 68 |
+
deep_norm=args.deep_norm,
|
| 69 |
+
has_relative_attention_bias=self.relative_position_embedding,
|
| 70 |
+
num_buckets=self.num_buckets,
|
| 71 |
+
max_distance=self.max_distance,
|
| 72 |
+
gru_rel_pos=args.gru_rel_pos,
|
| 73 |
+
encoder_layers=args.encoder_layers,
|
| 74 |
+
)
|
| 75 |
+
for i in range(args.encoder_layers)
|
| 76 |
+
]
|
| 77 |
+
)
|
| 78 |
+
if self.relative_position_embedding:
|
| 79 |
+
for i in range(1, args.encoder_layers):
|
| 80 |
+
del self.layers[i].self_attn.relative_attention_bias
|
| 81 |
+
self.layers[i].self_attn.relative_attention_bias = self.layers[0].self_attn.relative_attention_bias
|
| 82 |
+
|
| 83 |
+
self.layer_norm_first = args.layer_norm_first
|
| 84 |
+
self.layer_norm = LayerNorm(self.embedding_dim)
|
| 85 |
+
self.layerdrop = args.encoder_layerdrop
|
| 86 |
+
|
| 87 |
+
self.apply(init_bert_params)
|
| 88 |
+
|
| 89 |
+
if args.deep_norm:
|
| 90 |
+
deep_norm_beta = math.pow(8 * args.encoder_layers, -1 / 4)
|
| 91 |
+
for i in range(args.encoder_layers):
|
| 92 |
+
nn.init.xavier_normal_(self.layers[i].self_attn.k_proj.weight, gain=1)
|
| 93 |
+
nn.init.xavier_normal_(self.layers[i].self_attn.v_proj.weight, gain=deep_norm_beta)
|
| 94 |
+
nn.init.xavier_normal_(self.layers[i].self_attn.q_proj.weight, gain=1)
|
| 95 |
+
nn.init.xavier_normal_(self.layers[i].self_attn.out_proj.weight, gain=deep_norm_beta)
|
| 96 |
+
nn.init.xavier_normal_(self.layers[i].fc1.weight, gain=deep_norm_beta)
|
| 97 |
+
nn.init.xavier_normal_(self.layers[i].fc2.weight, gain=deep_norm_beta)
|
| 98 |
+
|
| 99 |
+
self.layer_wise_gradient_decay_ratio = getattr(args, "layer_wise_gradient_decay_ratio", 1)
|
| 100 |
+
|
| 101 |
+
def forward(self, x, padding_mask=None, layer=None):
|
| 102 |
+
x, layer_results = self.extract_features(x, padding_mask, layer)
|
| 103 |
+
|
| 104 |
+
if self.layer_norm_first and layer is None:
|
| 105 |
+
x = self.layer_norm(x)
|
| 106 |
+
|
| 107 |
+
return x, layer_results
|
| 108 |
+
|
| 109 |
+
def extract_features(self, x, padding_mask=None, tgt_layer=None):
|
| 110 |
+
|
| 111 |
+
if padding_mask is not None:
|
| 112 |
+
x[padding_mask] = 0
|
| 113 |
+
|
| 114 |
+
x_conv = self.pos_conv(x.transpose(1, 2))
|
| 115 |
+
x_conv = x_conv.transpose(1, 2)
|
| 116 |
+
x = x + x_conv
|
| 117 |
+
|
| 118 |
+
if not self.layer_norm_first:
|
| 119 |
+
x = self.layer_norm(x)
|
| 120 |
+
|
| 121 |
+
x = F.dropout(x, p=self.dropout, training=self.training)
|
| 122 |
+
|
| 123 |
+
# B x T x C -> T x B x C
|
| 124 |
+
x = x.transpose(0, 1)
|
| 125 |
+
|
| 126 |
+
layer_results = []
|
| 127 |
+
z = None
|
| 128 |
+
if tgt_layer is not None:
|
| 129 |
+
layer_results.append((x, z))
|
| 130 |
+
r = None
|
| 131 |
+
pos_bias = None
|
| 132 |
+
for i, layer in enumerate(self.layers):
|
| 133 |
+
if self.layer_wise_gradient_decay_ratio != 1.0:
|
| 134 |
+
x = GradMultiply.apply(x, self.layer_wise_gradient_decay_ratio)
|
| 135 |
+
dropout_probability = np.random.random()
|
| 136 |
+
if not self.training or (dropout_probability > self.layerdrop):
|
| 137 |
+
x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_bias)
|
| 138 |
+
if tgt_layer is not None:
|
| 139 |
+
layer_results.append((x, z))
|
| 140 |
+
if i == tgt_layer:
|
| 141 |
+
r = x
|
| 142 |
+
break
|
| 143 |
+
|
| 144 |
+
if r is not None:
|
| 145 |
+
x = r
|
| 146 |
+
|
| 147 |
+
# T x B x C -> B x T x C
|
| 148 |
+
x = x.transpose(0, 1)
|
| 149 |
+
|
| 150 |
+
return x, layer_results
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class TransformerSentenceEncoderLayer(nn.Module):
|
| 154 |
+
def __init__(
|
| 155 |
+
self,
|
| 156 |
+
embedding_dim: float = 768,
|
| 157 |
+
ffn_embedding_dim: float = 3072,
|
| 158 |
+
num_attention_heads: float = 8,
|
| 159 |
+
dropout: float = 0.1,
|
| 160 |
+
attention_dropout: float = 0.1,
|
| 161 |
+
activation_dropout: float = 0.1,
|
| 162 |
+
activation_fn: str = "relu",
|
| 163 |
+
layer_norm_first: bool = False,
|
| 164 |
+
deep_norm: bool = False,
|
| 165 |
+
has_relative_attention_bias: bool = False,
|
| 166 |
+
num_buckets: int = 0,
|
| 167 |
+
max_distance: int = 0,
|
| 168 |
+
rescale_init: bool = False,
|
| 169 |
+
gru_rel_pos: bool = False,
|
| 170 |
+
encoder_layers: int = 0,
|
| 171 |
+
) -> None:
|
| 172 |
+
|
| 173 |
+
super().__init__()
|
| 174 |
+
self.embedding_dim = embedding_dim
|
| 175 |
+
self.dropout = dropout
|
| 176 |
+
self.activation_dropout = activation_dropout
|
| 177 |
+
|
| 178 |
+
self.activation_name = activation_fn
|
| 179 |
+
self.activation_fn = get_activation_fn(activation_fn)
|
| 180 |
+
self.self_attn = MultiheadAttention(
|
| 181 |
+
self.embedding_dim,
|
| 182 |
+
num_attention_heads,
|
| 183 |
+
dropout=attention_dropout,
|
| 184 |
+
self_attention=True,
|
| 185 |
+
has_relative_attention_bias=has_relative_attention_bias,
|
| 186 |
+
num_buckets=num_buckets,
|
| 187 |
+
max_distance=max_distance,
|
| 188 |
+
rescale_init=rescale_init,
|
| 189 |
+
gru_rel_pos=gru_rel_pos,
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
self.dropout1 = nn.Dropout(dropout)
|
| 193 |
+
self.dropout2 = nn.Dropout(self.activation_dropout)
|
| 194 |
+
self.dropout3 = nn.Dropout(dropout)
|
| 195 |
+
|
| 196 |
+
self.layer_norm_first = layer_norm_first
|
| 197 |
+
|
| 198 |
+
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
|
| 199 |
+
|
| 200 |
+
if self.activation_name == "glu":
|
| 201 |
+
self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
|
| 202 |
+
else:
|
| 203 |
+
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
|
| 204 |
+
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
|
| 205 |
+
|
| 206 |
+
self.final_layer_norm = LayerNorm(self.embedding_dim)
|
| 207 |
+
|
| 208 |
+
self.deep_norm = deep_norm
|
| 209 |
+
if self.deep_norm:
|
| 210 |
+
self.deep_norm_alpha = math.pow(2 * encoder_layers, 1 / 4)
|
| 211 |
+
else:
|
| 212 |
+
self.deep_norm_alpha = 1
|
| 213 |
+
|
| 214 |
+
def forward(
|
| 215 |
+
self,
|
| 216 |
+
x: torch.Tensor,
|
| 217 |
+
self_attn_mask: torch.Tensor = None,
|
| 218 |
+
self_attn_padding_mask: torch.Tensor = None,
|
| 219 |
+
need_weights: bool = False,
|
| 220 |
+
pos_bias=None
|
| 221 |
+
):
|
| 222 |
+
residual = x
|
| 223 |
+
|
| 224 |
+
if self.layer_norm_first:
|
| 225 |
+
x = self.self_attn_layer_norm(x)
|
| 226 |
+
x, attn, pos_bias = self.self_attn(
|
| 227 |
+
query=x,
|
| 228 |
+
key=x,
|
| 229 |
+
value=x,
|
| 230 |
+
key_padding_mask=self_attn_padding_mask,
|
| 231 |
+
need_weights=False,
|
| 232 |
+
attn_mask=self_attn_mask,
|
| 233 |
+
position_bias=pos_bias
|
| 234 |
+
)
|
| 235 |
+
x = self.dropout1(x)
|
| 236 |
+
x = residual + x
|
| 237 |
+
|
| 238 |
+
residual = x
|
| 239 |
+
x = self.final_layer_norm(x)
|
| 240 |
+
if self.activation_name == "glu":
|
| 241 |
+
x = self.fc1(x)
|
| 242 |
+
else:
|
| 243 |
+
x = self.activation_fn(self.fc1(x))
|
| 244 |
+
x = self.dropout2(x)
|
| 245 |
+
x = self.fc2(x)
|
| 246 |
+
x = self.dropout3(x)
|
| 247 |
+
x = residual + x
|
| 248 |
+
else:
|
| 249 |
+
x, attn, pos_bias = self.self_attn(
|
| 250 |
+
query=x,
|
| 251 |
+
key=x,
|
| 252 |
+
value=x,
|
| 253 |
+
key_padding_mask=self_attn_padding_mask,
|
| 254 |
+
need_weights=need_weights,
|
| 255 |
+
attn_mask=self_attn_mask,
|
| 256 |
+
position_bias=pos_bias
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
x = self.dropout1(x)
|
| 260 |
+
x = residual * self.deep_norm_alpha + x
|
| 261 |
+
|
| 262 |
+
x = self.self_attn_layer_norm(x)
|
| 263 |
+
|
| 264 |
+
residual = x
|
| 265 |
+
if self.activation_name == "glu":
|
| 266 |
+
x = self.fc1(x)
|
| 267 |
+
else:
|
| 268 |
+
x = self.activation_fn(self.fc1(x))
|
| 269 |
+
x = self.dropout2(x)
|
| 270 |
+
x = self.fc2(x)
|
| 271 |
+
x = self.dropout3(x)
|
| 272 |
+
x = residual * self.deep_norm_alpha + x
|
| 273 |
+
x = self.final_layer_norm(x)
|
| 274 |
+
|
| 275 |
+
return x, attn, pos_bias
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class MultiheadAttention(nn.Module):
|
| 279 |
+
"""Multi-headed attention.
|
| 280 |
+
|
| 281 |
+
See "Attention Is All You Need" for more details.
|
| 282 |
+
"""
|
| 283 |
+
|
| 284 |
+
def __init__(
|
| 285 |
+
self,
|
| 286 |
+
embed_dim,
|
| 287 |
+
num_heads,
|
| 288 |
+
kdim=None,
|
| 289 |
+
vdim=None,
|
| 290 |
+
dropout=0.0,
|
| 291 |
+
bias=True,
|
| 292 |
+
add_bias_kv=False,
|
| 293 |
+
add_zero_attn=False,
|
| 294 |
+
self_attention=False,
|
| 295 |
+
encoder_decoder_attention=False,
|
| 296 |
+
q_noise=0.0,
|
| 297 |
+
qn_block_size=8,
|
| 298 |
+
has_relative_attention_bias=False,
|
| 299 |
+
num_buckets=32,
|
| 300 |
+
max_distance=128,
|
| 301 |
+
gru_rel_pos=False,
|
| 302 |
+
rescale_init=False,
|
| 303 |
+
):
|
| 304 |
+
super().__init__()
|
| 305 |
+
self.embed_dim = embed_dim
|
| 306 |
+
self.kdim = kdim if kdim is not None else embed_dim
|
| 307 |
+
self.vdim = vdim if vdim is not None else embed_dim
|
| 308 |
+
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
|
| 309 |
+
|
| 310 |
+
self.num_heads = num_heads
|
| 311 |
+
self.dropout_module = nn.Dropout(dropout)
|
| 312 |
+
|
| 313 |
+
self.has_relative_attention_bias = has_relative_attention_bias
|
| 314 |
+
self.num_buckets = num_buckets
|
| 315 |
+
self.max_distance = max_distance
|
| 316 |
+
if self.has_relative_attention_bias:
|
| 317 |
+
self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)
|
| 318 |
+
|
| 319 |
+
self.head_dim = embed_dim // num_heads
|
| 320 |
+
self.q_head_dim = self.head_dim
|
| 321 |
+
self.k_head_dim = self.head_dim
|
| 322 |
+
assert (
|
| 323 |
+
self.head_dim * num_heads == self.embed_dim
|
| 324 |
+
), "embed_dim must be divisible by num_heads"
|
| 325 |
+
self.scaling = self.head_dim ** -0.5
|
| 326 |
+
|
| 327 |
+
self.self_attention = self_attention
|
| 328 |
+
self.encoder_decoder_attention = encoder_decoder_attention
|
| 329 |
+
|
| 330 |
+
assert not self.self_attention or self.qkv_same_dim, (
|
| 331 |
+
"Self-attention requires query, key and " "value to be of the same size"
|
| 332 |
+
)
|
| 333 |
+
|
| 334 |
+
k_bias = True
|
| 335 |
+
if rescale_init:
|
| 336 |
+
k_bias = False
|
| 337 |
+
|
| 338 |
+
k_embed_dim = embed_dim
|
| 339 |
+
q_embed_dim = embed_dim
|
| 340 |
+
|
| 341 |
+
self.k_proj = quant_noise(
|
| 342 |
+
nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size
|
| 343 |
+
)
|
| 344 |
+
self.v_proj = quant_noise(
|
| 345 |
+
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
|
| 346 |
+
)
|
| 347 |
+
self.q_proj = quant_noise(
|
| 348 |
+
nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
self.out_proj = quant_noise(
|
| 352 |
+
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
|
| 353 |
+
)
|
| 354 |
+
|
| 355 |
+
if add_bias_kv:
|
| 356 |
+
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
|
| 357 |
+
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
|
| 358 |
+
else:
|
| 359 |
+
self.bias_k = self.bias_v = None
|
| 360 |
+
|
| 361 |
+
self.add_zero_attn = add_zero_attn
|
| 362 |
+
|
| 363 |
+
self.gru_rel_pos = gru_rel_pos
|
| 364 |
+
if self.gru_rel_pos:
|
| 365 |
+
self.grep_linear = nn.Linear(self.q_head_dim, 8)
|
| 366 |
+
self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))
|
| 367 |
+
|
| 368 |
+
self.reset_parameters()
|
| 369 |
+
|
| 370 |
+
def reset_parameters(self):
|
| 371 |
+
if self.qkv_same_dim:
|
| 372 |
+
# Empirically observed the convergence to be much better with
|
| 373 |
+
# the scaled initialization
|
| 374 |
+
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
|
| 375 |
+
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
|
| 376 |
+
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
|
| 377 |
+
else:
|
| 378 |
+
nn.init.xavier_uniform_(self.k_proj.weight)
|
| 379 |
+
nn.init.xavier_uniform_(self.v_proj.weight)
|
| 380 |
+
nn.init.xavier_uniform_(self.q_proj.weight)
|
| 381 |
+
|
| 382 |
+
nn.init.xavier_uniform_(self.out_proj.weight)
|
| 383 |
+
if self.out_proj.bias is not None:
|
| 384 |
+
nn.init.constant_(self.out_proj.bias, 0.0)
|
| 385 |
+
if self.bias_k is not None:
|
| 386 |
+
nn.init.xavier_normal_(self.bias_k)
|
| 387 |
+
if self.bias_v is not None:
|
| 388 |
+
nn.init.xavier_normal_(self.bias_v)
|
| 389 |
+
if self.has_relative_attention_bias:
|
| 390 |
+
nn.init.xavier_normal_(self.relative_attention_bias.weight)
|
| 391 |
+
|
| 392 |
+
def _relative_positions_bucket(self, relative_positions, bidirectional=True):
|
| 393 |
+
num_buckets = self.num_buckets
|
| 394 |
+
max_distance = self.max_distance
|
| 395 |
+
relative_buckets = 0
|
| 396 |
+
|
| 397 |
+
if bidirectional:
|
| 398 |
+
num_buckets = num_buckets // 2
|
| 399 |
+
relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
|
| 400 |
+
relative_positions = torch.abs(relative_positions)
|
| 401 |
+
else:
|
| 402 |
+
relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
|
| 403 |
+
|
| 404 |
+
max_exact = num_buckets // 2
|
| 405 |
+
is_small = relative_positions < max_exact
|
| 406 |
+
|
| 407 |
+
relative_postion_if_large = max_exact + (
|
| 408 |
+
torch.log(relative_positions.float() / max_exact)
|
| 409 |
+
/ math.log(max_distance / max_exact)
|
| 410 |
+
* (num_buckets - max_exact)
|
| 411 |
+
).to(torch.long)
|
| 412 |
+
relative_postion_if_large = torch.min(
|
| 413 |
+
relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
|
| 417 |
+
return relative_buckets
|
| 418 |
+
|
| 419 |
+
def compute_bias(self, query_length, key_length):
|
| 420 |
+
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
|
| 421 |
+
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
|
| 422 |
+
relative_position = memory_position - context_position
|
| 423 |
+
relative_position_bucket = self._relative_positions_bucket(
|
| 424 |
+
relative_position,
|
| 425 |
+
bidirectional=True
|
| 426 |
+
)
|
| 427 |
+
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
|
| 428 |
+
values = self.relative_attention_bias(relative_position_bucket)
|
| 429 |
+
values = values.permute([2, 0, 1])
|
| 430 |
+
return values
|
| 431 |
+
|
| 432 |
+
def forward(
|
| 433 |
+
self,
|
| 434 |
+
query,
|
| 435 |
+
key: Optional[Tensor],
|
| 436 |
+
value: Optional[Tensor],
|
| 437 |
+
key_padding_mask: Optional[Tensor] = None,
|
| 438 |
+
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
|
| 439 |
+
need_weights: bool = True,
|
| 440 |
+
static_kv: bool = False,
|
| 441 |
+
attn_mask: Optional[Tensor] = None,
|
| 442 |
+
before_softmax: bool = False,
|
| 443 |
+
need_head_weights: bool = False,
|
| 444 |
+
position_bias: Optional[Tensor] = None
|
| 445 |
+
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
|
| 446 |
+
"""Input shape: Time x Batch x Channel
|
| 447 |
+
|
| 448 |
+
Args:
|
| 449 |
+
key_padding_mask (ByteTensor, optional): mask to exclude
|
| 450 |
+
keys that are pads, of shape `(batch, src_len)`, where
|
| 451 |
+
padding elements are indicated by 1s.
|
| 452 |
+
need_weights (bool, optional): return the attention weights,
|
| 453 |
+
averaged over heads (default: False).
|
| 454 |
+
attn_mask (ByteTensor, optional): typically used to
|
| 455 |
+
implement causal attention, where the mask prevents the
|
| 456 |
+
attention from looking forward in time (default: None).
|
| 457 |
+
before_softmax (bool, optional): return the raw attention
|
| 458 |
+
weights and values before the attention softmax.
|
| 459 |
+
need_head_weights (bool, optional): return the attention
|
| 460 |
+
weights for each head. Implies *need_weights*. Default:
|
| 461 |
+
return the average attention weights over all heads.
|
| 462 |
+
"""
|
| 463 |
+
if need_head_weights:
|
| 464 |
+
need_weights = True
|
| 465 |
+
|
| 466 |
+
is_tpu = query.device.type == "xla"
|
| 467 |
+
|
| 468 |
+
tgt_len, bsz, embed_dim = query.size()
|
| 469 |
+
src_len = tgt_len
|
| 470 |
+
assert embed_dim == self.embed_dim
|
| 471 |
+
assert list(query.size()) == [tgt_len, bsz, embed_dim]
|
| 472 |
+
if key is not None:
|
| 473 |
+
src_len, key_bsz, _ = key.size()
|
| 474 |
+
if not torch.jit.is_scripting():
|
| 475 |
+
assert key_bsz == bsz
|
| 476 |
+
assert value is not None
|
| 477 |
+
assert (src_len, bsz) == value.shape[:2]
|
| 478 |
+
|
| 479 |
+
if self.has_relative_attention_bias and position_bias is None:
|
| 480 |
+
position_bias = self.compute_bias(tgt_len, src_len)
|
| 481 |
+
position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
|
| 482 |
+
|
| 483 |
+
if incremental_state is not None:
|
| 484 |
+
saved_state = self._get_input_buffer(incremental_state)
|
| 485 |
+
if saved_state is not None and "prev_key" in saved_state:
|
| 486 |
+
# previous time steps are cached - no need to recompute
|
| 487 |
+
# key and value if they are static
|
| 488 |
+
if static_kv:
|
| 489 |
+
assert self.encoder_decoder_attention and not self.self_attention
|
| 490 |
+
key = value = None
|
| 491 |
+
else:
|
| 492 |
+
saved_state = None
|
| 493 |
+
|
| 494 |
+
if self.self_attention:
|
| 495 |
+
q = self.q_proj(query)
|
| 496 |
+
k = self.k_proj(query)
|
| 497 |
+
v = self.v_proj(query)
|
| 498 |
+
elif self.encoder_decoder_attention:
|
| 499 |
+
# encoder-decoder attention
|
| 500 |
+
q = self.q_proj(query)
|
| 501 |
+
if key is None:
|
| 502 |
+
assert value is None
|
| 503 |
+
k = v = None
|
| 504 |
+
else:
|
| 505 |
+
k = self.k_proj(key)
|
| 506 |
+
v = self.v_proj(key)
|
| 507 |
+
|
| 508 |
+
else:
|
| 509 |
+
assert key is not None and value is not None
|
| 510 |
+
q = self.q_proj(query)
|
| 511 |
+
k = self.k_proj(key)
|
| 512 |
+
v = self.v_proj(value)
|
| 513 |
+
q *= self.scaling
|
| 514 |
+
alpha = 32
|
| 515 |
+
q *= 1 / alpha
|
| 516 |
+
|
| 517 |
+
if self.bias_k is not None:
|
| 518 |
+
assert self.bias_v is not None
|
| 519 |
+
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
|
| 520 |
+
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
|
| 521 |
+
if attn_mask is not None:
|
| 522 |
+
attn_mask = torch.cat(
|
| 523 |
+
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
|
| 524 |
+
)
|
| 525 |
+
if key_padding_mask is not None:
|
| 526 |
+
key_padding_mask = torch.cat(
|
| 527 |
+
[
|
| 528 |
+
key_padding_mask,
|
| 529 |
+
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
|
| 530 |
+
],
|
| 531 |
+
dim=1,
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
q = (
|
| 535 |
+
q.contiguous()
|
| 536 |
+
.view(tgt_len, bsz * self.num_heads, self.q_head_dim)
|
| 537 |
+
.transpose(0, 1)
|
| 538 |
+
)
|
| 539 |
+
if k is not None:
|
| 540 |
+
k = (
|
| 541 |
+
k.contiguous()
|
| 542 |
+
.view(-1, bsz * self.num_heads, self.k_head_dim)
|
| 543 |
+
.transpose(0, 1)
|
| 544 |
+
)
|
| 545 |
+
if v is not None:
|
| 546 |
+
v = (
|
| 547 |
+
v.contiguous()
|
| 548 |
+
.view(-1, bsz * self.num_heads, self.head_dim)
|
| 549 |
+
.transpose(0, 1)
|
| 550 |
+
)
|
| 551 |
+
|
| 552 |
+
if saved_state is not None:
|
| 553 |
+
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
|
| 554 |
+
if "prev_key" in saved_state:
|
| 555 |
+
_prev_key = saved_state["prev_key"]
|
| 556 |
+
assert _prev_key is not None
|
| 557 |
+
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
|
| 558 |
+
if static_kv:
|
| 559 |
+
k = prev_key
|
| 560 |
+
else:
|
| 561 |
+
assert k is not None
|
| 562 |
+
k = torch.cat([prev_key, k], dim=1)
|
| 563 |
+
src_len = k.size(1)
|
| 564 |
+
if "prev_value" in saved_state:
|
| 565 |
+
_prev_value = saved_state["prev_value"]
|
| 566 |
+
assert _prev_value is not None
|
| 567 |
+
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
|
| 568 |
+
if static_kv:
|
| 569 |
+
v = prev_value
|
| 570 |
+
else:
|
| 571 |
+
assert v is not None
|
| 572 |
+
v = torch.cat([prev_value, v], dim=1)
|
| 573 |
+
prev_key_padding_mask: Optional[Tensor] = None
|
| 574 |
+
if "prev_key_padding_mask" in saved_state:
|
| 575 |
+
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
|
| 576 |
+
assert k is not None and v is not None
|
| 577 |
+
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
|
| 578 |
+
key_padding_mask=key_padding_mask,
|
| 579 |
+
prev_key_padding_mask=prev_key_padding_mask,
|
| 580 |
+
batch_size=bsz,
|
| 581 |
+
src_len=k.size(1),
|
| 582 |
+
static_kv=static_kv,
|
| 583 |
+
)
|
| 584 |
+
|
| 585 |
+
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
|
| 586 |
+
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
|
| 587 |
+
saved_state["prev_key_padding_mask"] = key_padding_mask
|
| 588 |
+
# In this branch incremental_state is never None
|
| 589 |
+
assert incremental_state is not None
|
| 590 |
+
incremental_state = self._set_input_buffer(incremental_state, saved_state)
|
| 591 |
+
assert k is not None
|
| 592 |
+
assert k.size(1) == src_len
|
| 593 |
+
|
| 594 |
+
# This is part of a workaround to get around fork/join parallelism
|
| 595 |
+
# not supporting Optional types.
|
| 596 |
+
if key_padding_mask is not None and key_padding_mask.dim() == 0:
|
| 597 |
+
key_padding_mask = None
|
| 598 |
+
|
| 599 |
+
if key_padding_mask is not None:
|
| 600 |
+
assert key_padding_mask.size(0) == bsz
|
| 601 |
+
assert key_padding_mask.size(1) == src_len
|
| 602 |
+
|
| 603 |
+
if self.add_zero_attn:
|
| 604 |
+
assert v is not None
|
| 605 |
+
src_len += 1
|
| 606 |
+
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
|
| 607 |
+
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
|
| 608 |
+
if attn_mask is not None:
|
| 609 |
+
attn_mask = torch.cat(
|
| 610 |
+
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
|
| 611 |
+
)
|
| 612 |
+
if key_padding_mask is not None:
|
| 613 |
+
key_padding_mask = torch.cat(
|
| 614 |
+
[
|
| 615 |
+
key_padding_mask,
|
| 616 |
+
torch.zeros(key_padding_mask.size(0), 1).type_as(
|
| 617 |
+
key_padding_mask
|
| 618 |
+
),
|
| 619 |
+
],
|
| 620 |
+
dim=1,
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
attn_weights = torch.bmm(q, k.transpose(1, 2))
|
| 624 |
+
attn_weights = (attn_weights - attn_weights.max(dim=-1, keepdim=True)[0]) * alpha
|
| 625 |
+
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
|
| 626 |
+
|
| 627 |
+
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
|
| 628 |
+
|
| 629 |
+
if attn_mask is not None:
|
| 630 |
+
attn_mask = attn_mask.unsqueeze(0)
|
| 631 |
+
attn_weights += attn_mask
|
| 632 |
+
|
| 633 |
+
if key_padding_mask is not None:
|
| 634 |
+
# don't attend to padding symbols
|
| 635 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| 636 |
+
if not is_tpu:
|
| 637 |
+
attn_weights = attn_weights.masked_fill(
|
| 638 |
+
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
|
| 639 |
+
float("-inf"),
|
| 640 |
+
)
|
| 641 |
+
else:
|
| 642 |
+
attn_weights = attn_weights.transpose(0, 2)
|
| 643 |
+
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
|
| 644 |
+
attn_weights = attn_weights.transpose(0, 2)
|
| 645 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 646 |
+
|
| 647 |
+
if before_softmax:
|
| 648 |
+
return attn_weights, v, position_bias
|
| 649 |
+
|
| 650 |
+
if position_bias is not None:
|
| 651 |
+
attn_mask_rel_pos = position_bias
|
| 652 |
+
if self.gru_rel_pos == 1:
|
| 653 |
+
query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) * alpha / self.scaling
|
| 654 |
+
_B, _H, _L, __ = query_layer.size()
|
| 655 |
+
gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
|
| 656 |
+
_B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
|
| 657 |
+
gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
|
| 658 |
+
attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, tgt_len, 1) * position_bias
|
| 659 |
+
|
| 660 |
+
attn_mask_rel_pos = attn_mask_rel_pos.view(attn_weights.size())
|
| 661 |
+
|
| 662 |
+
attn_weights = attn_weights + attn_mask_rel_pos
|
| 663 |
+
|
| 664 |
+
attn_weights_float = F.softmax(
|
| 665 |
+
attn_weights, dim=-1
|
| 666 |
+
)
|
| 667 |
+
attn_weights = attn_weights_float.type_as(attn_weights)
|
| 668 |
+
attn_probs = self.dropout_module(attn_weights)
|
| 669 |
+
|
| 670 |
+
assert v is not None
|
| 671 |
+
attn = torch.bmm(attn_probs, v)
|
| 672 |
+
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
|
| 673 |
+
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
|
| 674 |
+
attn = self.out_proj(attn)
|
| 675 |
+
attn_weights: Optional[Tensor] = None
|
| 676 |
+
if need_weights:
|
| 677 |
+
attn_weights = attn_weights_float.view(
|
| 678 |
+
bsz, self.num_heads, tgt_len, src_len
|
| 679 |
+
).transpose(1, 0)
|
| 680 |
+
if not need_head_weights:
|
| 681 |
+
# average attention weights over heads
|
| 682 |
+
attn_weights = attn_weights.mean(dim=0)
|
| 683 |
+
|
| 684 |
+
return attn, attn_weights, position_bias
|
| 685 |
+
|
| 686 |
+
@staticmethod
|
| 687 |
+
def _append_prev_key_padding_mask(
|
| 688 |
+
key_padding_mask: Optional[Tensor],
|
| 689 |
+
prev_key_padding_mask: Optional[Tensor],
|
| 690 |
+
batch_size: int,
|
| 691 |
+
src_len: int,
|
| 692 |
+
static_kv: bool,
|
| 693 |
+
) -> Optional[Tensor]:
|
| 694 |
+
# saved key padding masks have shape (bsz, seq_len)
|
| 695 |
+
if prev_key_padding_mask is not None and static_kv:
|
| 696 |
+
new_key_padding_mask = prev_key_padding_mask
|
| 697 |
+
elif prev_key_padding_mask is not None and key_padding_mask is not None:
|
| 698 |
+
new_key_padding_mask = torch.cat(
|
| 699 |
+
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
|
| 700 |
+
)
|
| 701 |
+
# During incremental decoding, as the padding token enters and
|
| 702 |
+
# leaves the frame, there will be a time when prev or current
|
| 703 |
+
# is None
|
| 704 |
+
elif prev_key_padding_mask is not None:
|
| 705 |
+
if src_len > prev_key_padding_mask.size(1):
|
| 706 |
+
filler = torch.zeros(
|
| 707 |
+
(batch_size, src_len - prev_key_padding_mask.size(1)),
|
| 708 |
+
device=prev_key_padding_mask.device,
|
| 709 |
+
)
|
| 710 |
+
new_key_padding_mask = torch.cat(
|
| 711 |
+
[prev_key_padding_mask.float(), filler.float()], dim=1
|
| 712 |
+
)
|
| 713 |
+
else:
|
| 714 |
+
new_key_padding_mask = prev_key_padding_mask.float()
|
| 715 |
+
elif key_padding_mask is not None:
|
| 716 |
+
if src_len > key_padding_mask.size(1):
|
| 717 |
+
filler = torch.zeros(
|
| 718 |
+
(batch_size, src_len - key_padding_mask.size(1)),
|
| 719 |
+
device=key_padding_mask.device,
|
| 720 |
+
)
|
| 721 |
+
new_key_padding_mask = torch.cat(
|
| 722 |
+
[filler.float(), key_padding_mask.float()], dim=1
|
| 723 |
+
)
|
| 724 |
+
else:
|
| 725 |
+
new_key_padding_mask = key_padding_mask.float()
|
| 726 |
+
else:
|
| 727 |
+
new_key_padding_mask = prev_key_padding_mask
|
| 728 |
+
return new_key_padding_mask
|
| 729 |
+
|
| 730 |
+
def _get_input_buffer(
|
| 731 |
+
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
|
| 732 |
+
) -> Dict[str, Optional[Tensor]]:
|
| 733 |
+
result = self.get_incremental_state(incremental_state, "attn_state")
|
| 734 |
+
if result is not None:
|
| 735 |
+
return result
|
| 736 |
+
else:
|
| 737 |
+
empty_result: Dict[str, Optional[Tensor]] = {}
|
| 738 |
+
return empty_result
|
| 739 |
+
|
| 740 |
+
def _set_input_buffer(
|
| 741 |
+
self,
|
| 742 |
+
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
|
| 743 |
+
buffer: Dict[str, Optional[Tensor]],
|
| 744 |
+
):
|
| 745 |
+
return self.set_incremental_state(incremental_state, "attn_state", buffer)
|
| 746 |
+
|
| 747 |
+
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
|
| 748 |
+
return attn_weights
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
def init_bert_params(module):
|
| 752 |
+
"""
|
| 753 |
+
Initialize the weights specific to the BERT Model.
|
| 754 |
+
This overrides the default initializations depending on the specified arguments.
|
| 755 |
+
1. If normal_init_linear_weights is set then weights of linear
|
| 756 |
+
layer will be initialized using the normal distribution and
|
| 757 |
+
bias will be set to the specified value.
|
| 758 |
+
2. If normal_init_embed_weights is set then weights of embedding
|
| 759 |
+
layer will be initialized using the normal distribution.
|
| 760 |
+
3. If normal_init_proj_weights is set then weights of
|
| 761 |
+
in_project_weight for MultiHeadAttention is initialized using
|
| 762 |
+
the normal distribution (to be validated).
|
| 763 |
+
"""
|
| 764 |
+
|
| 765 |
+
def normal_(data):
|
| 766 |
+
# with FSDP, module params will be on CUDA, so we cast them back to CPU
|
| 767 |
+
# so that the RNG is consistent with and without FSDP
|
| 768 |
+
data.copy_(
|
| 769 |
+
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
if isinstance(module, nn.Linear):
|
| 773 |
+
normal_(module.weight.data)
|
| 774 |
+
if module.bias is not None:
|
| 775 |
+
module.bias.data.zero_()
|
| 776 |
+
if isinstance(module, nn.Embedding):
|
| 777 |
+
normal_(module.weight.data)
|
| 778 |
+
if module.padding_idx is not None:
|
| 779 |
+
module.weight.data[module.padding_idx].zero_()
|
| 780 |
+
if isinstance(module, MultiheadAttention):
|
| 781 |
+
normal_(module.q_proj.weight.data)
|
| 782 |
+
normal_(module.k_proj.weight.data)
|
| 783 |
+
normal_(module.v_proj.weight.data)
|
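backbone.py implements a WavLM/BEATs-style encoder: a convolutional relative positional embedding, optional deep-norm residual scaling, and a T5-style bucketed relative position bias that is built in the first layer and shared with the remaining layers. The standalone sketch below is not part of the upload; it restates the _relative_positions_bucket logic on its own so the bucketing scheme is easier to inspect, with defaults mirroring num_buckets=320 and max_distance=1280 from the configs.

import math
import torch

def relative_positions_bucket(relative_positions, num_buckets=320, max_distance=1280):
    # Bidirectional bucketing: half the buckets encode the sign of the offset,
    # small offsets get exact buckets, large offsets share log-spaced buckets.
    num_buckets = num_buckets // 2
    buckets = (relative_positions > 0).to(torch.long) * num_buckets
    rel = torch.abs(relative_positions)
    max_exact = num_buckets // 2
    is_small = rel < max_exact
    rel_if_large = max_exact + (
        torch.log(rel.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    rel_if_large = torch.min(rel_if_large, torch.full_like(rel_if_large, num_buckets - 1))
    return buckets + torch.where(is_small, rel, rel_if_large)

# Bucket indices for a 4-frame query attending to a 4-frame key sequence.
q = torch.arange(4)[:, None]
k = torch.arange(4)[None, :]
print(relative_positions_bucket(k - q))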
models/beats/modules.py
ADDED
|
@@ -0,0 +1,218 @@
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
|
| 3 |
+
# Github source: https://github.com/microsoft/unilm/tree/master/beats
|
| 4 |
+
# Copyright (c) 2022 Microsoft
|
| 5 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 6 |
+
# Based on fairseq code bases
|
| 7 |
+
# https://github.com/pytorch/fairseq
|
| 8 |
+
# --------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
import math
|
| 11 |
+
import warnings
|
| 12 |
+
import torch
|
| 13 |
+
from torch import Tensor, nn
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class GradMultiply(torch.autograd.Function):
|
| 18 |
+
@staticmethod
|
| 19 |
+
def forward(ctx, x, scale):
|
| 20 |
+
ctx.scale = scale
|
| 21 |
+
res = x.new(x)
|
| 22 |
+
return res
|
| 23 |
+
|
| 24 |
+
@staticmethod
|
| 25 |
+
def backward(ctx, grad):
|
| 26 |
+
return grad * ctx.scale, None
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class SamePad(nn.Module):
|
| 30 |
+
def __init__(self, kernel_size, causal=False):
|
| 31 |
+
super().__init__()
|
| 32 |
+
if causal:
|
| 33 |
+
self.remove = kernel_size - 1
|
| 34 |
+
else:
|
| 35 |
+
self.remove = 1 if kernel_size % 2 == 0 else 0
|
| 36 |
+
|
| 37 |
+
def forward(self, x):
|
| 38 |
+
if self.remove > 0:
|
| 39 |
+
x = x[:, :, : -self.remove]
|
| 40 |
+
return x
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class Swish(nn.Module):
|
| 44 |
+
def __init__(self):
|
| 45 |
+
super(Swish, self).__init__()
|
| 46 |
+
self.act = torch.nn.Sigmoid()
|
| 47 |
+
|
| 48 |
+
def forward(self, x):
|
| 49 |
+
return x * self.act(x)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class GLU_Linear(nn.Module):
|
| 53 |
+
def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True):
|
| 54 |
+
super(GLU_Linear, self).__init__()
|
| 55 |
+
|
| 56 |
+
self.glu_type = glu_type
|
| 57 |
+
self.output_dim = output_dim
|
| 58 |
+
|
| 59 |
+
if glu_type == "sigmoid":
|
| 60 |
+
self.glu_act = torch.nn.Sigmoid()
|
| 61 |
+
elif glu_type == "swish":
|
| 62 |
+
self.glu_act = Swish()
|
| 63 |
+
elif glu_type == "relu":
|
| 64 |
+
self.glu_act = torch.nn.ReLU()
|
| 65 |
+
elif glu_type == "gelu":
|
| 66 |
+
self.glu_act = torch.nn.GELU()
|
| 67 |
+
|
| 68 |
+
if bias_in_glu:
|
| 69 |
+
self.linear = nn.Linear(input_dim, output_dim * 2, True)
|
| 70 |
+
else:
|
| 71 |
+
self.linear = nn.Linear(input_dim, output_dim * 2, False)
|
| 72 |
+
|
| 73 |
+
def forward(self, x):
|
| 74 |
+
# to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so we need to switch the dimension first for the 1D-Conv case
|
| 75 |
+
x = self.linear(x)
|
| 76 |
+
|
| 77 |
+
if self.glu_type == "bilinear":
|
| 78 |
+
x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2])
|
| 79 |
+
else:
|
| 80 |
+
x = (x[:, :, 0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2]))
|
| 81 |
+
|
| 82 |
+
return x
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def gelu_accurate(x):
|
| 86 |
+
if not hasattr(gelu_accurate, "_a"):
|
| 87 |
+
gelu_accurate._a = math.sqrt(2 / math.pi)
|
| 88 |
+
return (
|
| 89 |
+
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def gelu(x: torch.Tensor) -> torch.Tensor:
|
| 94 |
+
return torch.nn.functional.gelu(x.float()).type_as(x)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def get_activation_fn(activation: str):
|
| 98 |
+
"""Returns the activation function corresponding to `activation`"""
|
| 99 |
+
|
| 100 |
+
if activation == "relu":
|
| 101 |
+
return F.relu
|
| 102 |
+
elif activation == "gelu":
|
| 103 |
+
return gelu
|
| 104 |
+
elif activation == "gelu_fast":
|
| 105 |
+
warnings.warn(
|
| 106 |
+
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
|
| 107 |
+
)
|
| 108 |
+
return gelu_accurate
|
| 109 |
+
elif activation == "gelu_accurate":
|
| 110 |
+
return gelu_accurate
|
| 111 |
+
elif activation == "tanh":
|
| 112 |
+
return torch.tanh
|
| 113 |
+
elif activation == "linear":
|
| 114 |
+
return lambda x: x
|
| 115 |
+
elif activation == "glu":
|
| 116 |
+
return lambda x: x
|
| 117 |
+
else:
|
| 118 |
+
raise RuntimeError("--activation-fn {} not supported".format(activation))
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def quant_noise(module, p, block_size):
|
| 122 |
+
"""
|
| 123 |
+
Wraps modules and applies quantization noise to the weights for
|
| 124 |
+
subsequent quantization with Iterative Product Quantization as
|
| 125 |
+
described in "Training with Quantization Noise for Extreme Model Compression"
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
- module: nn.Module
|
| 129 |
+
- p: amount of Quantization Noise
|
| 130 |
+
- block_size: size of the blocks for subsequent quantization with iPQ
|
| 131 |
+
|
| 132 |
+
Remarks:
|
| 133 |
+
- Module weights must have the right sizes wrt the block size
|
| 134 |
+
- Only Linear, Embedding and Conv2d modules are supported for the moment
|
| 135 |
+
- For more detail on how to quantize by blocks with convolutional weights,
|
| 136 |
+
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
|
| 137 |
+
- We implement the simplest form of noise here as stated in the paper
|
| 138 |
+
which consists in randomly dropping blocks
|
| 139 |
+
"""
|
| 140 |
+
|
| 141 |
+
# if no quantization noise, don't register hook
|
| 142 |
+
if p <= 0:
|
| 143 |
+
return module
|
| 144 |
+
|
| 145 |
+
# supported modules
|
| 146 |
+
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
|
| 147 |
+
|
| 148 |
+
# test whether module.weight has the right sizes wrt block_size
|
| 149 |
+
is_conv = module.weight.ndim == 4
|
| 150 |
+
|
| 151 |
+
# 2D matrix
|
| 152 |
+
if not is_conv:
|
| 153 |
+
assert (
|
| 154 |
+
module.weight.size(1) % block_size == 0
|
| 155 |
+
), "Input features must be a multiple of block sizes"
|
| 156 |
+
|
| 157 |
+
# 4D matrix
|
| 158 |
+
else:
|
| 159 |
+
# 1x1 convolutions
|
| 160 |
+
if module.kernel_size == (1, 1):
|
| 161 |
+
assert (
|
| 162 |
+
module.in_channels % block_size == 0
|
| 163 |
+
), "Input channels must be a multiple of block sizes"
|
| 164 |
+
# regular convolutions
|
| 165 |
+
else:
|
| 166 |
+
k = module.kernel_size[0] * module.kernel_size[1]
|
| 167 |
+
assert k % block_size == 0, "Kernel size must be a multiple of block size"
|
| 168 |
+
|
| 169 |
+
def _forward_pre_hook(mod, input):
|
| 170 |
+
# no noise for evaluation
|
| 171 |
+
if mod.training:
|
| 172 |
+
if not is_conv:
|
| 173 |
+
# gather weight and sizes
|
| 174 |
+
weight = mod.weight
|
| 175 |
+
in_features = weight.size(1)
|
| 176 |
+
out_features = weight.size(0)
|
| 177 |
+
|
| 178 |
+
# split weight matrix into blocks and randomly drop selected blocks
|
| 179 |
+
mask = torch.zeros(
|
| 180 |
+
in_features // block_size * out_features, device=weight.device
|
| 181 |
+
)
|
| 182 |
+
mask.bernoulli_(p)
|
| 183 |
+
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
|
| 184 |
+
|
| 185 |
+
else:
|
| 186 |
+
# gather weight and sizes
|
| 187 |
+
weight = mod.weight
|
| 188 |
+
in_channels = mod.in_channels
|
| 189 |
+
out_channels = mod.out_channels
|
| 190 |
+
|
| 191 |
+
# split weight matrix into blocks and randomly drop selected blocks
|
| 192 |
+
if mod.kernel_size == (1, 1):
|
| 193 |
+
mask = torch.zeros(
|
| 194 |
+
int(in_channels // block_size * out_channels),
|
| 195 |
+
device=weight.device,
|
| 196 |
+
)
|
| 197 |
+
mask.bernoulli_(p)
|
| 198 |
+
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
|
| 199 |
+
else:
|
| 200 |
+
mask = torch.zeros(
|
| 201 |
+
weight.size(0), weight.size(1), device=weight.device
|
| 202 |
+
)
|
| 203 |
+
mask.bernoulli_(p)
|
| 204 |
+
mask = (
|
| 205 |
+
mask.unsqueeze(2)
|
| 206 |
+
.unsqueeze(3)
|
| 207 |
+
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
# scale weights and apply mask
|
| 211 |
+
mask = mask.to(
|
| 212 |
+
torch.bool
|
| 213 |
+
) # x.bool() is not currently supported in TorchScript
|
| 214 |
+
s = 1 / (1 - p)
|
| 215 |
+
mod.weight.data = s * weight.masked_fill(mask, 0)
|
| 216 |
+
|
| 217 |
+
module.register_forward_pre_hook(_forward_pre_hook)
|
| 218 |
+
return module
|
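quant_noise above registers a forward pre-hook that, only while the module is in training mode, zeroes randomly chosen weight blocks and rescales the remaining weights by 1/(1-p), as in "Training with Quantization Noise for Extreme Model Compression". A small sketch of how the helper is meant to wrap a layer, with toy sizes chosen only so the block-size assert passes; it is not part of the upload.

import torch
from models.beats.modules import quant_noise

layer = quant_noise(torch.nn.Linear(16, 8), p=0.1, block_size=8)  # in_features divisible by block_size

layer.train()
y_train = layer(torch.randn(4, 16))  # hook drops ~10% of the 8-wide weight blocks and rescales the rest
layer.eval()
y_eval = layer(torch.randn(4, 16))   # no noise outside training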
models/beats/quantizer.py
ADDED
|
@@ -0,0 +1,215 @@
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
|
| 3 |
+
# Github source: https://github.com/microsoft/unilm/tree/master/beats
|
| 4 |
+
# Copyright (c) 2022 Microsoft
|
| 5 |
+
# Licensed under The MIT License [see LICENSE for details]
|
| 6 |
+
# Based on VQGAN code bases
|
| 7 |
+
# https://github.com/CompVis/taming-transformers
|
| 8 |
+
# --------------------------------------------------------'
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torch.nn.functional as F
|
| 13 |
+
import torch.distributed as distributed
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
from einops import rearrange, repeat
|
| 17 |
+
except ImportError:
|
| 18 |
+
pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def l2norm(t):
|
| 22 |
+
return F.normalize(t, p=2, dim=-1)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def ema_inplace(moving_avg, new, decay):
|
| 26 |
+
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def sample_vectors(samples, num):
|
| 30 |
+
num_samples, device = samples.shape[0], samples.device
|
| 31 |
+
|
| 32 |
+
if num_samples >= num:
|
| 33 |
+
indices = torch.randperm(num_samples, device=device)[:num]
|
| 34 |
+
else:
|
| 35 |
+
indices = torch.randint(0, num_samples, (num,), device=device)
|
| 36 |
+
|
| 37 |
+
return samples[indices]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def kmeans(samples, num_clusters, num_iters=10, use_cosine_sim=False):
|
| 41 |
+
dim, dtype, device = samples.shape[-1], samples.dtype, samples.device
|
| 42 |
+
|
| 43 |
+
means = sample_vectors(samples, num_clusters)
|
| 44 |
+
|
| 45 |
+
for _ in range(num_iters):
|
| 46 |
+
if use_cosine_sim:
|
| 47 |
+
dists = samples @ means.t()
|
| 48 |
+
else:
|
| 49 |
+
diffs = rearrange(samples, 'n d -> n () d') \
|
| 50 |
+
- rearrange(means, 'c d -> () c d')
|
| 51 |
+
dists = -(diffs ** 2).sum(dim=-1)
|
| 52 |
+
|
| 53 |
+
buckets = dists.max(dim=-1).indices
|
| 54 |
+
bins = torch.bincount(buckets, minlength=num_clusters)
|
| 55 |
+
zero_mask = bins == 0
|
| 56 |
+
bins_min_clamped = bins.masked_fill(zero_mask, 1)
|
| 57 |
+
|
| 58 |
+
new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
|
| 59 |
+
new_means.scatter_add_(0, repeat(buckets, 'n -> n d', d=dim), samples)
|
| 60 |
+
new_means = new_means / bins_min_clamped[..., None]
|
| 61 |
+
|
| 62 |
+
if use_cosine_sim:
|
| 63 |
+
new_means = l2norm(new_means)
|
| 64 |
+
|
| 65 |
+
means = torch.where(zero_mask[..., None], means, new_means)
|
| 66 |
+
|
| 67 |
+
return means, bins
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class EmbeddingEMA(nn.Module):
|
| 71 |
+
def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5, kmeans_init=True, codebook_init_path=''):
|
| 72 |
+
super().__init__()
|
| 73 |
+
self.num_tokens = num_tokens
|
| 74 |
+
self.codebook_dim = codebook_dim
|
| 75 |
+
self.decay = decay
|
| 76 |
+
self.eps = eps
|
| 77 |
+
if codebook_init_path == '':
|
| 78 |
+
if not kmeans_init:
|
| 79 |
+
weight = torch.randn(num_tokens, codebook_dim)
|
| 80 |
+
weight = l2norm(weight)
|
| 81 |
+
else:
|
| 82 |
+
weight = torch.zeros(num_tokens, codebook_dim)
|
| 83 |
+
self.register_buffer('initted', torch.Tensor([not kmeans_init]))
|
| 84 |
+
else:
|
| 85 |
+
print(f"load init codebook weight from {codebook_init_path}")
|
| 86 |
+
codebook_ckpt_weight = torch.load(codebook_init_path, map_location='cpu')
|
| 87 |
+
weight = codebook_ckpt_weight.clone()
|
| 88 |
+
self.register_buffer('initted', torch.Tensor([True]))
|
| 89 |
+
|
| 90 |
+
self.weight = nn.Parameter(weight, requires_grad=False)
|
| 91 |
+
self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad=False)
|
| 92 |
+
self.embed_avg = nn.Parameter(weight.clone(), requires_grad=False)
|
| 93 |
+
# self.register_buffer('initted', torch.Tensor([not kmeans_init]))
|
| 94 |
+
self.update = True
|
| 95 |
+
|
| 96 |
+
@torch.jit.ignore
|
| 97 |
+
def init_embed_(self, data):
|
| 98 |
+
if self.initted:
|
| 99 |
+
return
|
| 100 |
+
print("Performing kmeans init for codebook")
|
| 101 |
+
embed, cluster_size = kmeans(data, self.num_tokens, 10, use_cosine_sim=True)
|
| 102 |
+
self.weight.data.copy_(embed)
|
| 103 |
+
self.cluster_size.data.copy_(cluster_size)
|
| 104 |
+
self.initted.data.copy_(torch.Tensor([True]))
|
| 105 |
+
|
| 106 |
+
def forward(self, embed_id):
|
| 107 |
+
return F.embedding(embed_id, self.weight)
|
| 108 |
+
|
| 109 |
+
def cluster_size_ema_update(self, new_cluster_size):
|
| 110 |
+
self.cluster_size.data.mul_(self.decay).add_(new_cluster_size, alpha=1 - self.decay)
|
| 111 |
+
|
| 112 |
+
def embed_avg_ema_update(self, new_embed_avg):
|
| 113 |
+
self.embed_avg.data.mul_(self.decay).add_(new_embed_avg, alpha=1 - self.decay)
|
| 114 |
+
|
| 115 |
+
def weight_update(self, num_tokens):
|
| 116 |
+
n = self.cluster_size.sum()
|
| 117 |
+
smoothed_cluster_size = (
|
| 118 |
+
(self.cluster_size + self.eps) / (n + num_tokens * self.eps) * n
|
| 119 |
+
)
|
| 120 |
+
# normalize embedding average with smoothed cluster size
|
| 121 |
+
embed_normalized = self.embed_avg / smoothed_cluster_size.unsqueeze(1)
|
| 122 |
+
# embed_normalized = l2norm(self.embed_avg / smoothed_cluster_size.unsqueeze(1))
|
| 123 |
+
self.weight.data.copy_(embed_normalized)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def norm_ema_inplace(moving_avg, new, decay):
|
| 127 |
+
moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
|
| 128 |
+
moving_avg.data.copy_(l2norm(moving_avg.data))
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class NormEMAVectorQuantizer(nn.Module):
|
| 132 |
+
def __init__(self, n_embed, embedding_dim, beta, decay=0.99, eps=1e-5,
|
| 133 |
+
statistic_code_usage=True, kmeans_init=False, codebook_init_path=''):
|
| 134 |
+
super().__init__()
|
| 135 |
+
self.codebook_dim = embedding_dim
|
| 136 |
+
self.num_tokens = n_embed
|
| 137 |
+
self.beta = beta
|
| 138 |
+
self.decay = decay
|
| 139 |
+
|
| 140 |
+
# learnable = True if orthogonal_reg_weight > 0 else False
|
| 141 |
+
self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps, kmeans_init, codebook_init_path)
|
| 142 |
+
|
| 143 |
+
self.statistic_code_usage = statistic_code_usage
|
| 144 |
+
if statistic_code_usage:
|
| 145 |
+
self.register_buffer('cluster_size', torch.zeros(n_embed))
|
| 146 |
+
if distributed.is_available() and distributed.is_initialized():
|
| 147 |
+
print("ddp is enable, so use ddp_reduce to sync the statistic_code_usage for each gpu!")
|
| 148 |
+
self.all_reduce_fn = distributed.all_reduce
|
| 149 |
+
else:
|
| 150 |
+
self.all_reduce_fn = nn.Identity()
|
| 151 |
+
|
| 152 |
+
def reset_cluster_size(self, device):
|
| 153 |
+
if self.statistic_code_usage:
|
| 154 |
+
self.register_buffer('cluster_size', torch.zeros(self.num_tokens))
|
| 155 |
+
self.cluster_size = self.cluster_size.to(device)
|
| 156 |
+
|
| 157 |
+
def forward(self, z):
|
| 158 |
+
# reshape z -> (batch, height, width, channel) and flatten
|
| 159 |
+
# z, 'b c h w -> b h w c'
|
| 160 |
+
# z = rearrange(z, 'b c h w -> b h w c')
|
| 161 |
+
# z = z.transpose(1, 2)
|
| 162 |
+
z = l2norm(z)
|
| 163 |
+
z_flattened = z.reshape(-1, self.codebook_dim)
|
| 164 |
+
|
| 165 |
+
self.embedding.init_embed_(z_flattened)
|
| 166 |
+
|
| 167 |
+
d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \
|
| 168 |
+
self.embedding.weight.pow(2).sum(dim=1) - 2 * \
|
| 169 |
+
torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) # 'n d -> d n'
|
| 170 |
+
|
| 171 |
+
encoding_indices = torch.argmin(d, dim=1)
|
| 172 |
+
|
| 173 |
+
z_q = self.embedding(encoding_indices).view(z.shape)
|
| 174 |
+
|
| 175 |
+
encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype)
|
| 176 |
+
|
| 177 |
+
if not self.training:
|
| 178 |
+
with torch.no_grad():
|
| 179 |
+
cluster_size = encodings.sum(0)
|
| 180 |
+
self.all_reduce_fn(cluster_size)
|
| 181 |
+
ema_inplace(self.cluster_size, cluster_size, self.decay)
|
| 182 |
+
|
| 183 |
+
if self.training and self.embedding.update:
|
| 184 |
+
# EMA cluster size
|
| 185 |
+
|
| 186 |
+
bins = encodings.sum(0)
|
| 187 |
+
self.all_reduce_fn(bins)
|
| 188 |
+
|
| 189 |
+
# self.embedding.cluster_size_ema_update(bins)
|
| 190 |
+
ema_inplace(self.cluster_size, bins, self.decay)
|
| 191 |
+
|
| 192 |
+
zero_mask = (bins == 0)
|
| 193 |
+
bins = bins.masked_fill(zero_mask, 1.)
|
| 194 |
+
|
| 195 |
+
embed_sum = z_flattened.t() @ encodings
|
| 196 |
+
self.all_reduce_fn(embed_sum)
|
| 197 |
+
|
| 198 |
+
embed_normalized = (embed_sum / bins.unsqueeze(0)).t()
|
| 199 |
+
embed_normalized = l2norm(embed_normalized)
|
| 200 |
+
|
| 201 |
+
embed_normalized = torch.where(zero_mask[..., None], self.embedding.weight,
|
| 202 |
+
embed_normalized)
|
| 203 |
+
norm_ema_inplace(self.embedding.weight, embed_normalized, self.decay)
|
| 204 |
+
|
| 205 |
+
# compute loss for embedding
|
| 206 |
+
loss = self.beta * F.mse_loss(z_q.detach(), z)
|
| 207 |
+
|
| 208 |
+
# preserve gradients
|
| 209 |
+
z_q = z + (z_q - z).detach()
|
| 210 |
+
|
| 211 |
+
# reshape back to match original input shape
|
| 212 |
+
# z_q, 'b h w c -> b c h w'
|
| 213 |
+
# z_q = rearrange(z_q, 'b h w c -> b c h w')
|
| 214 |
+
# z_q = z_q.transpose(1, 2)
|
| 215 |
+
return z_q, loss, encoding_indices
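
The quantizer above keeps its codebook on the unit sphere and updates it with exponential moving averages instead of gradients: in training mode each forward pass accumulates per-code usage counts and feature sums, renormalizes them, and EMA-blends the result back into `EmbeddingEMA.weight`, while the only gradient-carrying term is the commitment loss `beta * mse(z_q.detach(), z)` combined with the straight-through estimator. Below is a minimal usage sketch; the codebook size, feature shapes, and hyper-parameters are illustrative assumptions, not values taken from this repository's config.

```python
import torch
from models.beats.quantizer import NormEMAVectorQuantizer  # module path as added in this commit

# Illustrative sizes only: 1024 codes of dimension 256.
quantizer = NormEMAVectorQuantizer(n_embed=1024, embedding_dim=256, beta=1.0, kmeans_init=True)
quantizer.train()

features = torch.randn(8, 100, 256)            # (batch, frames, feature_dim)
z_q, commit_loss, codes = quantizer(features)  # first training call runs the k-means codebook init

# z_q has the same shape as the input and passes gradients straight through;
# commit_loss is the only term to backpropagate, the codebook itself moves by EMA.
print(z_q.shape, commit_loss.item(), codes.shape)  # codes are flattened (batch * frames,) indices
```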
models/modeling_llama.py
ADDED
@@ -0,0 +1,754 @@
# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py

""" PyTorch LLaMA model."""
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers.models.llama.configuration_llama import LlamaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "LlamaConfig"


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


class LlamaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        LlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class LlamaRotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Build here to make `torch.jit.trace` work.
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
            self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    gather_indices = position_ids[:, None, :, None]  # [bs, 1, seq_len, 1]
    gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
    cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class LlamaMLP(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class LlamaDecoderLayer(nn.Module):
    def __init__(self, config: LlamaConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LlamaAttention(config=config)
        self.mlp = LlamaMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
        )
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


LLAMA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`LlamaConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class LlamaPreTrainedModel(PreTrainedModel):
    config_class = LlamaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlamaDecoderLayer"]
    _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, LlamaModel):
            module.gradient_checkpointing = value


LLAMA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class LlamaModel(LlamaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]

    Args:
        config: LlamaConfig
    """

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        query_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if query_embeds is not None:
            inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1)
            batch_size, seq_length, _ = inputs_embeds.shape

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class LlamaForCausalLM(LlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        query_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, LlamaForCausalLM

        >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you consciours? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            query_embeds=query_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values:
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
                query_embeds = None

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "query_embeds": query_embeds,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
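
Relative to the upstream Hugging Face implementation, this copy of the LLaMA model threads an extra `query_embeds` argument through `LlamaModel.forward`, `LlamaForCausalLM.forward`, and `prepare_inputs_for_generation`: when provided, it is concatenated in front of the token embeddings, so encoder/Q-Former outputs can be injected as a soft prefix, and it is dropped again once `past_key_values` exist. The sketch below shows one way a caller could drive this; the checkpoint path, prefix length, and prompt are illustrative assumptions, and the repository's real calling convention presumably lives in `models/salmonn.py` rather than looking exactly like this.

```python
import torch
from transformers import AutoTokenizer
from models.modeling_llama import LlamaForCausalLM  # the modified class added in this file

# Hypothetical checkpoint path and prefix length; replace with the repo's real config values.
tokenizer = AutoTokenizer.from_pretrained("path/to/llama-checkpoint")
model = LlamaForCausalLM.from_pretrained("path/to/llama-checkpoint").eval()

prompt = tokenizer("Describe the audio.", return_tensors="pt")
audio_prefix = torch.randn(1, 88, model.config.hidden_size)  # stand-in for Q-Former outputs

# The attention mask must already cover the prefix positions, because LlamaModel.forward
# concatenates query_embeds in front of the token embeddings before building the causal mask.
attention_mask = torch.ones(1, audio_prefix.shape[1] + prompt.input_ids.shape[1], dtype=torch.long)

generated = model.generate(
    prompt.input_ids,
    query_embeds=audio_prefix,
    attention_mask=attention_mask,
    max_new_tokens=64,
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```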
models/modeling_whisper.py
ADDED
@@ -0,0 +1,1770 @@
# This script is based on https://github.com/huggingface/transformers/blob/v4.29.1/src/transformers/models/whisper/modeling_whisper.py
|
| 2 |
+
|
| 3 |
+
""" PyTorch Whisper model."""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import random
|
| 7 |
+
from typing import Optional, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torch.utils.checkpoint
|
| 12 |
+
from torch import nn
|
| 13 |
+
from torch.nn import CrossEntropyLoss
|
| 14 |
+
|
| 15 |
+
from transformers.activations import ACT2FN
|
| 16 |
+
from transformers.generation.logits_process import WhisperTimeStampLogitsProcessor
|
| 17 |
+
from transformers.modeling_outputs import (
|
| 18 |
+
BaseModelOutput,
|
| 19 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
| 20 |
+
Seq2SeqLMOutput,
|
| 21 |
+
Seq2SeqModelOutput,
|
| 22 |
+
SequenceClassifierOutput,
|
| 23 |
+
)
|
| 24 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 25 |
+
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
| 26 |
+
from transformers.models.whisper.configuration_whisper import WhisperConfig
|
| 27 |
+
from transformers.models.whisper.tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
logger = logging.get_logger(__name__)
|
| 31 |
+
|
| 32 |
+
_CONFIG_FOR_DOC = "WhisperConfig"
|
| 33 |
+
_CHECKPOINT_FOR_DOC = "openai/whisper-tiny"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [
|
| 37 |
+
"openai/whisper-base",
|
| 38 |
+
# See all Whisper models at https://huggingface.co/models?filter=whisper
|
| 39 |
+
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
|
| 43 |
+
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
|
| 44 |
+
"""
|
| 45 |
+
Shift input ids one token to the right.
|
| 46 |
+
"""
|
| 47 |
+
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
|
| 48 |
+
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
|
| 49 |
+
shifted_input_ids[:, 0] = decoder_start_token_id
|
| 50 |
+
|
| 51 |
+
if pad_token_id is None:
|
| 52 |
+
raise ValueError("self.model.config.pad_token_id has to be defined.")
|
| 53 |
+
# replace possible -100 values in labels by `pad_token_id`
|
| 54 |
+
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
|
| 55 |
+
|
| 56 |
+
return shifted_input_ids
|
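An illustrative sketch (editor addition, not part of the uploaded file): how shift_tokens_right prepares decoder inputs from a label sequence. The token ids below are arbitrary placeholders, not real Whisper vocabulary entries.

import torch

labels = torch.tensor([[5, 6, 7, -100]])  # -100 marks an ignored label position
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
# -> tensor([[1, 5, 6, 7]]): every token moves one slot to the right, the start token
#    fills position 0, and any -100 left after the shift is replaced by pad_token_id.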
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
| 60 |
+
def _make_causal_mask(
|
| 61 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
| 62 |
+
):
|
| 63 |
+
"""
|
| 64 |
+
Make causal mask used for uni-directional (causal) self-attention.
|
| 65 |
+
"""
|
| 66 |
+
bsz, tgt_len = input_ids_shape
|
| 67 |
+
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
|
| 68 |
+
mask_cond = torch.arange(mask.size(-1), device=device)
|
| 69 |
+
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
| 70 |
+
mask = mask.to(dtype)
|
| 71 |
+
|
| 72 |
+
if past_key_values_length > 0:
|
| 73 |
+
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
| 74 |
+
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
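For intuition, a small worked example (editor addition): with a length-3 target and no cached past, the returned mask is zero on and below the diagonal and the dtype minimum above it, so each position can only attend to itself and earlier positions.

import torch

causal = _make_causal_mask(torch.Size([1, 3]), torch.float32, device=torch.device("cpu"))
# causal.shape == (1, 1, 3, 3); causal[0, 0] is
# [[0, min, min],
#  [0,   0, min],
#  [0,   0,   0]]   with min == torch.finfo(torch.float32).min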
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
| 78 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
| 79 |
+
"""
|
| 80 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
| 81 |
+
"""
|
| 82 |
+
bsz, src_len = mask.size()
|
| 83 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
| 84 |
+
|
| 85 |
+
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
| 86 |
+
|
| 87 |
+
inverted_mask = 1.0 - expanded_mask
|
| 88 |
+
|
| 89 |
+
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
|
| 93 |
+
def _compute_mask_indices(
|
| 94 |
+
shape: Tuple[int, int],
|
| 95 |
+
mask_prob: float,
|
| 96 |
+
mask_length: int,
|
| 97 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 98 |
+
min_masks: int = 0,
|
| 99 |
+
) -> np.ndarray:
|
| 100 |
+
"""
|
| 101 |
+
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
|
| 102 |
+
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
|
| 103 |
+
CPU as part of the preprocessing during training.
|
| 104 |
+
|
| 105 |
+
Args:
|
| 106 |
+
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
|
| 107 |
+
the first element is the batch size and the second element is the length of the axis to span.
|
| 108 |
+
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
|
| 109 |
+
independently generated mask spans of length `mask_length` is computed by
|
| 110 |
+
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
|
| 111 |
+
actual percentage will be smaller.
|
| 112 |
+
mask_length: size of the mask
|
| 113 |
+
min_masks: minimum number of masked spans
|
| 114 |
+
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
|
| 115 |
+
each batch dimension.
|
| 116 |
+
"""
|
| 117 |
+
batch_size, sequence_length = shape
|
| 118 |
+
|
| 119 |
+
if mask_length < 1:
|
| 120 |
+
raise ValueError("`mask_length` has to be bigger than 0.")
|
| 121 |
+
|
| 122 |
+
if mask_length > sequence_length:
|
| 123 |
+
raise ValueError(
|
| 124 |
+
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
|
| 125 |
+
f" and `sequence_length`: {sequence_length}`"
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
# epsilon is used for probabilistic rounding
|
| 129 |
+
epsilon = np.random.rand(1).item()
|
| 130 |
+
|
| 131 |
+
def compute_num_masked_span(input_length):
|
| 132 |
+
"""Given input length, compute how many spans should be masked"""
|
| 133 |
+
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
|
| 134 |
+
num_masked_span = max(num_masked_span, min_masks)
|
| 135 |
+
|
| 136 |
+
# make sure num masked span <= sequence_length
|
| 137 |
+
if num_masked_span * mask_length > sequence_length:
|
| 138 |
+
num_masked_span = sequence_length // mask_length
|
| 139 |
+
|
| 140 |
+
# make sure num_masked span is also <= input_length - (mask_length - 1)
|
| 141 |
+
if input_length - (mask_length - 1) < num_masked_span:
|
| 142 |
+
num_masked_span = max(input_length - (mask_length - 1), 0)
|
| 143 |
+
|
| 144 |
+
return num_masked_span
|
| 145 |
+
|
| 146 |
+
# compute number of masked spans in batch
|
| 147 |
+
input_lengths = (
|
| 148 |
+
attention_mask.sum(-1).detach().tolist()
|
| 149 |
+
if attention_mask is not None
|
| 150 |
+
else [sequence_length for _ in range(batch_size)]
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
# SpecAugment mask to fill
|
| 154 |
+
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
|
| 155 |
+
spec_aug_mask_idxs = []
|
| 156 |
+
|
| 157 |
+
max_num_masked_span = compute_num_masked_span(sequence_length)
|
| 158 |
+
|
| 159 |
+
if max_num_masked_span == 0:
|
| 160 |
+
return spec_aug_mask
|
| 161 |
+
|
| 162 |
+
for input_length in input_lengths:
|
| 163 |
+
# compute num of masked spans for this input
|
| 164 |
+
num_masked_span = compute_num_masked_span(input_length)
|
| 165 |
+
|
| 166 |
+
# get random indices to mask
|
| 167 |
+
spec_aug_mask_idx = np.random.choice(
|
| 168 |
+
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
# pick first sampled index that will serve as a dummy index to pad vector
|
| 172 |
+
# to ensure same dimension for all batches due to probabilistic rounding
|
| 173 |
+
# Picking first sample just pads those vectors twice.
|
| 174 |
+
if len(spec_aug_mask_idx) == 0:
|
| 175 |
+
# this case can only happen if `input_length` is strictly smaller than
|
| 176 |
+
# `sequence_length` in which case the last token has to be a padding
|
| 177 |
+
# token which we can use as a dummy mask id
|
| 178 |
+
dummy_mask_idx = sequence_length - 1
|
| 179 |
+
else:
|
| 180 |
+
dummy_mask_idx = spec_aug_mask_idx[0]
|
| 181 |
+
|
| 182 |
+
spec_aug_mask_idx = np.concatenate(
|
| 183 |
+
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
|
| 184 |
+
)
|
| 185 |
+
spec_aug_mask_idxs.append(spec_aug_mask_idx)
|
| 186 |
+
|
| 187 |
+
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
|
| 188 |
+
|
| 189 |
+
# expand masked indices to masked spans
|
| 190 |
+
spec_aug_mask_idxs = np.broadcast_to(
|
| 191 |
+
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
|
| 192 |
+
)
|
| 193 |
+
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
|
| 194 |
+
|
| 195 |
+
# add offset to the starting indexes so that indexes now create a span
|
| 196 |
+
offsets = np.arange(mask_length)[None, None, :]
|
| 197 |
+
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
|
| 198 |
+
batch_size, max_num_masked_span * mask_length
|
| 199 |
+
)
|
| 200 |
+
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
|
| 201 |
+
|
| 202 |
+
# ensure that we cannot have indices larger than sequence_length
|
| 203 |
+
if spec_aug_mask_idxs.max() > sequence_length - 1:
|
| 204 |
+
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
|
| 205 |
+
|
| 206 |
+
# scatter indices to mask
|
| 207 |
+
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
|
| 208 |
+
|
| 209 |
+
return spec_aug_mask
|
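A hedged usage sketch (editor addition): calling the helper with SpecAugment-style numbers; the values are arbitrary examples, not taken from any config in this repository.

spec_mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.2, mask_length=10)
# spec_mask is a (2, 100) boolean numpy array; each row gets spans of length 10 starting
# at roughly mask_prob * 100 / mask_length = 2 random positions, i.e. up to 20 masked
# frames per row (fewer where spans overlap).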
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class WhisperPositionalEmbedding(nn.Embedding):
|
| 213 |
+
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
|
| 214 |
+
super().__init__(num_positions, embedding_dim)
|
| 215 |
+
|
| 216 |
+
def forward(self, input_ids, past_key_values_length=0):
|
| 217 |
+
return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[1]]
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class WhisperAttention(nn.Module):
|
| 221 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 222 |
+
|
| 223 |
+
def __init__(
|
| 224 |
+
self,
|
| 225 |
+
embed_dim: int,
|
| 226 |
+
num_heads: int,
|
| 227 |
+
dropout: float = 0.0,
|
| 228 |
+
is_decoder: bool = False,
|
| 229 |
+
bias: bool = True,
|
| 230 |
+
):
|
| 231 |
+
super().__init__()
|
| 232 |
+
self.embed_dim = embed_dim
|
| 233 |
+
self.num_heads = num_heads
|
| 234 |
+
self.dropout = dropout
|
| 235 |
+
self.head_dim = embed_dim // num_heads
|
| 236 |
+
|
| 237 |
+
if (self.head_dim * num_heads) != self.embed_dim:
|
| 238 |
+
raise ValueError(
|
| 239 |
+
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
|
| 240 |
+
f" and `num_heads`: {num_heads})."
|
| 241 |
+
)
|
| 242 |
+
self.scaling = self.head_dim**-0.5
|
| 243 |
+
self.is_decoder = is_decoder
|
| 244 |
+
|
| 245 |
+
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
|
| 246 |
+
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
| 247 |
+
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
| 248 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
| 249 |
+
|
| 250 |
+
# Copied from transformers.models.bart.modeling_bart.BartAttention._shape with BART->whisper
|
| 251 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
| 252 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
| 253 |
+
|
| 254 |
+
# Copied from transformers.models.bart.modeling_bart.BartAttention.forward with BART->whisper
|
| 255 |
+
def forward(
|
| 256 |
+
self,
|
| 257 |
+
hidden_states: torch.Tensor,
|
| 258 |
+
key_value_states: Optional[torch.Tensor] = None,
|
| 259 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 260 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 261 |
+
layer_head_mask: Optional[torch.Tensor] = None,
|
| 262 |
+
output_attentions: bool = False,
|
| 263 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 264 |
+
"""Input shape: Batch x Time x Channel"""
|
| 265 |
+
|
| 266 |
+
# if key_value_states are provided this layer is used as a cross-attention layer
|
| 267 |
+
# for the decoder
|
| 268 |
+
is_cross_attention = key_value_states is not None
|
| 269 |
+
|
| 270 |
+
bsz, tgt_len, _ = hidden_states.size()
|
| 271 |
+
|
| 272 |
+
# get query proj
|
| 273 |
+
query_states = self.q_proj(hidden_states) * self.scaling
|
| 274 |
+
# get key, value proj
|
| 275 |
+
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
|
| 276 |
+
# is checking that the `sequence_length` of the `past_key_value` is the same as
|
| 277 |
+
# the provided `key_value_states` to support prefix tuning
|
| 278 |
+
if (
|
| 279 |
+
is_cross_attention
|
| 280 |
+
and past_key_value is not None
|
| 281 |
+
and past_key_value[0].shape[2] == key_value_states.shape[1]
|
| 282 |
+
):
|
| 283 |
+
# reuse k,v, cross_attentions
|
| 284 |
+
key_states = past_key_value[0]
|
| 285 |
+
value_states = past_key_value[1]
|
| 286 |
+
elif is_cross_attention:
|
| 287 |
+
# cross_attentions
|
| 288 |
+
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
|
| 289 |
+
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
|
| 290 |
+
elif past_key_value is not None:
|
| 291 |
+
# reuse k, v, self_attention
|
| 292 |
+
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
| 293 |
+
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
| 294 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
| 295 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
| 296 |
+
else:
|
| 297 |
+
# self_attention
|
| 298 |
+
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
| 299 |
+
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
| 300 |
+
|
| 301 |
+
if self.is_decoder:
|
| 302 |
+
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
|
| 303 |
+
# Further calls to cross_attention layer can then reuse all cross-attention
|
| 304 |
+
# key/value_states (first "if" case)
|
| 305 |
+
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
|
| 306 |
+
# all previous decoder key/value_states. Further calls to uni-directional self-attention
|
| 307 |
+
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
|
| 308 |
+
# if encoder bi-directional self-attention `past_key_value` is always `None`
|
| 309 |
+
past_key_value = (key_states, value_states)
|
| 310 |
+
|
| 311 |
+
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
| 312 |
+
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
| 313 |
+
key_states = key_states.reshape(*proj_shape)
|
| 314 |
+
value_states = value_states.reshape(*proj_shape)
|
| 315 |
+
|
| 316 |
+
src_len = key_states.size(1)
|
| 317 |
+
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
| 318 |
+
|
| 319 |
+
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
| 320 |
+
raise ValueError(
|
| 321 |
+
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
| 322 |
+
f" {attn_weights.size()}"
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
if attention_mask is not None:
|
| 326 |
+
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
| 327 |
+
raise ValueError(
|
| 328 |
+
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
| 329 |
+
)
|
| 330 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
|
| 331 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 332 |
+
|
| 333 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
| 334 |
+
|
| 335 |
+
if layer_head_mask is not None:
|
| 336 |
+
if layer_head_mask.size() != (self.num_heads,):
|
| 337 |
+
raise ValueError(
|
| 338 |
+
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
|
| 339 |
+
f" {layer_head_mask.size()}"
|
| 340 |
+
)
|
| 341 |
+
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| 342 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
| 343 |
+
|
| 344 |
+
if output_attentions:
|
| 345 |
+
# this operation is a bit awkward, but it's required to
|
| 346 |
+
# make sure that attn_weights keeps its gradient.
|
| 347 |
+
# In order to do so, attn_weights have to be reshaped
|
| 348 |
+
# twice and have to be reused in the following
|
| 349 |
+
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
| 350 |
+
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
|
| 351 |
+
else:
|
| 352 |
+
attn_weights_reshaped = None
|
| 353 |
+
|
| 354 |
+
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
| 355 |
+
|
| 356 |
+
attn_output = torch.bmm(attn_probs, value_states)
|
| 357 |
+
|
| 358 |
+
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
| 359 |
+
raise ValueError(
|
| 360 |
+
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
|
| 361 |
+
f" {attn_output.size()}"
|
| 362 |
+
)
|
| 363 |
+
|
| 364 |
+
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
| 365 |
+
attn_output = attn_output.transpose(1, 2)
|
| 366 |
+
|
| 367 |
+
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
|
| 368 |
+
# partitioned across GPUs when using tensor-parallelism.
|
| 369 |
+
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
|
| 370 |
+
|
| 371 |
+
attn_output = self.out_proj(attn_output)
|
| 372 |
+
|
| 373 |
+
return attn_output, attn_weights_reshaped, past_key_value
|
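To make the shapes concrete, a minimal self-attention call (editor addition; the sizes are arbitrary):

import torch

attn = WhisperAttention(embed_dim=64, num_heads=4)
x = torch.randn(2, 10, 64)  # (batch, time, channels), matching the docstring above
out, weights, _ = attn(x, output_attentions=True)
# out: (2, 10, 64); weights: (2, 4, 10, 10), one 10x10 attention map per head.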
| 374 |
+
|
| 375 |
+
|
| 376 |
+
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Whisper
|
| 377 |
+
class WhisperEncoderLayer(nn.Module):
|
| 378 |
+
def __init__(self, config: WhisperConfig):
|
| 379 |
+
super().__init__()
|
| 380 |
+
self.embed_dim = config.d_model
|
| 381 |
+
self.self_attn = WhisperAttention(
|
| 382 |
+
embed_dim=self.embed_dim,
|
| 383 |
+
num_heads=config.encoder_attention_heads,
|
| 384 |
+
dropout=config.attention_dropout,
|
| 385 |
+
)
|
| 386 |
+
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
|
| 387 |
+
self.dropout = config.dropout
|
| 388 |
+
self.activation_fn = ACT2FN[config.activation_function]
|
| 389 |
+
self.activation_dropout = config.activation_dropout
|
| 390 |
+
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
|
| 391 |
+
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
|
| 392 |
+
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
|
| 393 |
+
|
| 394 |
+
def forward(
|
| 395 |
+
self,
|
| 396 |
+
hidden_states: torch.Tensor,
|
| 397 |
+
attention_mask: torch.Tensor,
|
| 398 |
+
layer_head_mask: torch.Tensor,
|
| 399 |
+
output_attentions: bool = False,
|
| 400 |
+
) -> torch.Tensor:
|
| 401 |
+
"""
|
| 402 |
+
Args:
|
| 403 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
|
| 404 |
+
attention_mask (`torch.FloatTensor`): attention mask of size
|
| 405 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| 406 |
+
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
|
| 407 |
+
`(encoder_attention_heads,)`.
|
| 408 |
+
output_attentions (`bool`, *optional*):
|
| 409 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 410 |
+
returned tensors for more detail.
|
| 411 |
+
"""
|
| 412 |
+
residual = hidden_states
|
| 413 |
+
hidden_states = self.self_attn_layer_norm(hidden_states)
|
| 414 |
+
hidden_states, attn_weights, _ = self.self_attn(
|
| 415 |
+
hidden_states=hidden_states,
|
| 416 |
+
attention_mask=attention_mask,
|
| 417 |
+
layer_head_mask=layer_head_mask,
|
| 418 |
+
output_attentions=output_attentions,
|
| 419 |
+
)
|
| 420 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 421 |
+
hidden_states = residual + hidden_states
|
| 422 |
+
|
| 423 |
+
residual = hidden_states
|
| 424 |
+
hidden_states = self.final_layer_norm(hidden_states)
|
| 425 |
+
hidden_states = self.activation_fn(self.fc1(hidden_states))
|
| 426 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
|
| 427 |
+
hidden_states = self.fc2(hidden_states)
|
| 428 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 429 |
+
hidden_states = residual + hidden_states
|
| 430 |
+
|
| 431 |
+
if hidden_states.dtype == torch.float16 and (
|
| 432 |
+
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
|
| 433 |
+
):
|
| 434 |
+
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
|
| 435 |
+
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
|
| 436 |
+
|
| 437 |
+
outputs = (hidden_states,)
|
| 438 |
+
|
| 439 |
+
if output_attentions:
|
| 440 |
+
outputs += (attn_weights,)
|
| 441 |
+
|
| 442 |
+
return outputs
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Whisper
|
| 446 |
+
class WhisperDecoderLayer(nn.Module):
|
| 447 |
+
def __init__(self, config: WhisperConfig):
|
| 448 |
+
super().__init__()
|
| 449 |
+
self.embed_dim = config.d_model
|
| 450 |
+
|
| 451 |
+
self.self_attn = WhisperAttention(
|
| 452 |
+
embed_dim=self.embed_dim,
|
| 453 |
+
num_heads=config.decoder_attention_heads,
|
| 454 |
+
dropout=config.attention_dropout,
|
| 455 |
+
is_decoder=True,
|
| 456 |
+
)
|
| 457 |
+
self.dropout = config.dropout
|
| 458 |
+
self.activation_fn = ACT2FN[config.activation_function]
|
| 459 |
+
self.activation_dropout = config.activation_dropout
|
| 460 |
+
|
| 461 |
+
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
|
| 462 |
+
self.encoder_attn = WhisperAttention(
|
| 463 |
+
self.embed_dim,
|
| 464 |
+
config.decoder_attention_heads,
|
| 465 |
+
dropout=config.attention_dropout,
|
| 466 |
+
is_decoder=True,
|
| 467 |
+
)
|
| 468 |
+
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
|
| 469 |
+
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
|
| 470 |
+
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
|
| 471 |
+
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
|
| 472 |
+
|
| 473 |
+
def forward(
|
| 474 |
+
self,
|
| 475 |
+
hidden_states: torch.Tensor,
|
| 476 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 477 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
| 478 |
+
encoder_attention_mask: Optional[torch.Tensor] = None,
|
| 479 |
+
layer_head_mask: Optional[torch.Tensor] = None,
|
| 480 |
+
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
|
| 481 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| 482 |
+
output_attentions: Optional[bool] = False,
|
| 483 |
+
use_cache: Optional[bool] = True,
|
| 484 |
+
) -> torch.Tensor:
|
| 485 |
+
"""
|
| 486 |
+
Args:
|
| 487 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 488 |
+
attention_mask (`torch.FloatTensor`): attention mask of size
|
| 489 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| 490 |
+
encoder_hidden_states (`torch.FloatTensor`):
|
| 491 |
+
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 492 |
+
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
|
| 493 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
| 494 |
+
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
|
| 495 |
+
`(encoder_attention_heads,)`.
|
| 496 |
+
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
|
| 497 |
+
size `(decoder_attention_heads,)`.
|
| 498 |
+
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
|
| 499 |
+
output_attentions (`bool`, *optional*):
|
| 500 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 501 |
+
returned tensors for more detail.
|
| 502 |
+
"""
|
| 503 |
+
residual = hidden_states
|
| 504 |
+
hidden_states = self.self_attn_layer_norm(hidden_states)
|
| 505 |
+
|
| 506 |
+
# Self Attention
|
| 507 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
| 508 |
+
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
| 509 |
+
# add present self-attn cache to positions 1,2 of present_key_value tuple
|
| 510 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| 511 |
+
hidden_states=hidden_states,
|
| 512 |
+
past_key_value=self_attn_past_key_value,
|
| 513 |
+
attention_mask=attention_mask,
|
| 514 |
+
layer_head_mask=layer_head_mask,
|
| 515 |
+
output_attentions=output_attentions,
|
| 516 |
+
)
|
| 517 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 518 |
+
hidden_states = residual + hidden_states
|
| 519 |
+
|
| 520 |
+
# Cross-Attention Block
|
| 521 |
+
cross_attn_present_key_value = None
|
| 522 |
+
cross_attn_weights = None
|
| 523 |
+
if encoder_hidden_states is not None:
|
| 524 |
+
residual = hidden_states
|
| 525 |
+
hidden_states = self.encoder_attn_layer_norm(hidden_states)
|
| 526 |
+
|
| 527 |
+
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
|
| 528 |
+
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
|
| 529 |
+
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
|
| 530 |
+
hidden_states=hidden_states,
|
| 531 |
+
key_value_states=encoder_hidden_states,
|
| 532 |
+
attention_mask=encoder_attention_mask,
|
| 533 |
+
layer_head_mask=cross_attn_layer_head_mask,
|
| 534 |
+
past_key_value=cross_attn_past_key_value,
|
| 535 |
+
output_attentions=output_attentions,
|
| 536 |
+
)
|
| 537 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 538 |
+
hidden_states = residual + hidden_states
|
| 539 |
+
|
| 540 |
+
# add cross-attn to positions 3,4 of present_key_value tuple
|
| 541 |
+
present_key_value = present_key_value + cross_attn_present_key_value
|
| 542 |
+
|
| 543 |
+
# Fully Connected
|
| 544 |
+
residual = hidden_states
|
| 545 |
+
hidden_states = self.final_layer_norm(hidden_states)
|
| 546 |
+
hidden_states = self.activation_fn(self.fc1(hidden_states))
|
| 547 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
|
| 548 |
+
hidden_states = self.fc2(hidden_states)
|
| 549 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 550 |
+
hidden_states = residual + hidden_states
|
| 551 |
+
|
| 552 |
+
outputs = (hidden_states,)
|
| 553 |
+
|
| 554 |
+
if output_attentions:
|
| 555 |
+
outputs += (self_attn_weights, cross_attn_weights)
|
| 556 |
+
|
| 557 |
+
if use_cache:
|
| 558 |
+
outputs += (present_key_value,)
|
| 559 |
+
|
| 560 |
+
return outputs
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class WhisperPreTrainedModel(PreTrainedModel):
|
| 564 |
+
config_class = WhisperConfig
|
| 565 |
+
base_model_prefix = "model"
|
| 566 |
+
main_input_name = "input_features"
|
| 567 |
+
supports_gradient_checkpointing = True
|
| 568 |
+
_no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"]
|
| 569 |
+
|
| 570 |
+
def _init_weights(self, module):
|
| 571 |
+
std = self.config.init_std
|
| 572 |
+
if isinstance(module, (nn.Linear, nn.Conv1d)):
|
| 573 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 574 |
+
if module.bias is not None:
|
| 575 |
+
module.bias.data.zero_()
|
| 576 |
+
elif isinstance(module, nn.Embedding):
|
| 577 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
| 578 |
+
if module.padding_idx is not None:
|
| 579 |
+
module.weight.data[module.padding_idx].zero_()
|
| 580 |
+
|
| 581 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
| 582 |
+
if isinstance(module, (WhisperDecoder, WhisperEncoder)):
|
| 583 |
+
module.gradient_checkpointing = value
|
| 584 |
+
|
| 585 |
+
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
|
| 586 |
+
"""
|
| 587 |
+
Computes the output length of the convolutional layers
|
| 588 |
+
"""
|
| 589 |
+
input_lengths = (input_lengths - 1) // 2 + 1
|
| 590 |
+
|
| 591 |
+
return input_lengths
|
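Worked arithmetic (editor addition): Whisper's standard 30-second log-mel input has 3000 frames, and the stride-2 second convolution in the encoder halves that, which is exactly what this formula reports.

frames_out = (3000 - 1) // 2 + 1
# frames_out == 1500, the sequence length seen by the encoder layers.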
| 592 |
+
|
| 593 |
+
|
| 594 |
+
WHISPER_START_DOCSTRING = r"""
|
| 595 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 596 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 597 |
+
etc.)
|
| 598 |
+
|
| 599 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 600 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 601 |
+
and behavior.
|
| 602 |
+
|
| 603 |
+
Parameters:
|
| 604 |
+
config ([`WhisperConfig`]):
|
| 605 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| 606 |
+
load the weights associated with the model, only the configuration. Check out the
|
| 607 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 608 |
+
"""
|
| 609 |
+
|
| 610 |
+
WHISPER_INPUTS_DOCSTRING = r"""
|
| 611 |
+
Args:
|
| 612 |
+
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
|
| 613 |
+
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
|
| 614 |
+
loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
|
| 615 |
+
the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
|
| 616 |
+
[`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
|
| 617 |
+
tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
|
| 618 |
+
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 619 |
+
Mask to avoid performing *SpecAugment* data augmentation on padding token indices. Mask values selected in
|
| 620 |
+
`[0, 1]`:
|
| 621 |
+
|
| 622 |
+
- 1 for tokens that are **not masked**,
|
| 623 |
+
- 0 for tokens that are **masked**.
|
| 624 |
+
|
| 625 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 626 |
+
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 627 |
+
Indices of decoder input sequence tokens in the vocabulary.
|
| 628 |
+
|
| 629 |
+
Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 630 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 631 |
+
|
| 632 |
+
[What are decoder input IDs?](../glossary#decoder-input-ids)
|
| 633 |
+
|
| 634 |
+
Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
|
| 635 |
+
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
| 636 |
+
`past_key_values`).
|
| 637 |
+
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
| 638 |
+
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
|
| 639 |
+
be used by default.
|
| 640 |
+
|
| 641 |
+
If you want to change padding behavior, you should read
|
| 642 |
+
[`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
|
| 643 |
+
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
|
| 644 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
| 645 |
+
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
|
| 646 |
+
|
| 647 |
+
- 1 indicates the head is **not masked**,
|
| 648 |
+
- 0 indicates the head is **masked**.
|
| 649 |
+
|
| 650 |
+
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 651 |
+
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
|
| 652 |
+
|
| 653 |
+
- 1 indicates the head is **not masked**,
|
| 654 |
+
- 0 indicates the head is **masked**.
|
| 655 |
+
|
| 656 |
+
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 657 |
+
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
|
| 658 |
+
|
| 659 |
+
- 1 indicates the head is **not masked**,
|
| 660 |
+
- 0 indicates the head is **masked**.
|
| 661 |
+
|
| 662 |
+
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
|
| 663 |
+
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
|
| 664 |
+
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
|
| 665 |
+
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
|
| 666 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 667 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
| 668 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
|
| 669 |
+
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
| 670 |
+
|
| 671 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| 672 |
+
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 673 |
+
|
| 674 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
| 675 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
| 676 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
| 677 |
+
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
|
| 678 |
+
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
|
| 679 |
+
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
|
| 680 |
+
input (see `past_key_values`). This is useful if you want more control over how to convert
|
| 681 |
+
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
|
| 682 |
+
use_cache (`bool`, *optional*):
|
| 683 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| 684 |
+
`past_key_values`).
|
| 685 |
+
output_attentions (`bool`, *optional*):
|
| 686 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 687 |
+
tensors for more detail.
|
| 688 |
+
output_hidden_states (`bool`, *optional*):
|
| 689 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 690 |
+
more detail.
|
| 691 |
+
return_dict (`bool`, *optional*):
|
| 692 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 693 |
+
"""
|
| 694 |
+
|
| 695 |
+
WHISPER_ENCODER_INPUTS_DOCSTRING = r"""
|
| 696 |
+
Args:
|
| 697 |
+
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
|
| 698 |
+
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
|
| 699 |
+
loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
|
| 700 |
+
the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
|
| 701 |
+
[`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
|
| 702 |
+
tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
|
| 703 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
| 704 |
+
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
|
| 705 |
+
|
| 706 |
+
- 1 indicates the head is **not masked**,
|
| 707 |
+
- 0 indicates the head is **masked**.
|
| 708 |
+
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
|
| 709 |
+
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
|
| 710 |
+
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
|
| 711 |
+
hidden-states at the output of the last layer of the encoder.
|
| 712 |
+
output_attentions (`bool`, *optional*):
|
| 713 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 714 |
+
tensors for more detail.
|
| 715 |
+
output_hidden_states (`bool`, *optional*):
|
| 716 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 717 |
+
more detail.
|
| 718 |
+
return_dict (`bool`, *optional*):
|
| 719 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 720 |
+
"""
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
class WhisperEncoder(WhisperPreTrainedModel):
|
| 724 |
+
"""
|
| 725 |
+
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
|
| 726 |
+
[`WhisperEncoderLayer`].
|
| 727 |
+
|
| 728 |
+
Args:
|
| 729 |
+
config: WhisperConfig
|
| 730 |
+
"""
|
| 731 |
+
|
| 732 |
+
def __init__(self, config: WhisperConfig):
|
| 733 |
+
super().__init__(config)
|
| 734 |
+
self.dropout = config.dropout
|
| 735 |
+
self.layerdrop = config.encoder_layerdrop
|
| 736 |
+
|
| 737 |
+
embed_dim = config.d_model
|
| 738 |
+
self.num_mel_bins = config.num_mel_bins
|
| 739 |
+
self.padding_idx = config.pad_token_id
|
| 740 |
+
self.max_source_positions = config.max_source_positions
|
| 741 |
+
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
|
| 742 |
+
|
| 743 |
+
self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
|
| 744 |
+
self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
|
| 745 |
+
|
| 746 |
+
self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim)
|
| 747 |
+
|
| 748 |
+
self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)])
|
| 749 |
+
self.layer_norm = nn.LayerNorm(config.d_model)
|
| 750 |
+
|
| 751 |
+
self.gradient_checkpointing = False
|
| 752 |
+
# Initialize weights and apply final processing
|
| 753 |
+
self.post_init()
|
| 754 |
+
|
| 755 |
+
def _freeze_parameters(self):
|
| 756 |
+
for param in self.parameters():
|
| 757 |
+
param.requires_grad = False
|
| 758 |
+
self._requires_grad = False
|
| 759 |
+
|
| 760 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 761 |
+
return self.conv1
|
| 762 |
+
|
| 763 |
+
def set_input_embeddings(self, value: nn.Module):
|
| 764 |
+
self.conv1 = value
|
| 765 |
+
|
| 766 |
+
def forward(
|
| 767 |
+
self,
|
| 768 |
+
input_features,
|
| 769 |
+
attention_mask=None,
|
| 770 |
+
head_mask=None,
|
| 771 |
+
output_attentions=None,
|
| 772 |
+
output_hidden_states=None,
|
| 773 |
+
return_dict=None,
|
| 774 |
+
):
|
| 775 |
+
r"""
|
| 776 |
+
Args:
|
| 777 |
+
input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
|
| 778 |
+
Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
|
| 779 |
+
obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
|
| 780 |
+
`numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
|
| 781 |
+
`input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
|
| 782 |
+
and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
|
| 783 |
+
attention_mask (`torch.Tensor`, *optional*):
|
| 784 |
+
Whisper does not support masking of the `input_features`; this argument is preserved for compatibility,
|
| 785 |
+
but it is not used. By default, silence in the input log-mel spectrogram is ignored.
|
| 786 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
| 787 |
+
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
|
| 788 |
+
|
| 789 |
+
- 1 indicates the head is **not masked**,
|
| 790 |
+
- 0 indicates the head is **masked**.
|
| 791 |
+
output_attentions (`bool`, *optional*):
|
| 792 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 793 |
+
returned tensors for more detail.
|
| 794 |
+
output_hidden_states (`bool`, *optional*):
|
| 795 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
| 796 |
+
for more detail.
|
| 797 |
+
return_dict (`bool`, *optional*):
|
| 798 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 799 |
+
"""
|
| 800 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 801 |
+
output_hidden_states = (
|
| 802 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 803 |
+
)
|
| 804 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 805 |
+
inputs_embeds = nn.functional.gelu(self.conv1(input_features))
|
| 806 |
+
inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
|
| 807 |
+
|
| 808 |
+
inputs_embeds = inputs_embeds.permute(0, 2, 1)
|
| 809 |
+
embed_pos = self.embed_positions.weight
|
| 810 |
+
|
| 811 |
+
hidden_states = inputs_embeds + embed_pos
|
| 812 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 813 |
+
|
| 814 |
+
encoder_states = () if output_hidden_states else None
|
| 815 |
+
all_attentions = () if output_attentions else None
|
| 816 |
+
|
| 817 |
+
# check if head_mask has a correct number of layers specified if desired
|
| 818 |
+
if head_mask is not None:
|
| 819 |
+
assert head_mask.size()[0] == (
|
| 820 |
+
len(self.layers)
|
| 821 |
+
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
|
| 822 |
+
|
| 823 |
+
for idx, encoder_layer in enumerate(self.layers):
|
| 824 |
+
if output_hidden_states:
|
| 825 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 826 |
+
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
|
| 827 |
+
dropout_probability = random.uniform(0, 1)
|
| 828 |
+
if self.training and (dropout_probability < self.layerdrop): # skip the layer
|
| 829 |
+
layer_outputs = (None, None)
|
| 830 |
+
else:
|
| 831 |
+
if self.gradient_checkpointing and self.training:
|
| 832 |
+
|
| 833 |
+
def create_custom_forward(module):
|
| 834 |
+
def custom_forward(*inputs):
|
| 835 |
+
return module(*inputs, output_attentions)
|
| 836 |
+
|
| 837 |
+
return custom_forward
|
| 838 |
+
|
| 839 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 840 |
+
create_custom_forward(encoder_layer),
|
| 841 |
+
hidden_states,
|
| 842 |
+
None,
|
| 843 |
+
(head_mask[idx] if head_mask is not None else None),
|
| 844 |
+
)
|
| 845 |
+
else:
|
| 846 |
+
layer_outputs = encoder_layer(
|
| 847 |
+
hidden_states,
|
| 848 |
+
None,
|
| 849 |
+
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
|
| 850 |
+
output_attentions=output_attentions,
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
hidden_states = layer_outputs[0]
|
| 854 |
+
|
| 855 |
+
if output_attentions:
|
| 856 |
+
all_attentions = all_attentions + (layer_outputs[1],)
|
| 857 |
+
|
| 858 |
+
hidden_states = self.layer_norm(hidden_states)
|
| 859 |
+
if output_hidden_states:
|
| 860 |
+
encoder_states = encoder_states + (hidden_states,)
|
| 861 |
+
|
| 862 |
+
if not return_dict:
|
| 863 |
+
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
|
| 864 |
+
return BaseModelOutput(
|
| 865 |
+
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
|
| 866 |
+
)
|
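A minimal end-to-end sketch (editor addition) of driving this encoder on its own, using the feature extractor that the input docstrings above refer to and a default WhisperConfig; the checkpoint name is only the documentation example, not a requirement of this code.

import numpy as np
import torch
from transformers import WhisperConfig, WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
features = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt").input_features
# features: (1, 80, 3000) -- 80 mel bins, padded to the 30 s window

config = WhisperConfig()
encoder = WhisperEncoder(config)  # the class defined above
with torch.no_grad():
    out = encoder(features)
# out.last_hidden_state: (1, 1500, config.d_model); the stride-2 conv2 halves the 3000 input frames.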
| 867 |
+
|
| 868 |
+
|
| 869 |
+
class WhisperDecoder(WhisperPreTrainedModel):
|
| 870 |
+
"""
|
| 871 |
+
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`]
|
| 872 |
+
|
| 873 |
+
Args:
|
| 874 |
+
config: WhisperConfig
|
| 875 |
+
"""
|
| 876 |
+
|
| 877 |
+
def __init__(self, config: WhisperConfig):
|
| 878 |
+
super().__init__(config)
|
| 879 |
+
self.dropout = config.dropout
|
| 880 |
+
self.layerdrop = config.decoder_layerdrop
|
| 881 |
+
self.padding_idx = config.pad_token_id
|
| 882 |
+
self.max_target_positions = config.max_target_positions
|
| 883 |
+
self.max_source_positions = config.max_source_positions
|
| 884 |
+
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
|
| 885 |
+
|
| 886 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
|
| 887 |
+
self.embed_positions = WhisperPositionalEmbedding(self.max_target_positions, config.d_model)
|
| 888 |
+
|
| 889 |
+
self.layers = nn.ModuleList([WhisperDecoderLayer(config) for _ in range(config.decoder_layers)])
|
| 890 |
+
|
| 891 |
+
self.layer_norm = nn.LayerNorm(config.d_model)
|
| 892 |
+
|
| 893 |
+
self.gradient_checkpointing = False
|
| 894 |
+
# Initialize weights and apply final processing
|
| 895 |
+
self.post_init()
|
| 896 |
+
|
| 897 |
+
def get_input_embeddings(self):
|
| 898 |
+
return self.embed_tokens
|
| 899 |
+
|
| 900 |
+
def set_input_embeddings(self, value):
|
| 901 |
+
self.embed_tokens = value
|
| 902 |
+
|
| 903 |
+
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
|
| 904 |
+
# create causal mask
|
| 905 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
| 906 |
+
combined_attention_mask = None
|
| 907 |
+
|
| 908 |
+
if input_shape[-1] > 1:
|
| 909 |
+
combined_attention_mask = _make_causal_mask(
|
| 910 |
+
input_shape,
|
| 911 |
+
inputs_embeds.dtype,
|
| 912 |
+
device=inputs_embeds.device,
|
| 913 |
+
past_key_values_length=past_key_values_length,
|
| 914 |
+
)
|
| 915 |
+
|
| 916 |
+
if attention_mask is not None:
|
| 917 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
| 918 |
+
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
|
| 919 |
+
combined_attention_mask = (
|
| 920 |
+
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
|
| 921 |
+
)
|
| 922 |
+
|
| 923 |
+
return combined_attention_mask
|
| 924 |
+
|
| 925 |
+
def forward(
|
| 926 |
+
self,
|
| 927 |
+
input_ids=None,
|
| 928 |
+
attention_mask=None,
|
| 929 |
+
encoder_hidden_states=None,
|
| 930 |
+
head_mask=None,
|
| 931 |
+
cross_attn_head_mask=None,
|
| 932 |
+
past_key_values=None,
|
| 933 |
+
inputs_embeds=None,
|
| 934 |
+
use_cache=None,
|
| 935 |
+
output_attentions=None,
|
| 936 |
+
output_hidden_states=None,
|
| 937 |
+
return_dict=None,
|
| 938 |
+
):
|
| 939 |
+
r"""
|
| 940 |
+
Args:
|
| 941 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 942 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
|
| 943 |
+
provide it.
|
| 944 |
+
|
| 945 |
+
Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 946 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 947 |
+
|
| 948 |
+
[What are input IDs?](../glossary#input-ids)
|
| 949 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 950 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 951 |
+
|
| 952 |
+
- 1 for tokens that are **not masked**,
|
| 953 |
+
- 0 for tokens that are **masked**.
|
| 954 |
+
|
| 955 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 956 |
+
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
|
| 957 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
|
| 958 |
+
of the decoder.
|
| 959 |
+
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 960 |
+
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
|
| 961 |
+
|
| 962 |
+
- 1 indicates the head is **not masked**,
|
| 963 |
+
- 0 indicates the head is **masked**.
|
| 964 |
+
|
| 965 |
+
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
| 966 |
+
Mask to nullify selected heads of the cross-attention modules in the decoder, so that cross-attention
|
| 967 |
+
is skipped for those heads. Mask values selected in `[0, 1]`:
|
| 968 |
+
|
| 969 |
+
- 1 indicates the head is **not masked**,
|
| 970 |
+
- 0 indicates the head is **masked**.
|
| 971 |
+
|
| 972 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 973 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
| 974 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
|
| 975 |
+
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
| 976 |
+
|
| 977 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
|
| 978 |
+
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
| 979 |
+
|
| 980 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
|
| 981 |
+
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
|
| 982 |
+
all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
|
| 983 |
+
shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
|
| 984 |
+
`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
|
| 985 |
+
control over how to convert `input_ids` indices into associated vectors than the model's internal
|
| 986 |
+
embedding lookup matrix.
|
| 987 |
+
output_attentions (`bool`, *optional*):
|
| 988 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 989 |
+
returned tensors for more detail.
|
| 990 |
+
output_hidden_states (`bool`, *optional*):
|
| 991 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
| 992 |
+
for more detail.
|
| 993 |
+
return_dict (`bool`, *optional*):
|
| 994 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 995 |
+
"""
|
| 996 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 997 |
+
output_hidden_states = (
|
| 998 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 999 |
+
)
|
| 1000 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 1001 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1002 |
+
|
| 1003 |
+
# retrieve input_ids and inputs_embeds
|
| 1004 |
+
if input_ids is not None and inputs_embeds is not None:
|
| 1005 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
| 1006 |
+
elif input_ids is not None:
|
| 1007 |
+
input_shape = input_ids.size()
|
| 1008 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
| 1009 |
+
elif inputs_embeds is not None:
|
| 1010 |
+
input_shape = inputs_embeds.size()[:-1]
|
| 1011 |
+
else:
|
| 1012 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
| 1013 |
+
|
| 1014 |
+
# past_key_values_length
|
| 1015 |
+
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
| 1016 |
+
|
| 1017 |
+
if inputs_embeds is None:
|
| 1018 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 1019 |
+
|
| 1020 |
+
attention_mask = self._prepare_decoder_attention_mask(
|
| 1021 |
+
attention_mask, input_shape, inputs_embeds, past_key_values_length
|
| 1022 |
+
)
|
| 1023 |
+
|
| 1024 |
+
# embed positions
|
| 1025 |
+
if input_ids is not None:
|
| 1026 |
+
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
|
| 1027 |
+
else:
|
| 1028 |
+
positions = self.embed_positions(inputs_embeds, past_key_values_length=past_key_values_length)
|
| 1029 |
+
|
| 1030 |
+
hidden_states = inputs_embeds + positions
|
| 1031 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
| 1032 |
+
|
| 1033 |
+
if self.gradient_checkpointing and self.training:
|
| 1034 |
+
if use_cache:
|
| 1035 |
+
logger.warning_once(
|
| 1036 |
+
"`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
|
| 1037 |
+
)
|
| 1038 |
+
use_cache = False
|
| 1039 |
+
# decoder layers
|
| 1040 |
+
all_hidden_states = () if output_hidden_states else None
|
| 1041 |
+
all_self_attns = () if output_attentions else None
|
| 1042 |
+
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
|
| 1043 |
+
next_decoder_cache = () if use_cache else None
|
| 1044 |
+
|
| 1045 |
+
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
|
| 1046 |
+
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
|
| 1047 |
+
if attn_mask is not None:
|
| 1048 |
+
assert attn_mask.size()[0] == (len(self.layers)), (
|
| 1049 |
+
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
|
| 1050 |
+
f" {head_mask.size()[0]}."
|
| 1051 |
+
)
|
| 1052 |
+
for idx, decoder_layer in enumerate(self.layers):
|
| 1053 |
+
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
|
| 1054 |
+
if output_hidden_states:
|
| 1055 |
+
all_hidden_states += (hidden_states,)
|
| 1056 |
+
dropout_probability = random.uniform(0, 1)
|
| 1057 |
+
if self.training and (dropout_probability < self.layerdrop):
|
| 1058 |
+
continue
|
| 1059 |
+
|
| 1060 |
+
past_key_value = past_key_values[idx] if past_key_values is not None else None
|
| 1061 |
+
|
| 1062 |
+
if self.gradient_checkpointing and self.training:
|
| 1063 |
+
|
| 1064 |
+
def create_custom_forward(module):
|
| 1065 |
+
def custom_forward(*inputs):
|
| 1066 |
+
# None for past_key_value
|
| 1067 |
+
return module(*inputs, output_attentions, use_cache)
|
| 1068 |
+
|
| 1069 |
+
return custom_forward
|
| 1070 |
+
|
| 1071 |
+
layer_outputs = torch.utils.checkpoint.checkpoint(
|
| 1072 |
+
create_custom_forward(decoder_layer),
|
| 1073 |
+
hidden_states,
|
| 1074 |
+
attention_mask,
|
| 1075 |
+
encoder_hidden_states,
|
| 1076 |
+
None, # encoder attention mask
|
| 1077 |
+
head_mask[idx] if head_mask is not None else None,
|
| 1078 |
+
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
|
| 1079 |
+
None, # past_key_value
|
| 1080 |
+
)
|
| 1081 |
+
else:
|
| 1082 |
+
layer_outputs = decoder_layer(
|
| 1083 |
+
hidden_states,
|
| 1084 |
+
attention_mask=attention_mask,
|
| 1085 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 1086 |
+
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
|
| 1087 |
+
cross_attn_layer_head_mask=(
|
| 1088 |
+
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
|
| 1089 |
+
),
|
| 1090 |
+
past_key_value=past_key_value,
|
| 1091 |
+
output_attentions=output_attentions,
|
| 1092 |
+
use_cache=use_cache,
|
| 1093 |
+
)
|
| 1094 |
+
hidden_states = layer_outputs[0]
|
| 1095 |
+
|
| 1096 |
+
if use_cache:
|
| 1097 |
+
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
|
| 1098 |
+
|
| 1099 |
+
if output_attentions:
|
| 1100 |
+
all_self_attns += (layer_outputs[1],)
|
| 1101 |
+
|
| 1102 |
+
if encoder_hidden_states is not None:
|
| 1103 |
+
all_cross_attentions += (layer_outputs[2],)
|
| 1104 |
+
|
| 1105 |
+
hidden_states = self.layer_norm(hidden_states)
|
| 1106 |
+
# add hidden states from the last decoder layer
|
| 1107 |
+
if output_hidden_states:
|
| 1108 |
+
all_hidden_states += (hidden_states,)
|
| 1109 |
+
|
| 1110 |
+
next_cache = next_decoder_cache if use_cache else None
|
| 1111 |
+
if not return_dict:
|
| 1112 |
+
return tuple(
|
| 1113 |
+
v
|
| 1114 |
+
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
|
| 1115 |
+
if v is not None
|
| 1116 |
+
)
|
| 1117 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
| 1118 |
+
last_hidden_state=hidden_states,
|
| 1119 |
+
past_key_values=next_cache,
|
| 1120 |
+
hidden_states=all_hidden_states,
|
| 1121 |
+
attentions=all_self_attns,
|
| 1122 |
+
cross_attentions=all_cross_attentions,
|
| 1123 |
+
)
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
@add_start_docstrings(
|
| 1127 |
+
"The bare Whisper Model outputting raw hidden-states without any specific head on top.",
|
| 1128 |
+
WHISPER_START_DOCSTRING,
|
| 1129 |
+
)
|
| 1130 |
+
class WhisperModel(WhisperPreTrainedModel):
|
| 1131 |
+
_keys_to_ignore_on_load_missing = [r"proj_out.weight"]
|
| 1132 |
+
|
| 1133 |
+
def __init__(self, config: WhisperConfig):
|
| 1134 |
+
super().__init__(config)
|
| 1135 |
+
|
| 1136 |
+
self.encoder = WhisperEncoder(config)
|
| 1137 |
+
self.decoder = WhisperDecoder(config)
|
| 1138 |
+
# Initialize weights and apply final processing
|
| 1139 |
+
self.post_init()
|
| 1140 |
+
|
| 1141 |
+
def get_input_embeddings(self):
|
| 1142 |
+
return self.decoder.embed_tokens
|
| 1143 |
+
|
| 1144 |
+
def set_input_embeddings(self, value):
|
| 1145 |
+
self.decoder.embed_tokens = value
|
| 1146 |
+
|
| 1147 |
+
def get_encoder(self):
|
| 1148 |
+
return self.encoder
|
| 1149 |
+
|
| 1150 |
+
def get_decoder(self):
|
| 1151 |
+
return self.decoder
|
| 1152 |
+
|
| 1153 |
+
def freeze_encoder(self):
|
| 1154 |
+
"""
|
| 1155 |
+
Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
|
| 1156 |
+
not be updated during training.
|
| 1157 |
+
"""
|
| 1158 |
+
self.encoder._freeze_parameters()
|
| 1159 |
+
|
| 1160 |
+
def _mask_input_features(
|
| 1161 |
+
self,
|
| 1162 |
+
input_features: torch.FloatTensor,
|
| 1163 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 1164 |
+
):
|
| 1165 |
+
"""
|
| 1166 |
+
Masks extracted features along time axis and/or along feature axis according to
|
| 1167 |
+
[SpecAugment](https://arxiv.org/abs/1904.08779).
|
| 1168 |
+
"""
|
| 1169 |
+
|
| 1170 |
+
# `config.apply_spec_augment` can set masking to False
|
| 1171 |
+
if not getattr(self.config, "apply_spec_augment", True):
|
| 1172 |
+
return input_features
|
| 1173 |
+
|
| 1174 |
+
# generate indices & apply SpecAugment along time axis
|
| 1175 |
+
batch_size, hidden_size, sequence_length = input_features.size()
|
| 1176 |
+
|
| 1177 |
+
if self.config.mask_time_prob > 0 and self.training:
|
| 1178 |
+
# generate indices & apply SpecAugment along time axis
|
| 1179 |
+
mask_time_indices = _compute_mask_indices(
|
| 1180 |
+
(batch_size, sequence_length),
|
| 1181 |
+
mask_prob=self.config.mask_time_prob,
|
| 1182 |
+
mask_length=self.config.mask_time_length,
|
| 1183 |
+
attention_mask=attention_mask,
|
| 1184 |
+
min_masks=self.config.mask_time_min_masks,
|
| 1185 |
+
)
|
| 1186 |
+
mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool)
|
| 1187 |
+
mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1)
|
| 1188 |
+
input_features[mask_time_indices] = 0
|
| 1189 |
+
|
| 1190 |
+
if self.config.mask_feature_prob > 0 and self.training:
|
| 1191 |
+
# generate indices & apply SpecAugment along feature axis
|
| 1192 |
+
mask_feature_indices = _compute_mask_indices(
|
| 1193 |
+
(batch_size, hidden_size),
|
| 1194 |
+
mask_prob=self.config.mask_feature_prob,
|
| 1195 |
+
mask_length=self.config.mask_feature_length,
|
| 1196 |
+
min_masks=self.config.mask_feature_min_masks,
|
| 1197 |
+
)
|
| 1198 |
+
mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool)
|
| 1199 |
+
input_features[mask_feature_indices] = 0
|
| 1200 |
+
|
| 1201 |
+
return input_features
|
| 1202 |
+
|
| 1203 |
+
@add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING)
|
| 1204 |
+
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
|
| 1205 |
+
def forward(
|
| 1206 |
+
self,
|
| 1207 |
+
input_features: Optional[torch.FloatTensor] = None,
|
| 1208 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 1209 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
| 1210 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
| 1211 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1212 |
+
decoder_head_mask: Optional[torch.Tensor] = None,
|
| 1213 |
+
cross_attn_head_mask: Optional[torch.Tensor] = None,
|
| 1214 |
+
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1215 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1216 |
+
decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None,
|
| 1217 |
+
use_cache: Optional[bool] = None,
|
| 1218 |
+
output_attentions: Optional[bool] = None,
|
| 1219 |
+
output_hidden_states: Optional[bool] = None,
|
| 1220 |
+
return_dict: Optional[bool] = None,
|
| 1221 |
+
) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
|
| 1222 |
+
r"""
|
| 1223 |
+
Returns:
|
| 1224 |
+
|
| 1225 |
+
Example:
|
| 1226 |
+
```python
|
| 1227 |
+
>>> import torch
|
| 1228 |
+
>>> from transformers import AutoFeatureExtractor, WhisperModel
|
| 1229 |
+
>>> from datasets import load_dataset
|
| 1230 |
+
|
| 1231 |
+
>>> model = WhisperModel.from_pretrained("openai/whisper-base")
|
| 1232 |
+
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base")
|
| 1233 |
+
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
| 1234 |
+
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
|
| 1235 |
+
>>> input_features = inputs.input_features
|
| 1236 |
+
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
|
| 1237 |
+
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
|
| 1238 |
+
>>> list(last_hidden_state.shape)
|
| 1239 |
+
[1, 2, 512]
|
| 1240 |
+
```"""
|
| 1241 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1242 |
+
output_hidden_states = (
|
| 1243 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1244 |
+
)
|
| 1245 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 1246 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1247 |
+
|
| 1248 |
+
if encoder_outputs is None:
|
| 1249 |
+
input_features = self._mask_input_features(input_features, attention_mask=attention_mask)
|
| 1250 |
+
|
| 1251 |
+
encoder_outputs = self.encoder(
|
| 1252 |
+
input_features,
|
| 1253 |
+
head_mask=head_mask,
|
| 1254 |
+
output_attentions=output_attentions,
|
| 1255 |
+
output_hidden_states=output_hidden_states,
|
| 1256 |
+
return_dict=return_dict,
|
| 1257 |
+
)
|
| 1258 |
+
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
|
| 1259 |
+
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
|
| 1260 |
+
encoder_outputs = BaseModelOutput(
|
| 1261 |
+
last_hidden_state=encoder_outputs[0],
|
| 1262 |
+
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
|
| 1263 |
+
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
|
| 1264 |
+
)
|
| 1265 |
+
|
| 1266 |
+
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
|
| 1267 |
+
decoder_outputs = self.decoder(
|
| 1268 |
+
input_ids=decoder_input_ids,
|
| 1269 |
+
attention_mask=decoder_attention_mask,
|
| 1270 |
+
encoder_hidden_states=encoder_outputs[0],
|
| 1271 |
+
head_mask=decoder_head_mask,
|
| 1272 |
+
cross_attn_head_mask=cross_attn_head_mask,
|
| 1273 |
+
past_key_values=past_key_values,
|
| 1274 |
+
inputs_embeds=decoder_inputs_embeds,
|
| 1275 |
+
use_cache=use_cache,
|
| 1276 |
+
output_attentions=output_attentions,
|
| 1277 |
+
output_hidden_states=output_hidden_states,
|
| 1278 |
+
return_dict=return_dict,
|
| 1279 |
+
)
|
| 1280 |
+
|
| 1281 |
+
if not return_dict:
|
| 1282 |
+
return decoder_outputs + encoder_outputs
|
| 1283 |
+
|
| 1284 |
+
return Seq2SeqModelOutput(
|
| 1285 |
+
last_hidden_state=decoder_outputs.last_hidden_state,
|
| 1286 |
+
past_key_values=decoder_outputs.past_key_values,
|
| 1287 |
+
decoder_hidden_states=decoder_outputs.hidden_states,
|
| 1288 |
+
decoder_attentions=decoder_outputs.attentions,
|
| 1289 |
+
cross_attentions=decoder_outputs.cross_attentions,
|
| 1290 |
+
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
|
| 1291 |
+
encoder_hidden_states=encoder_outputs.hidden_states,
|
| 1292 |
+
encoder_attentions=encoder_outputs.attentions,
|
| 1293 |
+
)
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
@add_start_docstrings(
|
| 1297 |
+
"The Whisper Model with a language modeling head. Can be used for automatic speech recognition.",
|
| 1298 |
+
WHISPER_START_DOCSTRING,
|
| 1299 |
+
)
|
| 1300 |
+
class WhisperForConditionalGeneration(WhisperPreTrainedModel):
|
| 1301 |
+
base_model_prefix = "model"
|
| 1302 |
+
_keys_to_ignore_on_load_missing = [
|
| 1303 |
+
r"encoder.version",
|
| 1304 |
+
r"decoder.version",
|
| 1305 |
+
r"proj_out.weight",
|
| 1306 |
+
]
|
| 1307 |
+
_keys_to_ignore_on_save = [
|
| 1308 |
+
r"proj_out.weight",
|
| 1309 |
+
]
|
| 1310 |
+
|
| 1311 |
+
def __init__(self, config: WhisperConfig):
|
| 1312 |
+
super().__init__(config)
|
| 1313 |
+
self.model = WhisperModel(config)
|
| 1314 |
+
self.proj_out = nn.Linear(config.d_model, config.vocab_size, bias=False)
|
| 1315 |
+
|
| 1316 |
+
# Initialize weights and apply final processing
|
| 1317 |
+
self.post_init()
|
| 1318 |
+
|
| 1319 |
+
def get_encoder(self):
|
| 1320 |
+
return self.model.get_encoder()
|
| 1321 |
+
|
| 1322 |
+
def get_decoder(self):
|
| 1323 |
+
return self.model.get_decoder()
|
| 1324 |
+
|
| 1325 |
+
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
|
| 1326 |
+
new_embeddings = super().resize_token_embeddings(new_num_tokens)
|
| 1327 |
+
return new_embeddings
|
| 1328 |
+
|
| 1329 |
+
def get_output_embeddings(self):
|
| 1330 |
+
return self.proj_out
|
| 1331 |
+
|
| 1332 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1333 |
+
self.proj_out = new_embeddings
|
| 1334 |
+
|
| 1335 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 1336 |
+
return self.model.get_input_embeddings()
|
| 1337 |
+
|
| 1338 |
+
def freeze_encoder(self):
|
| 1339 |
+
"""
|
| 1340 |
+
Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
|
| 1341 |
+
not be updated during training.
|
| 1342 |
+
"""
|
| 1343 |
+
self.model.encoder._freeze_parameters()
|
| 1344 |
+
|
| 1345 |
+
@add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING)
|
| 1346 |
+
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
|
| 1347 |
+
def forward(
|
| 1348 |
+
self,
|
| 1349 |
+
input_features: Optional[torch.FloatTensor] = None,
|
| 1350 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 1351 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
| 1352 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
| 1353 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1354 |
+
decoder_head_mask: Optional[torch.Tensor] = None,
|
| 1355 |
+
cross_attn_head_mask: Optional[torch.Tensor] = None,
|
| 1356 |
+
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1357 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1358 |
+
decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None,
|
| 1359 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1360 |
+
use_cache: Optional[bool] = None,
|
| 1361 |
+
output_attentions: Optional[bool] = None,
|
| 1362 |
+
output_hidden_states: Optional[bool] = None,
|
| 1363 |
+
return_dict: Optional[bool] = None,
|
| 1364 |
+
) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
|
| 1365 |
+
r"""
|
| 1366 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1367 |
+
Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
|
| 1368 |
+
or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
|
| 1369 |
+
only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1370 |
+
|
| 1371 |
+
Returns:
|
| 1372 |
+
|
| 1373 |
+
Example:
|
| 1374 |
+
|
| 1375 |
+
```python
|
| 1376 |
+
>>> import torch
|
| 1377 |
+
>>> from transformers import AutoProcessor, WhisperForConditionalGeneration
|
| 1378 |
+
>>> from datasets import load_dataset
|
| 1379 |
+
|
| 1380 |
+
>>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
|
| 1381 |
+
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
|
| 1382 |
+
|
| 1383 |
+
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
| 1384 |
+
|
| 1385 |
+
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
|
| 1386 |
+
>>> input_features = inputs.input_features
|
| 1387 |
+
|
| 1388 |
+
>>> generated_ids = model.generate(inputs=input_features)
|
| 1389 |
+
|
| 1390 |
+
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
| 1391 |
+
>>> transcription
|
| 1392 |
+
' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
|
| 1393 |
+
```"""
|
| 1394 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1395 |
+
|
| 1396 |
+
if labels is not None:
|
| 1397 |
+
if decoder_input_ids is None and decoder_inputs_embeds is None:
|
| 1398 |
+
decoder_input_ids = shift_tokens_right(
|
| 1399 |
+
labels, self.config.pad_token_id, self.config.decoder_start_token_id
|
| 1400 |
+
)
|
| 1401 |
+
|
| 1402 |
+
outputs = self.model(
|
| 1403 |
+
input_features,
|
| 1404 |
+
attention_mask=attention_mask,
|
| 1405 |
+
decoder_input_ids=decoder_input_ids,
|
| 1406 |
+
encoder_outputs=encoder_outputs,
|
| 1407 |
+
decoder_attention_mask=decoder_attention_mask,
|
| 1408 |
+
head_mask=head_mask,
|
| 1409 |
+
decoder_head_mask=decoder_head_mask,
|
| 1410 |
+
cross_attn_head_mask=cross_attn_head_mask,
|
| 1411 |
+
past_key_values=past_key_values,
|
| 1412 |
+
decoder_inputs_embeds=decoder_inputs_embeds,
|
| 1413 |
+
use_cache=use_cache,
|
| 1414 |
+
output_attentions=output_attentions,
|
| 1415 |
+
output_hidden_states=output_hidden_states,
|
| 1416 |
+
return_dict=return_dict,
|
| 1417 |
+
)
|
| 1418 |
+
lm_logits = self.proj_out(outputs[0])
|
| 1419 |
+
|
| 1420 |
+
loss = None
|
| 1421 |
+
if labels is not None:
|
| 1422 |
+
loss_fct = CrossEntropyLoss()
|
| 1423 |
+
# move labels to correct device to enable PP
|
| 1424 |
+
labels = labels.to(lm_logits.device)
|
| 1425 |
+
loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.reshape(-1))
|
| 1426 |
+
|
| 1427 |
+
if not return_dict:
|
| 1428 |
+
output = (lm_logits,) + outputs[1:]
|
| 1429 |
+
return ((loss,) + output) if loss is not None else output
|
| 1430 |
+
|
| 1431 |
+
return Seq2SeqLMOutput(
|
| 1432 |
+
loss=loss,
|
| 1433 |
+
logits=lm_logits,
|
| 1434 |
+
past_key_values=outputs.past_key_values,
|
| 1435 |
+
decoder_hidden_states=outputs.decoder_hidden_states,
|
| 1436 |
+
decoder_attentions=outputs.decoder_attentions,
|
| 1437 |
+
cross_attentions=outputs.cross_attentions,
|
| 1438 |
+
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
|
| 1439 |
+
encoder_hidden_states=outputs.encoder_hidden_states,
|
| 1440 |
+
encoder_attentions=outputs.encoder_attentions,
|
| 1441 |
+
)
|
| 1442 |
+
|
| 1443 |
+
def generate(
|
| 1444 |
+
self,
|
| 1445 |
+
inputs: Optional[torch.Tensor] = None,
|
| 1446 |
+
generation_config=None,
|
| 1447 |
+
logits_processor=None,
|
| 1448 |
+
stopping_criteria=None,
|
| 1449 |
+
prefix_allowed_tokens_fn=None,
|
| 1450 |
+
synced_gpus=False,
|
| 1451 |
+
return_timestamps=None,
|
| 1452 |
+
task=None,
|
| 1453 |
+
language=None,
|
| 1454 |
+
is_multilingual=None,
|
| 1455 |
+
**kwargs,
|
| 1456 |
+
):
|
| 1457 |
+
"""
|
| 1458 |
+
|
| 1459 |
+
Generates sequences of token ids for models with a language modeling head.
|
| 1460 |
+
|
| 1461 |
+
<Tip warning={true}>
|
| 1462 |
+
|
| 1463 |
+
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
|
| 1464 |
+
model's default generation configuration. You can override any `generation_config` by passing the corresponding
|
| 1465 |
+
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
|
| 1466 |
+
|
| 1467 |
+
For an overview of generation strategies and code examples, check out the [following
|
| 1468 |
+
guide](./generation_strategies).
|
| 1469 |
+
|
| 1470 |
+
</Tip>
|
| 1471 |
+
|
| 1472 |
+
Parameters:
|
| 1473 |
+
inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
|
| 1474 |
+
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
|
| 1475 |
+
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
|
| 1476 |
+
should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
|
| 1477 |
+
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
|
| 1478 |
+
generation_config (`~generation.GenerationConfig`, *optional*):
|
| 1479 |
+
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
|
| 1480 |
+
passed to generate matching the attributes of `generation_config` will override them. If
|
| 1481 |
+
`generation_config` is not provided, the default will be used, which has the following loading
|
| 1482 |
+
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
|
| 1483 |
+
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
|
| 1484 |
+
default values, whose documentation should be checked to parameterize generation.
|
| 1485 |
+
logits_processor (`LogitsProcessorList`, *optional*):
|
| 1486 |
+
Custom logits processors that complement the default logits processors built from arguments and
|
| 1487 |
+
generation config. If a logit processor is passed that is already created with the arguments or a
|
| 1488 |
+
generation config an error is thrown. This feature is intended for advanced users.
|
| 1489 |
+
stopping_criteria (`StoppingCriteriaList`, *optional*):
|
| 1490 |
+
Custom stopping criteria that complement the default stopping criteria built from arguments and a
|
| 1491 |
+
generation config. If a stopping criteria is passed that is already created with the arguments or a
|
| 1492 |
+
generation config an error is thrown. This feature is intended for advanced users.
|
| 1493 |
+
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
|
| 1494 |
+
If provided, this function constrains the beam search to allowed tokens only at each step. If not
|
| 1495 |
+
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
|
| 1496 |
+
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
|
| 1497 |
+
on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
|
| 1498 |
+
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
|
| 1499 |
+
Retrieval](https://arxiv.org/abs/2010.00904).
|
| 1500 |
+
synced_gpus (`bool`, *optional*, defaults to `False`):
|
| 1501 |
+
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
|
| 1502 |
+
return_timestamps (`bool`, *optional*):
|
| 1503 |
+
Whether to return the timestamps with the text. This enables the `WhisperTimeStampLogitsProcessor`.
|
| 1504 |
+
task (`str`, *optional*):
|
| 1505 |
+
Task to use for generation, either "translate" or "transcribe". The `model.config.forced_decoder_ids`
|
| 1506 |
+
will be updated accordingly.
|
| 1507 |
+
language (`str`, *optional*):
|
| 1508 |
+
Language token to use for generation, can be either in the form of `<|en|>`, `en` or `english`. You can
|
| 1509 |
+
find all the possible language tokens in the `model.generation_config.lang_to_id` dictionary.
|
| 1510 |
+
is_multilingual (`bool`, *optional*):
|
| 1511 |
+
Whether or not the model is multilingual.
|
| 1512 |
+
kwargs:
|
| 1513 |
+
Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
|
| 1514 |
+
forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
|
| 1515 |
+
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
|
| 1516 |
+
|
| 1517 |
+
Return:
|
| 1518 |
+
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
|
| 1519 |
+
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
|
| 1520 |
+
|
| 1521 |
+
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
|
| 1522 |
+
[`~utils.ModelOutput`] types are:
|
| 1523 |
+
|
| 1524 |
+
- [`~generation.GreedySearchDecoderOnlyOutput`],
|
| 1525 |
+
- [`~generation.SampleDecoderOnlyOutput`],
|
| 1526 |
+
- [`~generation.BeamSearchDecoderOnlyOutput`],
|
| 1527 |
+
- [`~generation.BeamSampleDecoderOnlyOutput`]
|
| 1528 |
+
|
| 1529 |
+
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
|
| 1530 |
+
[`~utils.ModelOutput`] types are:
|
| 1531 |
+
|
| 1532 |
+
- [`~generation.GreedySearchEncoderDecoderOutput`],
|
| 1533 |
+
- [`~generation.SampleEncoderDecoderOutput`],
|
| 1534 |
+
- [`~generation.BeamSearchEncoderDecoderOutput`],
|
| 1535 |
+
- [`~generation.BeamSampleEncoderDecoderOutput`]
|
| 1536 |
+
"""
|
| 1537 |
+
if generation_config is None:
|
| 1538 |
+
generation_config = self.generation_config
|
| 1539 |
+
|
| 1540 |
+
if return_timestamps is not None:
|
| 1541 |
+
if not hasattr(generation_config, "no_timestamps_token_id"):
|
| 1542 |
+
raise ValueError(
|
| 1543 |
+
"You are trying to return timestamps, but the generation config is not properly set."
|
| 1544 |
+
"Make sure to initialize the generation config with the correct attributes that are needed such as `no_timestamps_token_id`."
|
| 1545 |
+
"For more details on how to generate the approtiate config, refer to https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363"
|
| 1546 |
+
)
|
| 1547 |
+
|
| 1548 |
+
generation_config.return_timestamps = return_timestamps
|
| 1549 |
+
else:
|
| 1550 |
+
generation_config.return_timestamps = False
|
| 1551 |
+
|
| 1552 |
+
if language is not None:
|
| 1553 |
+
language = language.lower()
|
| 1554 |
+
generation_config.language = language
|
| 1555 |
+
if task is not None:
|
| 1556 |
+
generation_config.task = task
|
| 1557 |
+
|
| 1558 |
+
forced_decoder_ids = []
|
| 1559 |
+
if task is not None or language is not None:
|
| 1560 |
+
if hasattr(generation_config, "language"):
|
| 1561 |
+
if generation_config.language in generation_config.lang_to_id.keys():
|
| 1562 |
+
language_token = generation_config.language
|
| 1563 |
+
elif generation_config.language in TO_LANGUAGE_CODE.keys():
|
| 1564 |
+
language_token = f"<|{TO_LANGUAGE_CODE[generation_config.language]}|>"
|
| 1565 |
+
elif generation_config.language in TO_LANGUAGE_CODE.values():
|
| 1566 |
+
language_token = f"<|{generation_config.language}|>"
|
| 1567 |
+
else:
|
| 1568 |
+
is_language_code = len(generation_config.language) == 2
|
| 1569 |
+
raise ValueError(
|
| 1570 |
+
f"Unsupported language: {generation_config.language}. Language should be one of:"
|
| 1571 |
+
f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}."
|
| 1572 |
+
)
|
| 1573 |
+
forced_decoder_ids.append((1, generation_config.lang_to_id[language_token]))
|
| 1574 |
+
else:
|
| 1575 |
+
forced_decoder_ids.append((1, None)) # automatically detect the language
|
| 1576 |
+
|
| 1577 |
+
if hasattr(generation_config, "task"):
|
| 1578 |
+
if generation_config.task in TASK_IDS:
|
| 1579 |
+
forced_decoder_ids.append((2, generation_config.task_to_id[generation_config.task]))
|
| 1580 |
+
else:
|
| 1581 |
+
raise ValueError(
|
| 1582 |
+
f"The `{generation_config.task}`task is not supported. The task should be one of `{TASK_IDS}`"
|
| 1583 |
+
)
|
| 1584 |
+
else:
|
| 1585 |
+
forced_decoder_ids.append((2, generation_config.task_to_id["transcribe"])) # defaults to transcribe
|
| 1586 |
+
if hasattr(generation_config, "no_timestamps_token_id") and not generation_config.return_timestamps:
|
| 1587 |
+
idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1
|
| 1588 |
+
forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id))
|
| 1589 |
+
|
| 1590 |
+
# Legacy code for backward compatibility
|
| 1591 |
+
elif hasattr(self.config, "forced_decoder_ids") and self.config.forced_decoder_ids is not None:
|
| 1592 |
+
forced_decoder_ids = self.config.forced_decoder_ids
|
| 1593 |
+
elif (
|
| 1594 |
+
hasattr(self.generation_config, "forced_decoder_ids")
|
| 1595 |
+
and self.generation_config.forced_decoder_ids is not None
|
| 1596 |
+
):
|
| 1597 |
+
forced_decoder_ids = self.generation_config.forced_decoder_ids
|
| 1598 |
+
|
| 1599 |
+
if generation_config.return_timestamps:
|
| 1600 |
+
logits_processor = [WhisperTimeStampLogitsProcessor(generation_config)]
|
| 1601 |
+
|
| 1602 |
+
if len(forced_decoder_ids) > 0:
|
| 1603 |
+
generation_config.forced_decoder_ids = forced_decoder_ids
|
| 1604 |
+
|
| 1605 |
+
return super().generate(
|
| 1606 |
+
inputs,
|
| 1607 |
+
generation_config,
|
| 1608 |
+
logits_processor,
|
| 1609 |
+
stopping_criteria,
|
| 1610 |
+
prefix_allowed_tokens_fn,
|
| 1611 |
+
synced_gpus,
|
| 1612 |
+
**kwargs,
|
| 1613 |
+
)
|
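The `forced_decoder_ids` assembled above are simply (position, token_id) pairs that pin the language, task, and optional no-timestamps tokens at fixed decoder positions. A hedged illustration (not part of this file): the same structure can be inspected through the public `WhisperProcessor` API; the checkpoint name is only an example.

```python
# Illustrative only: inspect the (position, token_id) pairs that generate() forces.
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
forced = processor.get_decoder_prompt_ids(language="french", task="transcribe")
print(forced)  # e.g. [(1, <|fr|> id), (2, <|transcribe|> id), (3, <|notimestamps|> id)]
```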
| 1614 |
+
|
| 1615 |
+
def prepare_inputs_for_generation(
|
| 1616 |
+
self,
|
| 1617 |
+
decoder_input_ids,
|
| 1618 |
+
past_key_values=None,
|
| 1619 |
+
use_cache=None,
|
| 1620 |
+
encoder_outputs=None,
|
| 1621 |
+
attention_mask=None,
|
| 1622 |
+
**kwargs,
|
| 1623 |
+
):
|
| 1624 |
+
# cut decoder_input_ids if past is used
|
| 1625 |
+
if past_key_values is not None:
|
| 1626 |
+
decoder_input_ids = decoder_input_ids[:, -1:]
|
| 1627 |
+
|
| 1628 |
+
return {
|
| 1629 |
+
"encoder_outputs": encoder_outputs,
|
| 1630 |
+
"past_key_values": past_key_values,
|
| 1631 |
+
"decoder_input_ids": decoder_input_ids,
|
| 1632 |
+
"use_cache": use_cache,
|
| 1633 |
+
"decoder_attention_mask": None,
|
| 1634 |
+
}
|
| 1635 |
+
|
| 1636 |
+
#
|
| 1637 |
+
@staticmethod
|
| 1638 |
+
def _reorder_cache(past_key_values, beam_idx):
|
| 1639 |
+
reordered_past = ()
|
| 1640 |
+
for layer_past in past_key_values:
|
| 1641 |
+
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
|
| 1642 |
+
return reordered_past
|
| 1643 |
+
|
| 1644 |
+
|
| 1645 |
+
@add_start_docstrings(
|
| 1646 |
+
"""
|
| 1647 |
+
Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
|
| 1648 |
+
like SUPERB Keyword Spotting.
|
| 1649 |
+
""",
|
| 1650 |
+
WHISPER_ENCODER_INPUTS_DOCSTRING,
|
| 1651 |
+
)
|
| 1652 |
+
class WhisperForAudioClassification(WhisperPreTrainedModel):
|
| 1653 |
+
def __init__(self, config):
|
| 1654 |
+
super().__init__(config)
|
| 1655 |
+
|
| 1656 |
+
self.encoder = WhisperEncoder(config)
|
| 1657 |
+
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
|
| 1658 |
+
if config.use_weighted_layer_sum:
|
| 1659 |
+
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
|
| 1660 |
+
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
|
| 1661 |
+
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
|
| 1662 |
+
|
| 1663 |
+
# Initialize weights and apply final processing
|
| 1664 |
+
self.post_init()
|
| 1665 |
+
|
| 1666 |
+
def freeze_encoder(self):
|
| 1667 |
+
"""
|
| 1668 |
+
Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
|
| 1669 |
+
not be updated during training. Only the projection layers and classification head will be updated.
|
| 1670 |
+
"""
|
| 1671 |
+
self.encoder._freeze_parameters()
|
| 1672 |
+
|
| 1673 |
+
def get_input_embeddings(self) -> nn.Module:
|
| 1674 |
+
return self.encoder.get_input_embeddings()
|
| 1675 |
+
|
| 1676 |
+
def set_input_embeddings(self, value: nn.Module):
|
| 1677 |
+
self.encoder.set_input_embeddings(value)
|
| 1678 |
+
|
| 1679 |
+
@add_start_docstrings_to_model_forward(WHISPER_ENCODER_INPUTS_DOCSTRING)
|
| 1680 |
+
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
|
| 1681 |
+
def forward(
|
| 1682 |
+
self,
|
| 1683 |
+
input_features: Optional[torch.LongTensor] = None,
|
| 1684 |
+
head_mask: Optional[torch.Tensor] = None,
|
| 1685 |
+
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 1686 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1687 |
+
output_attentions: Optional[bool] = None,
|
| 1688 |
+
output_hidden_states: Optional[bool] = None,
|
| 1689 |
+
return_dict: Optional[bool] = None,
|
| 1690 |
+
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
|
| 1691 |
+
r"""
|
| 1692 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1693 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| 1694 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| 1695 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| 1696 |
+
|
| 1697 |
+
Returns:
|
| 1698 |
+
|
| 1699 |
+
Example:
|
| 1700 |
+
|
| 1701 |
+
```python
|
| 1702 |
+
>>> import torch
|
| 1703 |
+
>>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification
|
| 1704 |
+
>>> from datasets import load_dataset
|
| 1705 |
+
|
| 1706 |
+
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
|
| 1707 |
+
>>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
|
| 1708 |
+
|
| 1709 |
+
>>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True)
|
| 1710 |
+
>>> sample = next(iter(ds))
|
| 1711 |
+
|
| 1712 |
+
>>> inputs = feature_extractor(
|
| 1713 |
+
... sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt"
|
| 1714 |
+
... )
|
| 1715 |
+
>>> input_features = inputs.input_features
|
| 1716 |
+
|
| 1717 |
+
>>> with torch.no_grad():
|
| 1718 |
+
... logits = model(input_features).logits
|
| 1719 |
+
|
| 1720 |
+
>>> predicted_class_ids = torch.argmax(logits).item()
|
| 1721 |
+
>>> predicted_label = model.config.id2label[predicted_class_ids]
|
| 1722 |
+
>>> predicted_label
|
| 1723 |
+
'af_za'
|
| 1724 |
+
```"""
|
| 1725 |
+
|
| 1726 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1727 |
+
output_hidden_states = (
|
| 1728 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1729 |
+
)
|
| 1730 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1731 |
+
|
| 1732 |
+
if encoder_outputs is None:
|
| 1733 |
+
encoder_outputs = self.encoder(
|
| 1734 |
+
input_features,
|
| 1735 |
+
head_mask=head_mask,
|
| 1736 |
+
output_attentions=output_attentions,
|
| 1737 |
+
output_hidden_states=output_hidden_states,
|
| 1738 |
+
return_dict=return_dict,
|
| 1739 |
+
)
|
| 1740 |
+
|
| 1741 |
+
if self.config.use_weighted_layer_sum:
|
| 1742 |
+
hidden_states = torch.stack(encoder_outputs, dim=1)
|
| 1743 |
+
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
|
| 1744 |
+
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
|
| 1745 |
+
else:
|
| 1746 |
+
hidden_states = encoder_outputs[0]
|
| 1747 |
+
|
| 1748 |
+
hidden_states = self.projector(hidden_states)
|
| 1749 |
+
pooled_output = hidden_states.mean(dim=1)
|
| 1750 |
+
|
| 1751 |
+
logits = self.classifier(pooled_output)
|
| 1752 |
+
|
| 1753 |
+
loss = None
|
| 1754 |
+
|
| 1755 |
+
if labels is not None:
|
| 1756 |
+
loss_fct = CrossEntropyLoss()
|
| 1757 |
+
# move labels to correct device to enable PP
|
| 1758 |
+
labels = labels.to(logits.device)
|
| 1759 |
+
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
|
| 1760 |
+
|
| 1761 |
+
if not return_dict:
|
| 1762 |
+
output = (logits,) + encoder_outputs[1:]
|
| 1763 |
+
return ((loss,) + output) if loss is not None else output
|
| 1764 |
+
|
| 1765 |
+
return SequenceClassifierOutput(
|
| 1766 |
+
loss=loss,
|
| 1767 |
+
logits=logits,
|
| 1768 |
+
hidden_states=encoder_outputs.hidden_states,
|
| 1769 |
+
attentions=encoder_outputs.attentions,
|
| 1770 |
+
)
|
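The Whisper classes above are consumed by `models/salmonn.py` (the next file) purely as a frozen feature extractor: only `.encoder` is kept, and its last hidden state feeds the speech Q-Former. A minimal sketch of that usage, with a placeholder checkpoint path:

```python
# Sketch (placeholder path): extract Whisper encoder states the way salmonn.py does.
import torch
from models.modeling_whisper import WhisperModel

encoder = WhisperModel.from_pretrained("path/to/whisper-large-v2").encoder
mel = torch.randn(1, 80, 3000)  # 30 s log-mel spectrogram: 80 bins x 3000 frames
with torch.no_grad():
    hidden = encoder(mel, return_dict=True).last_hidden_state
print(hidden.shape)  # (1, 1500, d_model): 1500 encoder frames per 30 s clip
```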
models/salmonn.py
ADDED
|
@@ -0,0 +1,506 @@
| 1 |
+
# Copyright (2024) Tsinghua University, Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
import json
|
| 17 |
+
import contextlib
|
| 18 |
+
import random
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
from transformers import LlamaTokenizer, StoppingCriteriaList
|
| 24 |
+
from peft import LoraConfig, TaskType, get_peft_model
|
| 25 |
+
|
| 26 |
+
from .Qformer import BertConfig, BertLMHeadModel
|
| 27 |
+
from .modeling_llama import LlamaForCausalLM
|
| 28 |
+
from .modeling_whisper import WhisperModel
|
| 29 |
+
from .beats.BEATs import BEATsConfig, BEATs
|
| 30 |
+
from .utils import StoppingCriteriaSub
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class SALMONN(nn.Module):
|
| 34 |
+
@classmethod
|
| 35 |
+
def init_speech_Qformer(cls, num_query_token, speech_width, num_hidden_layers=2):
|
| 36 |
+
encoder_config = BertConfig.from_pretrained("bert-base-uncased")
|
| 37 |
+
encoder_config.num_hidden_layers = num_hidden_layers
|
| 38 |
+
encoder_config.encoder_width = speech_width
|
| 39 |
+
# insert a cross-attention layer in every Q-Former block (cross_attention_freq = 1)
|
| 40 |
+
encoder_config.add_cross_attention = True
|
| 41 |
+
encoder_config.cross_attention_freq = 1
|
| 42 |
+
encoder_config.query_length = num_query_token
|
| 43 |
+
Qformer = BertLMHeadModel(config=encoder_config)
|
| 44 |
+
query_tokens = nn.Parameter(
|
| 45 |
+
torch.zeros(1, num_query_token, encoder_config.hidden_size)
|
| 46 |
+
)
|
| 47 |
+
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
|
| 48 |
+
return Qformer, query_tokens
|
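For the defaults used further down (num_speech_query_token=1, Whisper d_model=1280, BEATs embed dim 768 when enabled), this builds a two-layer BERT with cross-attention into the speech features and a single learned query vector. A minimal sketch, with the widths assumed rather than read from a checkpoint:

```python
# Sketch (widths assumed): what init_speech_Qformer returns for the default config.
Qformer, query_tokens = SALMONN.init_speech_Qformer(
    num_query_token=1,
    speech_width=1280 + 768,  # Whisper d_model + BEATs dim; 1280 alone without BEATs
)
print(query_tokens.shape)  # torch.Size([1, 1, 768]) -- BERT-base hidden size
```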
| 49 |
+
|
| 50 |
+
@property
|
| 51 |
+
def device(self):
|
| 52 |
+
return list(self.parameters())[0].device
|
| 53 |
+
|
| 54 |
+
def maybe_autocast(self, dtype=torch.float16):
|
| 55 |
+
# if on cpu, don't use autocast
|
| 56 |
+
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
|
| 57 |
+
enable_autocast = self.device != torch.device("cpu")
|
| 58 |
+
|
| 59 |
+
if enable_autocast:
|
| 60 |
+
return torch.cuda.amp.autocast(dtype=dtype)
|
| 61 |
+
else:
|
| 62 |
+
return contextlib.nullcontext()
|
| 63 |
+
|
| 64 |
+
def __init__(
|
| 65 |
+
self,
|
| 66 |
+
llama_path="",
|
| 67 |
+
whisper_path="",
|
| 68 |
+
freeze_whisper=True,
|
| 69 |
+
beats_path="",
|
| 70 |
+
freeze_beats=True,
|
| 71 |
+
|
| 72 |
+
use_speech_Qformer=True,
|
| 73 |
+
num_speech_query_token=1,
|
| 74 |
+
freeze_speech_QFormer=False,
|
| 75 |
+
window_level_Qformer=True,
|
| 76 |
+
second_per_window=0.333333,
|
| 77 |
+
second_stride=0.333333,
|
| 78 |
+
|
| 79 |
+
speech_llama_proj_model="",
|
| 80 |
+
freeze_speech_llama_proj=False,
|
| 81 |
+
|
| 82 |
+
lora=True,
|
| 83 |
+
lora_rank=8,
|
| 84 |
+
lora_alpha=32,
|
| 85 |
+
lora_dropout=0.1,
|
| 86 |
+
|
| 87 |
+
multi_prompt=False,
|
| 88 |
+
prompt_path="",
|
| 89 |
+
prompt_template="",
|
| 90 |
+
max_txt_len=128,
|
| 91 |
+
end_sym="</s>",
|
| 92 |
+
low_resource=False, # use 8 bit
|
| 93 |
+
device_8bit=0, # the device of the 8-bit model must be set at load time and cannot be changed afterwards.
|
| 94 |
+
):
|
| 95 |
+
super().__init__()
|
| 96 |
+
|
| 97 |
+
self.beats_path = beats_path
|
| 98 |
+
self.use_speech_Qformer = use_speech_Qformer
|
| 99 |
+
self.window_level_Qformer = window_level_Qformer
|
| 100 |
+
self.second_per_window = second_per_window
|
| 101 |
+
self.second_stride = second_stride
|
| 102 |
+
self.lora = lora
|
| 103 |
+
self.multi_prompt = multi_prompt
|
| 104 |
+
self.max_txt_len = max_txt_len
|
| 105 |
+
self.end_sym = end_sym
|
| 106 |
+
self.low_resource = low_resource
|
| 107 |
+
|
| 108 |
+
logging.info('Loading LLaMA Tokenizer')
|
| 109 |
+
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_path, use_fast=False)
|
| 110 |
+
self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
|
| 111 |
+
self.llama_tokenizer.padding_side = "right"
|
| 112 |
+
|
| 113 |
+
logging.info('Loading LLaMA Model')
|
| 114 |
+
if self.low_resource:
|
| 115 |
+
self.llama_model = LlamaForCausalLM.from_pretrained(
|
| 116 |
+
llama_path,
|
| 117 |
+
torch_dtype=torch.float16,
|
| 118 |
+
load_in_8bit=True,
|
| 119 |
+
device_map={"": device_8bit},
|
| 120 |
+
)
|
| 121 |
+
else:
|
| 122 |
+
self.llama_model = LlamaForCausalLM.from_pretrained(
|
| 123 |
+
llama_path,
|
| 124 |
+
torch_dtype=torch.float16,
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
self.llama_model.resize_token_embeddings(len(self.llama_tokenizer))
|
| 128 |
+
for name, param in self.llama_model.named_parameters():
|
| 129 |
+
param.requires_grad = False
|
| 130 |
+
logging.info('Loading LLaMA Done')
|
| 131 |
+
|
| 132 |
+
if self.lora:
|
| 133 |
+
self.peft_config = LoraConfig(
|
| 134 |
+
task_type=TaskType.CAUSAL_LM,
|
| 135 |
+
inference_mode=False,
|
| 136 |
+
r=lora_rank,
|
| 137 |
+
lora_alpha=lora_alpha,
|
| 138 |
+
lora_dropout=lora_dropout,
|
| 139 |
+
)
|
| 140 |
+
self.llama_model = get_peft_model(self.llama_model, self.peft_config)
|
| 141 |
+
self.llama_model.print_trainable_parameters()
|
| 142 |
+
logging.info('LoRA Training')
|
| 143 |
+
|
| 144 |
+
assert whisper_path
|
| 145 |
+
logging.info('Loading Whisper Model')
|
| 146 |
+
self.speech_encoder = WhisperModel.from_pretrained(whisper_path).encoder
|
| 147 |
+
self.ln_speech = nn.LayerNorm(self.speech_encoder.config.d_model)
|
| 148 |
+
if freeze_whisper:
|
| 149 |
+
for name, param in self.speech_encoder.named_parameters():
|
| 150 |
+
param.requires_grad = False
|
| 151 |
+
self.speech_encoder.eval()
|
| 152 |
+
logging.info("freeze Whisper")
|
| 153 |
+
|
| 154 |
+
if self.beats_path:
|
| 155 |
+
logging.info("Loading BEATs Model")
|
| 156 |
+
beats_ckpt = torch.load(self.beats_path, map_location='cpu')
|
| 157 |
+
beats_cfg = BEATsConfig(beats_ckpt['cfg'])
|
| 158 |
+
self.beats = BEATs(beats_cfg)
|
| 159 |
+
self.beats.load_state_dict(beats_ckpt['model'])
|
| 160 |
+
self.ln_audio = nn.LayerNorm(self.beats.cfg.encoder_embed_dim)
|
| 161 |
+
if freeze_beats:
|
| 162 |
+
for name, param in self.beats.named_parameters():
|
| 163 |
+
param.requires_grad = False
|
| 164 |
+
self.beats.eval()
|
| 165 |
+
logging.info("freeze BEATs")
|
| 166 |
+
|
| 167 |
+
if self.use_speech_Qformer:
|
| 168 |
+
if self.beats_path:
|
| 169 |
+
self.speech_Qformer, self.speech_query_tokens = self.init_speech_Qformer(
|
| 170 |
+
num_query_token=num_speech_query_token, speech_width=self.speech_encoder.config.d_model + self.beats.cfg.encoder_embed_dim
|
| 171 |
+
)
|
| 172 |
+
else:
|
| 173 |
+
self.speech_Qformer, self.speech_query_tokens = self.init_speech_Qformer(
|
| 174 |
+
num_query_token=num_speech_query_token, speech_width=self.speech_encoder.config.d_model
|
| 175 |
+
)
|
| 176 |
+
self.speech_Qformer.bert.embeddings.word_embeddings = None
|
| 177 |
+
self.speech_Qformer.bert.embeddings.position_embeddings = None
|
| 178 |
+
for layer in self.speech_Qformer.bert.encoder.layer:
|
| 179 |
+
layer.output = None
|
| 180 |
+
layer.intermediate = None
|
| 181 |
+
self.speech_Qformer.cls = None
|
| 182 |
+
if freeze_speech_QFormer:
|
| 183 |
+
for name, param in self.speech_Qformer.named_parameters():
|
| 184 |
+
param.requires_grad = False
|
| 185 |
+
self.speech_Qformer.eval()
|
| 186 |
+
self.speech_query_tokens.requires_grad = False
|
| 187 |
+
logging.info("freeze Speech QFormer")
|
| 188 |
+
|
| 189 |
+
logging.info('Loading speech LLAMA proj')
|
| 190 |
+
self.speech_llama_proj = nn.Linear(
|
| 191 |
+
self.speech_Qformer.config.hidden_size, self.llama_model.config.hidden_size
|
| 192 |
+
)
|
| 193 |
+
if speech_llama_proj_model:
|
| 194 |
+
logging.info("Loading speech LLAMA proj from {}".format(speech_llama_proj_model))
|
| 195 |
+
speech_llama_proj_weight = torch.load(speech_llama_proj_model, map_location="cpu")
|
| 196 |
+
self.load_state_dict(speech_llama_proj_weight['model'], strict=False)
|
| 197 |
+
if freeze_speech_llama_proj:
|
| 198 |
+
for name, param in self.speech_llama_proj.named_parameters():
|
| 199 |
+
param.requires_grad = False
|
| 200 |
+
self.speech_llama_proj.eval()
|
| 201 |
+
logging.info("freeze speech LLAMA proj")
|
| 202 |
+
else:
|
| 203 |
+
# feel free to add other aligners here
|
| 204 |
+
raise NotImplementedError
|
| 205 |
+
|
| 206 |
+
# prepare prompts
|
| 207 |
+
self.prompt_dict = {}
|
| 208 |
+
if prompt_path:
|
| 209 |
+
try:
|
| 210 |
+
raw_prompts = json.load(open(prompt_path, "r"))
|
| 211 |
+
except:
|
| 212 |
+
print("Failed to load prompt! Try to use utf-8 encoding.")
|
| 213 |
+
raw_prompts = json.load(open(prompt_path, "r", encoding='utf-8'))
|
| 214 |
+
for task in raw_prompts.keys():
|
| 215 |
+
filtered_prompts = [raw_prompt for raw_prompt in raw_prompts[task] if "<SpeechHere>" in raw_prompt]
|
| 216 |
+
self.prompt_dict[task] = [prompt_template.format(p) for p in filtered_prompts]
|
| 217 |
+
print("Loading training prompts done!")
|
| 218 |
+
|
| 219 |
+
def _encode_auditory_feature(self, speech_embeds, audio_embeds=None):
|
| 220 |
+
with self.maybe_autocast():
|
| 221 |
+
if self.use_speech_Qformer:
|
| 222 |
+
speech_embeds = self.ln_speech(speech_embeds)
|
| 223 |
+
if audio_embeds is not None:
|
| 224 |
+
audio_embeds = self.ln_audio(audio_embeds)
|
| 225 |
+
if audio_embeds.size(1) < speech_embeds.size(1):
|
| 226 |
+
audio_embeds = F.pad(audio_embeds, (0, 0, 0, speech_embeds.size(1) - audio_embeds.size(1)))
|
| 227 |
+
elif audio_embeds.size(1) > speech_embeds.size(1):
|
| 228 |
+
speech_embeds = F.pad(speech_embeds, (0, 0, 0, audio_embeds.size(1) - speech_embeds.size(1)))
|
| 229 |
+
speech_embeds = torch.cat((speech_embeds, audio_embeds), dim=-1)
|
| 230 |
+
speech_atts = torch.ones(speech_embeds.size()[:-1], dtype=torch.long).to(speech_embeds.device)
|
| 231 |
+
|
| 232 |
+
if self.window_level_Qformer:
|
| 233 |
+
B, T, C = speech_embeds.shape
|
| 234 |
+
kernel = round(1500 * self.second_per_window / 30.0)
|
| 235 |
+
stride = round(1500 * self.second_stride / 30.0)
|
| 236 |
+
kernel = (1, kernel)
|
| 237 |
+
stride = (1, stride)
|
| 238 |
+
speech_embeds_tr = speech_embeds.transpose(1, 2).unsqueeze(2)
|
| 239 |
+
speech_embeds_overlap = F.unfold(speech_embeds_tr, kernel_size=kernel, dilation=1, padding=0, stride=stride)
|
| 240 |
+
_, _, L = speech_embeds_overlap.shape
|
| 241 |
+
speech_embeds_overlap = speech_embeds_overlap.view(B, -1, kernel[1], L)
|
| 242 |
+
speech_embeds_overlap = torch.permute(speech_embeds_overlap, [0, 3, 2, 1])
|
| 243 |
+
speech_embeds = speech_embeds_overlap.reshape(-1, kernel[1], C)
|
| 244 |
+
speech_atts = torch.ones(speech_embeds.size()[:-1], dtype=torch.long, device=speech_embeds.device)
|
| 245 |
+
|
| 246 |
+
query_tokens = self.speech_query_tokens.expand(speech_embeds.shape[0], -1, -1)
|
| 247 |
+
query_output = self.speech_Qformer.bert(
|
| 248 |
+
query_embeds=query_tokens,
|
| 249 |
+
encoder_hidden_states=speech_embeds,
|
| 250 |
+
encoder_attention_mask=speech_atts,
|
| 251 |
+
return_dict=True,
|
| 252 |
+
)
|
| 253 |
+
speech_embeds = self.speech_llama_proj(query_output.last_hidden_state)
|
| 254 |
+
|
| 255 |
+
if self.window_level_Qformer:
|
| 256 |
+
speech_embeds = speech_embeds.view(B, -1, speech_embeds.size(2)).contiguous()
|
| 257 |
+
|
| 258 |
+
speech_atts = torch.ones(speech_embeds.size()[:-1], dtype=torch.long).to(speech_embeds.device)
|
| 259 |
+
else:
|
| 260 |
+
raise NotImplementedError
|
| 261 |
+
|
| 262 |
+
return speech_embeds, speech_atts
|
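Under the default window settings (second_per_window = second_stride = 0.333333), the arithmetic above turns the 1500-frame Whisper output for a 30 s clip into 17-frame windows with a 17-frame stride, i.e. about 88 windows, each compressed to num_speech_query_token query vectors before the LLaMA projection. A small numeric sketch of that bookkeeping (illustrative, not a real run):

```python
# Numeric sketch of the window-level Q-Former bookkeeping (default config assumed).
T = 1500                                        # Whisper encoder frames for 30 s
second_per_window = second_stride = 0.333333
kernel = round(T * second_per_window / 30.0)    # 17 frames per window
stride = round(T * second_stride / 30.0)        # 17 frames between window starts
num_windows = (T - kernel) // stride + 1        # 88 windows, as produced by F.unfold
print(kernel, stride, num_windows)              # 17 17 88
```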
| 263 |
+
|
| 264 |
+
def encode_speech(self, spectrogram, raw_wav=None, audio_padding_mask=None):
|
| 265 |
+
with self.maybe_autocast():
|
| 266 |
+
speech_embeds = self.speech_encoder(spectrogram, return_dict=True).last_hidden_state
|
| 267 |
+
|
| 268 |
+
if self.beats_path and raw_wav is not None:
|
| 269 |
+
audio_embeds, _ = self.beats.extract_features(raw_wav, padding_mask=audio_padding_mask, feature_only=True)
|
| 270 |
+
else:
|
| 271 |
+
audio_embeds = None
|
| 272 |
+
|
| 273 |
+
return self._encode_auditory_feature(speech_embeds, audio_embeds=audio_embeds)
|
| 274 |
+
|
| 275 |
+
def prompt_wrap(self, embeds, atts, prompt, multi_prompt=False):
|
| 276 |
+
if prompt:
|
| 277 |
+
if multi_prompt:
|
| 278 |
+
p_before = []
|
| 279 |
+
p_after = []
|
| 280 |
+
for i, p in enumerate(prompt):
|
| 281 |
+
b, a = p.split("<SpeechHere>")
|
| 282 |
+
p_before.append(b)
|
| 283 |
+
p_after.append(a)
|
| 284 |
+
|
| 285 |
+
p_before_tokens = self.llama_tokenizer(
|
| 286 |
+
p_before, return_tensors="pt", add_special_tokens=False
|
| 287 |
+
).to(embeds.device)
|
| 288 |
+
p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids) if not self.lora else self.llama_model.model.model.embed_tokens(p_before_tokens.input_ids)
|
| 289 |
+
|
| 290 |
+
# speech_embeds wrapped with prompts_embeds are padded to the same length here
|
| 291 |
+
p_after_tokens = self.llama_tokenizer(
|
| 292 |
+
p_after, return_tensors="pt", padding="longest", add_special_tokens=False
|
| 293 |
+
).to(embeds.device)
|
| 294 |
+
p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids) if not self.lora else self.llama_model.model.model.embed_tokens(p_after_tokens.input_ids)
|
| 295 |
+
|
| 296 |
+
wrapped_embeds = torch.cat([p_before_embeds, embeds, p_after_embeds], dim=1)
|
| 297 |
+
wrapped_atts = torch.cat([p_before_tokens.attention_mask, atts, p_after_tokens.attention_mask], dim=1)
|
| 298 |
+
else:
|
| 299 |
+
batch_size = embeds.shape[0]
|
| 300 |
+
p_before, p_after = prompt.split("<SpeechHere>")
|
| 301 |
+
|
| 302 |
+
p_before_tokens = self.llama_tokenizer(
|
| 303 |
+
p_before, return_tensors="pt", add_special_tokens=False
|
| 304 |
+
).to(embeds.device)
|
| 305 |
+
p_after_tokens = self.llama_tokenizer(
|
| 306 |
+
p_after, return_tensors="pt", add_special_tokens=False
|
| 307 |
+
).to(embeds.device)
|
| 308 |
+
p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1) if not self.lora else self.llama_model.model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1)
|
| 309 |
+
p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1) if not self.lora else self.llama_model.model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1)
|
| 310 |
+
|
| 311 |
+
wrapped_embeds = torch.cat([p_before_embeds, embeds, p_after_embeds], dim=1)
|
| 312 |
+
wrapped_atts = torch.cat([p_before_tokens.attention_mask, atts, p_after_tokens.attention_mask], dim=1)
|
| 313 |
+
return wrapped_embeds, wrapped_atts
|
| 314 |
+
else:
|
| 315 |
+
return embeds, atts
|
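prompt_wrap splits each prompt at the literal `<SpeechHere>` placeholder and embeds the two halves around the speech tokens. A string-level sketch (the prompt text is made up for illustration):

```python
# Sketch of the split performed above; only the string handling is shown.
prompt = "USER: <Speech><SpeechHere></Speech> Please transcribe the audio.\nASSISTANT:"
p_before, p_after = prompt.split("<SpeechHere>")
# p_before is embedded before the speech embeddings, p_after (and later the reply) after them.
```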
| 316 |
+
|
| 317 |
+
def forward(self, samples, verbose=False):
|
| 318 |
+
# detect whether there are multiple tasks in this batch
|
| 319 |
+
task = list(set(samples["task"]))
|
| 320 |
+
if len(task) > 1 or "QA" in task:
|
| 321 |
+
self.multi_prompt = True
|
| 322 |
+
|
| 323 |
+
# prepare prompts
|
| 324 |
+
if self.prompt_dict:
|
| 325 |
+
if self.multi_prompt:
|
| 326 |
+
prompt = [random.choice(self.prompt_dict[task]) for task in samples["task"]]
|
| 327 |
+
if "Q" in samples:
|
| 328 |
+
prompt = [p.format(q) if '{}' in p else p for p, q in zip(prompt, samples["Q"]) ]
|
| 329 |
+
else:
|
| 330 |
+
prompt = random.choice(self.prompt_dict[samples["task"][0]])
|
| 331 |
+
|
| 332 |
+
# use speech/audio encoder to encode speech/audio
|
| 333 |
+
spectrogram = samples["spectrogram"]
|
| 334 |
+
raw_wav = samples.get("raw_wav", None)
|
| 335 |
+
audio_padding_mask = samples.get("padding_mask", None)
|
| 336 |
+
|
| 337 |
+
speech_embeds, speech_atts = self.encode_speech(spectrogram, raw_wav=raw_wav, audio_padding_mask=audio_padding_mask)
|
| 338 |
+
|
| 339 |
+
# wrap speech_embeds with prompts
|
| 340 |
+
if self.prompt_dict:
|
| 341 |
+
speech_embeds, speech_atts = self.prompt_wrap(speech_embeds, speech_atts, prompt, multi_prompt=self.multi_prompt)
|
| 342 |
+
|
| 343 |
+
# prepare inputs for LLM
|
| 344 |
+
text = [t + self.end_sym for t in samples["text"]]
|
| 345 |
+
to_regress_tokens = self.llama_tokenizer(
|
| 346 |
+
text,
|
| 347 |
+
return_tensors="pt",
|
| 348 |
+
padding="longest",
|
| 349 |
+
truncation=True,
|
| 350 |
+
max_length=self.max_txt_len,
|
| 351 |
+
add_special_tokens=False
|
| 352 |
+
).to(spectrogram.device)
|
| 353 |
+
to_regress_embeds = self.llama_model.model.embed_tokens(to_regress_tokens.input_ids) if not self.lora else self.llama_model.model.model.embed_tokens(to_regress_tokens.input_ids)
|
| 354 |
+
targets = to_regress_tokens.input_ids.masked_fill(
|
| 355 |
+
to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, -100
|
| 356 |
+
)
|
| 357 |
+
empty_targets = (
|
| 358 |
+
torch.ones(
|
| 359 |
+
[speech_atts.shape[0], speech_atts.shape[1] + 1],
|
| 360 |
+
dtype=torch.long
|
| 361 |
+
).to(spectrogram.device).fill_(-100)
|
| 362 |
+
)
|
| 363 |
+
targets = torch.cat([empty_targets, targets], dim=1)
|
| 364 |
+
|
| 365 |
+
batch_size = speech_embeds.shape[0]
|
| 366 |
+
bos = torch.ones(
|
| 367 |
+
[batch_size, 1],
|
| 368 |
+
dtype=to_regress_tokens.input_ids.dtype,
|
| 369 |
+
device=to_regress_tokens.input_ids.device,
|
| 370 |
+
) * self.llama_tokenizer.bos_token_id
|
| 371 |
+
bos_embeds = self.llama_model.model.embed_tokens(bos) if not self.lora else self.llama_model.model.model.embed_tokens(bos)
|
| 372 |
+
atts_bos = speech_atts[:, :1]
|
| 373 |
+
|
| 374 |
+
inputs_embeds = torch.cat([bos_embeds, speech_embeds, to_regress_embeds], dim=1)
|
| 375 |
+
attention_mask = torch.cat([atts_bos, speech_atts, to_regress_tokens.attention_mask], dim=1)
|
| 376 |
+
|
| 377 |
+
# calculate loss
|
| 378 |
+
with self.maybe_autocast():
|
| 379 |
+
outputs = self.llama_model(
|
| 380 |
+
inputs_embeds=inputs_embeds,
|
| 381 |
+
attention_mask=attention_mask,
|
| 382 |
+
return_dict=True,
|
| 383 |
+
labels=targets,
|
| 384 |
+
)
|
| 385 |
+
loss = outputs.loss
|
| 386 |
+
|
| 387 |
+
if verbose:
|
| 388 |
+
nvocab = self.llama_model.config.vocab_size
|
| 389 |
+
results = outputs.logits[:, empty_targets.size(1) - 1: -1, :].contiguous().view(-1, nvocab).argmax(dim=-1)
|
| 390 |
+
labels = targets[:, empty_targets.size(1):].contiguous().view(-1)
|
| 391 |
+
mask = (labels != -100)
|
| 392 |
+
correct = (results[mask] == labels[mask]).float().sum()
|
| 393 |
+
total = len(labels[mask])
|
| 394 |
+
|
| 395 |
+
if verbose:
|
| 396 |
+
return {"loss": loss, "correct": correct, "total": total}
|
| 397 |
+
|
| 398 |
+
return {"loss": loss}
|
| 399 |
+
|
| 400 |
+
def generate(self, samples, generate_cfg, prompts=None):
|
| 401 |
+
batch_size = samples["spectrogram"].shape[0]
|
| 402 |
+
|
| 403 |
+
spectrogram = samples["spectrogram"]
|
| 404 |
+
raw_wav = samples.get("raw_wav", None)
|
| 405 |
+
audio_padding_mask = samples.get("padding_mask", None)
|
| 406 |
+
|
| 407 |
+
speech_embeds, speech_atts = self.encode_speech(spectrogram, raw_wav=raw_wav, audio_padding_mask=audio_padding_mask)
|
| 408 |
+
|
| 409 |
+
if prompts is not None:
|
| 410 |
+
speech_embeds, speech_atts = self.prompt_wrap(speech_embeds, speech_atts, prompts, multi_prompt=True)
|
| 411 |
+
|
| 412 |
+
bos = torch.ones(
|
| 413 |
+
[batch_size, 1],
|
| 414 |
+
dtype=torch.int32,
|
| 415 |
+
device=speech_embeds.device,
|
| 416 |
+
) * self.llama_tokenizer.bos_token_id
|
| 417 |
+
bos_embeds = self.llama_model.model.embed_tokens(bos) if not self.lora else self.llama_model.model.model.embed_tokens(bos)
|
| 418 |
+
atts_bos = speech_atts[:, :1]
|
| 419 |
+
|
| 420 |
+
embeds = torch.cat([bos_embeds, speech_embeds], dim=1)
|
| 421 |
+
attns = torch.cat([atts_bos, speech_atts], dim=1)
|
| 422 |
+
|
| 423 |
+
stop_words_ids = [torch.tensor([2]).cuda()]
|
| 424 |
+
stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
|
| 425 |
+
outputs = self.llama_model.generate(
|
| 426 |
+
inputs_embeds=embeds,
|
| 427 |
+
max_new_tokens=generate_cfg.get("max_new_tokens", 200),
|
| 428 |
+
stopping_criteria=stopping_criteria,
|
| 429 |
+
num_beams=generate_cfg.get("num_beams", 4),
|
| 430 |
+
do_sample=generate_cfg.get("do_sample", False),
|
| 431 |
+
min_length=generate_cfg.get("min_length", 1),
|
| 432 |
+
temperature=generate_cfg.get("temperature", 1.0),
|
| 433 |
+
top_p=generate_cfg.get("top_p", 0.9),
|
| 434 |
+
repetition_penalty=generate_cfg.get("repetition_penalty", 1.0),
|
| 435 |
+
length_penalty=generate_cfg.get("length_penalty", 1.0),
|
| 436 |
+
attention_mask=attns,
|
| 437 |
+
)
|
| 438 |
+
text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False)
|
| 439 |
+
|
| 440 |
+
return text
|
| 441 |
+
|
| 442 |
+
@classmethod
|
| 443 |
+
def from_config(cls, config):
|
| 444 |
+
llama_path = config.get("llama_path")
|
| 445 |
+
whisper_path = config.get("whisper_path")
|
| 446 |
+
freeze_whisper = config.get("freeze_whisper", True)
|
| 447 |
+
beats_path = config.get("beats_path", "")
|
| 448 |
+
freeze_beats = config.get("freeze_beats", True)
|
| 449 |
+
|
| 450 |
+
use_speech_Qformer = config.get("use_speech_Qformer", True)
|
| 451 |
+
num_speech_query_token = config.get("num_speech_query_token", 1)
|
| 452 |
+
freeze_speech_QFormer = config.get("freeze_speech_QFormer", False)
|
| 453 |
+
window_level_Qformer = config.get("window_level_Qformer", True)
|
| 454 |
+
second_per_window = config.get("second_per_window", 0.333333)
|
| 455 |
+
second_stride = config.get("second_stride", 0.333333)
|
| 456 |
+
|
| 457 |
+
speech_llama_proj_model = config.get("speech_llama_proj_model", "")
|
| 458 |
+
freeze_speech_llama_proj = config.get("freeze_speech_llama_proj", False)
|
| 459 |
+
|
| 460 |
+
lora = config.get("lora", True)
|
| 461 |
+
lora_rank = config.get("lora_rank", 8)
|
| 462 |
+
lora_alpha = config.get("lora_alpha", 32)
|
| 463 |
+
lora_dropout = config.get("lora_dropout", 0.1)
|
| 464 |
+
|
| 465 |
+
multi_prompt = config.get("multi_prompt", False)
|
| 466 |
+
prompt_path = config.get("prompt_path", "")
|
| 467 |
+
prompt_template = config.get("prompt_template", "")
|
| 468 |
+
max_txt_len = config.get("max_txt_len", 128)
|
| 469 |
+
end_sym = config.get("end_sym", "</s>")
|
| 470 |
+
low_resource = config.get("low_resource", False)
|
| 471 |
+
device_8bit = config.get("device_8bit", 0)
|
| 472 |
+
|
| 473 |
+
model = cls(
|
| 474 |
+
llama_path=llama_path,
|
| 475 |
+
whisper_path=whisper_path,
|
| 476 |
+
freeze_whisper=freeze_whisper,
|
| 477 |
+
beats_path=beats_path,
|
| 478 |
+
freeze_beats=freeze_beats,
|
| 479 |
+
use_speech_Qformer=use_speech_Qformer,
|
| 480 |
+
num_speech_query_token=num_speech_query_token,
|
| 481 |
+
freeze_speech_QFormer=freeze_speech_QFormer,
|
| 482 |
+
window_level_Qformer=window_level_Qformer,
|
| 483 |
+
second_per_window=second_per_window,
|
| 484 |
+
second_stride=second_stride,
|
| 485 |
+
speech_llama_proj_model=speech_llama_proj_model,
|
| 486 |
+
freeze_speech_llama_proj=freeze_speech_llama_proj,
|
| 487 |
+
lora=lora,
|
| 488 |
+
lora_rank=lora_rank,
|
| 489 |
+
lora_alpha=lora_alpha,
|
| 490 |
+
lora_dropout=lora_dropout,
|
| 491 |
+
multi_prompt=multi_prompt,
|
| 492 |
+
prompt_path=prompt_path,
|
| 493 |
+
prompt_template=prompt_template,
|
| 494 |
+
max_txt_len=max_txt_len,
|
| 495 |
+
end_sym=end_sym,
|
| 496 |
+
low_resource=low_resource,
|
| 497 |
+
device_8bit=device_8bit,
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
ckpt_path = config.get("ckpt", "")
|
| 501 |
+
if ckpt_path:
|
| 502 |
+
logging.info("Load SALMONN ckpt from: {}".format(ckpt_path))
|
| 503 |
+
ckpt = torch.load(ckpt_path, map_location="cpu")
|
| 504 |
+
model.load_state_dict(ckpt['model'], strict=False)
|
| 505 |
+
|
| 506 |
+
return model
|
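Reviewer's note: from_config only reads flat keys through config.get(...), so the model block of the config it expects can be read off the code above. The sketch below is illustrative only; the paths are placeholders, the prompt template is an assumption (the authoritative values live in this repo's config.yaml), and it assumes the class defined earlier in models/salmonn.py is named SALMONN.

# Illustrative config sketch; every key mirrors a config.get(...) call in from_config above.
from omegaconf import OmegaConf

model_config = OmegaConf.create({
    "llama_path": "/path/to/vicuna",            # placeholder
    "whisper_path": "/path/to/whisper-large",   # placeholder
    "beats_path": "/path/to/BEATs.pt",          # placeholder
    "ckpt": "/path/to/salmonn_ckpt.pth",        # placeholder; loaded with strict=False
    "lora": True,
    "lora_rank": 8,
    "lora_alpha": 32,
    "multi_prompt": True,
    "prompt_path": "prompts/train_prompt.json",
    "prompt_template": "USER: {}\nASSISTANT:",  # assumption; check config.yaml for the real template
    "max_txt_len": 128,
    "end_sym": "</s>",
})
# model = SALMONN.from_config(model_config)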
models/utils.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (2024) Tsinghua University, Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from transformers import StoppingCriteria


class StoppingCriteriaSub(StoppingCriteria):

    def __init__(self, stops=[], encounters=1):
        super().__init__()
        self.stops = stops

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
        for stop in self.stops:
            if torch.all((stop == input_ids[0][-len(stop):])).item():
                return True

        return False
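One observation on how this class is wired up: generate() in models/salmonn.py above builds stop_words_ids = [torch.tensor([2]).cuda()], hard-coding token id 2, which is the </s> id for LLaMA/Vicuna tokenizers. A minimal, editor-added sketch of the same wiring that takes the id from the tokenizer instead:

# Sketch only: derive the stop id from the tokenizer rather than hard-coding 2.
import torch
from transformers import StoppingCriteriaList

from models.utils import StoppingCriteriaSub

def build_stopping_criteria(llama_tokenizer, device="cuda"):
    stop_ids = [torch.tensor([llama_tokenizer.eos_token_id], device=device)]
    return StoppingCriteriaList([StoppingCriteriaSub(stops=stop_ids)])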
prompts/test_prompt.json
ADDED
@@ -0,0 +1,27 @@
{
    "asr": "<Speech><SpeechHere></Speech> Recognize the speech and give me the transcription.",
    "asr_zh": "<Speech><SpeechHere></Speech> 请将语音中的内容写下来。",
    "asr_de": "<Speech><SpeechHere></Speech> Hören Sie sich die Rede an und schreiben Sie ihren Inhalt auf.",
    "translation_ec": "<Speech><SpeechHere></Speech> Listen to the speech and translate it into Chinese.",
    "audiocaption": "<Speech><SpeechHere></Speech> Please describe the audio.",
    "audiocaption_v2": "<Speech><SpeechHere></Speech> Please write down what your hear in the audio.",
    "QA": "<Speech><SpeechHere></Speech> {}",
    "gender_QA": "<Speech><SpeechHere></Speech> {}",
    "phone_recognition": "<Speech><SpeechHere></Speech> Provide the phonetic transcription for the speech.",
    "speech_query": "<Speech><SpeechHere></Speech> Please answer the question in detail.",
    "emotion_recognition": "<Speech><SpeechHere></Speech> Describe the emotion of the speaker in one word.",
    "lyrics_recognition": "<Speech><SpeechHere></Speech> Listen to the song and write down its content.",
    "audio_speech_description": "<Speech><SpeechHere></Speech> Describe the speech and the background audio",
    "speaker_verification": "<Speech><SpeechHere></Speech> Do you only hear the same person talking? Answer yes or no.",
    "fluent_speech_audio": "<Speech><SpeechHere></Speech> Describe the background audio and the speech in a fluent sentence.",
    "speech_separation": "<Speech><SpeechHere></Speech> Please write down what you hear each person says.",
    "audio_story_telling": "<Speech><SpeechHere></Speech> Based on the audio, write a story in detail. Your story should be highly related to the audio.",
    "speech_audio_query": "<Speech><SpeechHere></Speech> Please answer the speaker's question in detail based on the background sound.",
    "slot_filling": "<Speech><SpeechHere></Speech> According to the speech, what is the {}?",
    "music_description": "<Speech><SpeechHere></Speech> Listen to this music clip and describe the music.",
    "translation_en2ja": "<Speech><SpeechHere></Speech> Listen to the speech and translate it into Japanese.",
    "translation_en2de": "<Speech><SpeechHere></Speech> Listen to the speech and translate it into German.",
    "speech_audio_coreasoning": "<Speech><SpeechHere></Speech> Use your strong reasoning skills to answer the speaker's question in detail based on the background sound.",
    "keywords": "<Speech><SpeechHere></Speech> Give me only three keywords of the text.",
    "speaker_diarization_asr": "<Speech><SpeechHere></Speech> Please recognize each speaker and transcribe their speech content."
}
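A note on how these templates are consumed by the code above: every entry wraps a <Speech><SpeechHere></Speech> region, where prompt_wrap (defined earlier in models/salmonn.py, not shown in this diff) splices the encoded audio in place of the <SpeechHere> placeholder, and the QA-style entries carry a {} slot that forward() fills with the question via p.format(q). A rough, text-only illustration of that substitution:

# Text-side illustration only; the actual splicing happens on embeddings inside prompt_wrap().
import json

with open("prompts/test_prompt.json") as f:
    test_prompts = json.load(f)

prompt = test_prompts["QA"]                        # "<Speech><SpeechHere></Speech> {}"
prompt = prompt.format("What is the speaker talking about?")

# prompt_wrap is expected to split on the placeholder and insert the audio tokens between the halves.
before, after = prompt.split("<SpeechHere>")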
prompts/train_prompt.json
ADDED
@@ -0,0 +1,132 @@
{
    "asr": [
        "<Speech><SpeechHere></Speech> Can you transcribe the speech into a written format?",
        "<Speech><SpeechHere></Speech> Listen to the speech and write down its content.",
        "<Speech><SpeechHere></Speech> What is the content of the speech you heard?",
        "<Speech><SpeechHere></Speech> Please write down the transcription of the speech.",
        "<Speech><SpeechHere></Speech> Please transcribe the speech into a written format.",
        "<Speech><SpeechHere></Speech> Write down the content of the speech you heard.",
        "<Speech><SpeechHere></Speech> Can you write down the transcription of the speech?",
        "<Speech><SpeechHere></Speech> Put the speech into a written format.",
        "<Speech><SpeechHere></Speech> Please help me to transcribe the speech into a written format.",
        "<Speech><SpeechHere></Speech> Recognize the content of the speech you heard.",
        "<Speech><SpeechHere></Speech> Can you recognize what you heard in the speech?",
        "<Speech><SpeechHere></Speech> Recognize the speech and write it down in a written format.",
        "<Speech><SpeechHere></Speech> Listen to the speech and recognize its content.",
        "<Speech><SpeechHere></Speech> Give me the transcription of the speech you heard.",
        "<Speech><SpeechHere></Speech> Recognize the speech and give me the transcription."
    ],
    "asr_zh": [
        "<Speech><SpeechHere></Speech> 前面的语音说了什么?",
        "<Speech><SpeechHere></Speech> 请将语音中的内容写下来。",
        "<Speech><SpeechHere></Speech> 请识别这段中文语音。",
        "<Speech><SpeechHere></Speech> 听前面的音频,写出对方说的内容。",
        "<Speech><SpeechHere></Speech> 写下你听到的内容。",
        "<Speech><SpeechHere></Speech> 请记下语音中人说的话。",
        "<Speech><SpeechHere></Speech> 仔细听这段语音,记下语音中的话",
        "<Speech><SpeechHere></Speech> 将你听到的话写下来",
        "<Speech><SpeechHere></Speech> 这个人说了什么?请记下来。",
        "<Speech><SpeechHere></Speech> 请将语音转换为文字",
        "<Speech><SpeechHere></Speech> 请识别这个人说的内容"
    ],
    "asr_de": [
        "<Speech><SpeechHere></Speech> Können Sie die Rede in ein schriftliches Format übertragen?",
        "<Speech><SpeechHere></Speech> Hören Sie sich die Rede an und schreiben Sie ihren Inhalt auf.",
        "<Speech><SpeechHere></Speech> Bitte notieren Sie die Transkription der Rede.",
        "<Speech><SpeechHere></Speech> Geben Sie mir die Transkription der Rede, die Sie gehört haben.",
        "<Speech><SpeechHere></Speech> Was hat dieser Mann gesagt? Bitte schreiben Sie es auf.",
        "<Speech><SpeechHere></Speech> Können Sie die Transkription der Rede aufschreiben?",
        "<Speech><SpeechHere></Speech> Hören Sie der Stimme aufmerksam zu und notieren Sie die Wörter in der Stimme",
        "<Speech><SpeechHere></Speech> Schreiben Sie auf, was Sie hören.",
        "<Speech><SpeechHere></Speech> Bitte Sprache in Text umwandeln.",
        "<Speech><SpeechHere></Speech> Erkennen Sie den Inhalt der Rede, die Sie gehört haben."
    ],
    "translation_ec": [
        "<Speech><SpeechHere></Speech> Can you translate the speech into Chinese?",
        "<Speech><SpeechHere></Speech> Please translate the speech you heard into Chinese.",
        "<Speech><SpeechHere></Speech> Listen to the speech and translate it into Chinese.",
        "<Speech><SpeechHere></Speech> Give me the Chinese translation of this speech.",
        "<Speech><SpeechHere></Speech> Could you please provide a Chinese translation for the speech?",
        "<Speech><SpeechHere></Speech> Would you be willing to translate the speech into Chinese for me?",
        "<Speech><SpeechHere></Speech> Would you be able to render the speech in Chinese?",
        "<Speech><SpeechHere></Speech> Could you assist me in translating the speech into Chinese?",
        "<Speech><SpeechHere></Speech> Can you help me convert the speech into Chinese text?",
        "<Speech><SpeechHere></Speech> Please convert the speech into Chinese text.",
        "<Speech><SpeechHere></Speech> 请将这段语音的内容翻译成中文。",
        "<Speech><SpeechHere></Speech> 你能把这段语音用中文表达出来吗?",
        "<Speech><SpeechHere></Speech> 请将你听到的语音用中文写出来。"
    ],
    "audiocaption": [
        "<Speech><SpeechHere></Speech> Listen to this audio clip and provide its caption.",
        "<Speech><SpeechHere></Speech> Describe the following audio in a caption.",
        "<Speech><SpeechHere></Speech> Based on the sound you hear, create a caption for this audio.",
        "<Speech><SpeechHere></Speech> Can you describe the scene or event depicted in this audio?",
        "<Speech><SpeechHere></Speech> Could you summarise what's happening in this audio?",
        "<Speech><SpeechHere></Speech> What does this audio describe?",
        "<Speech><SpeechHere></Speech> Please describe the audio."
    ],
    "audiocaption_v2": [
        "<Speech><SpeechHere></Speech> Please write down what your hear in the audio."
    ],
    "QA": [
        "<Speech><SpeechHere></Speech> {}"
    ],
    "inference_QA": [
        "<Speech><SpeechHere></Speech> {}"
    ],
    "gender_QA": [
        "<Speech><SpeechHere></Speech> {}"
    ],
    "gender_recognition": [
        "<Speech><SpeechHere></Speech> What is the gender of the speaker?",
        "<Speech><SpeechHere></Speech> Use one word to describe the speaker's gender.",
        "<Speech><SpeechHere></Speech> Describe the speaker's gender.",
        "<Speech><SpeechHere></Speech> Can you accurately identify the gender of the speaker?",
        "<Speech><SpeechHere></Speech> Can you distinguish the gender of the speaker?",
        "<Speech><SpeechHere></Speech> Describe the gender of the person speaking.",
        "<Speech><SpeechHere></Speech> What is the speaker's gender based on the audio?",
        "<Speech><SpeechHere></Speech> Tell me about the gender of the person you hear.",
        "<Speech><SpeechHere></Speech> Is the speaker male or female?"
    ],
    "phone_recognition": [
        "<Speech><SpeechHere></Speech> Please transcribe the audio clip into its corresponding phonetic representation.",
        "<Speech><SpeechHere></Speech> Write the sequence of phonemes corresponding to this speech.",
        "<Speech><SpeechHere></Speech> Provide the phonetic transcription for the speech.",
        "<Speech><SpeechHere></Speech> Transcribe the phonemes for the speech please.",
        "<Speech><SpeechHere></Speech> Can you recognize the phonetic representation in the speech?",
        "<Speech><SpeechHere></Speech> Listen to the speech and recognize its phonetic representation",
        "<Speech><SpeechHere></Speech> What is the phoneme transcription of the speech?"
    ],
    "speech_separation": [
        "<Speech><SpeechHere></Speech> There are two people talking in the audio, please write what they say in order.",
        "<Speech><SpeechHere></Speech> Please write down what you hear each person says.",
        "<Speech><SpeechHere></Speech> Can you record what each person says?",
        "<Speech><SpeechHere></Speech> Transcribe the words spoken by each person in the audio."
    ],
    "emotion_recognition": [
        "<Speech><SpeechHere></Speech> Describe the emotion of the speaker in one word.",
        "<Speech><SpeechHere></Speech> Use one word to describe the speaker's emotion."
    ],
    "music_description": [
        "<Speech><SpeechHere></Speech> Listen to this music clip and describe the music.",
        "<Speech><SpeechHere></Speech> Please describe the music.",
        "<Speech><SpeechHere></Speech> Provide a description of the music.",
        "<Speech><SpeechHere></Speech> Analyze the music in this clip and offer a description.",
        "<Speech><SpeechHere></Speech> Give me a description of the music in this clip."
    ],
    "speaker_verification": [
        "<Speech><SpeechHere></Speech> Are the two people speaking successively the same person? Answer yes or no.",
        "<Speech><SpeechHere></Speech> Do you only hear the same person talking? Answer yes or no.",
        "<Speech><SpeechHere></Speech> Is only one person speaking in the audio? Answer yes or no."
    ],
    "audio_story_telling": [
        "<Speech><SpeechHere></Speech> Based on the audio, write a story in detail. Your story should be highly related to the audio.",
        "<Speech><SpeechHere></Speech> Please write a story in detail based on the audio. Your story should contain all the elements in the audio.",
        "<Speech><SpeechHere></Speech> Please generate a long story that is highly related to the audio."
    ],
    "speaker_diarization_asr": [
        "<Speech><SpeechHere></Speech> Identify each speaker in turn and what is said.",
        "<Speech><SpeechHere></Speech> Write down the content of each speaker and the corresponding speech in turn.",
        "<Speech><SpeechHere></Speech> Please recognize each speaker and transcribe their speech content."
    ]
}
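These lists feed the multi_prompt branch of forward() shown earlier: each training sample's task name indexes this dictionary and one template is drawn at random, so the model sees varied instructions for the same task. A minimal illustration of that lookup:

# Mirrors the random.choice(self.prompt_dict[task]) sampling in forward() above.
import json
import random

with open("prompts/train_prompt.json") as f:
    prompt_dict = json.load(f)

tasks = ["asr", "audiocaption", "QA"]
prompts = [random.choice(prompt_dict[task]) for task in tasks]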
requirements.txt
ADDED
@@ -0,0 +1,14 @@
# SALMONN Inference Requirements
torch>=2.0.0
transformers>=4.35.0
peft>=0.6.0
accelerate>=0.24.0
soundfile>=0.12.0
librosa>=0.10.0
numpy<2.0.0
omegaconf>=2.3.0
fastapi>=0.104.0
uvicorn>=0.24.0
python-multipart>=0.0.6
pydantic>=2.0.0
aiofiles>=23.0.0
server.py
ADDED
@@ -0,0 +1,184 @@
"""
SALMONN FastAPI Server
HTTP API for audio understanding and transcription.
"""

import os
import tempfile
import shutil
from pathlib import Path
from typing import Optional

import yaml
import uvicorn
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from omegaconf import OmegaConf

from inference import SALMONNInference

# Load config
CONFIG_PATH = os.environ.get("SALMONN_CONFIG", "config.yaml")

with open(CONFIG_PATH, "r") as f:
    config = OmegaConf.create(yaml.safe_load(f))

# Initialize FastAPI app
app = FastAPI(
    title="SALMONN API",
    description="Audio Language Model for Speech, Audio Events, and Music Understanding",
    version="1.0.0",
)

# Global model instance (loaded on startup)
model: Optional[SALMONNInference] = None


class TranscribeResponse(BaseModel):
    text: str
    status: str = "success"


class ChatResponse(BaseModel):
    question: str
    answer: str
    status: str = "success"


class HealthResponse(BaseModel):
    status: str
    model_loaded: bool
    device: str


@app.on_event("startup")
async def startup_event():
    """Load model on startup."""
    global model
    print("Starting SALMONN server...")
    model = SALMONNInference(CONFIG_PATH)
    model.load()
    print("Server ready!")


@app.get("/", response_model=dict)
async def root():
    """Root endpoint with API info."""
    return {
        "name": "SALMONN API",
        "version": "1.0.0",
        "endpoints": {
            "/health": "Health check",
            "/transcribe": "Transcribe audio (POST)",
            "/chat": "Ask questions about audio (POST)",
        }
    }


@app.get("/health", response_model=HealthResponse)
async def health():
    """Health check endpoint."""
    return HealthResponse(
        status="healthy" if model and model._loaded else "loading",
        model_loaded=model._loaded if model else False,
        device=str(model.device) if model else "unknown",
    )


@app.post("/transcribe", response_model=TranscribeResponse)
async def transcribe(
    audio: UploadFile = File(..., description="Audio file (wav, mp3, etc.)"),
):
    """
    Transcribe an audio file to text.

    - **audio**: Audio file to transcribe

    Returns transcribed text.
    """
    if not model or not model._loaded:
        raise HTTPException(status_code=503, detail="Model not loaded yet")

    # Save uploaded file temporarily
    suffix = Path(audio.filename).suffix if audio.filename else ".wav"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        shutil.copyfileobj(audio.file, tmp)
        tmp_path = tmp.name

    try:
        text = model.transcribe(tmp_path)
        return TranscribeResponse(text=text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        os.unlink(tmp_path)


@app.post("/chat", response_model=ChatResponse)
async def chat(
    audio: UploadFile = File(..., description="Audio file (wav, mp3, etc.)"),
    question: str = Form(..., description="Question about the audio"),
):
    """
    Ask a question about an audio file.

    - **audio**: Audio file to analyze
    - **question**: Question about the audio content

    Returns the model's answer.
    """
    if not model or not model._loaded:
        raise HTTPException(status_code=503, detail="Model not loaded yet")

    # Save uploaded file temporarily
    suffix = Path(audio.filename).suffix if audio.filename else ".wav"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        shutil.copyfileobj(audio.file, tmp)
        tmp_path = tmp.name

    try:
        answer = model.chat(tmp_path, question)
        return ChatResponse(question=question, answer=answer)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        os.unlink(tmp_path)


@app.post("/describe")
async def describe(
    audio: UploadFile = File(..., description="Audio file (wav, mp3, etc.)"),
):
    """
    Get a detailed description of the audio content.

    - **audio**: Audio file to describe

    Returns description of the audio.
    """
    if not model or not model._loaded:
        raise HTTPException(status_code=503, detail="Model not loaded yet")

    # Save uploaded file temporarily
    suffix = Path(audio.filename).suffix if audio.filename else ".wav"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        shutil.copyfileobj(audio.file, tmp)
        tmp_path = tmp.name

    try:
        description = model.describe(tmp_path)
        return {"description": description, "status": "success"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        os.unlink(tmp_path)


if __name__ == "__main__":
    uvicorn.run(
        "server:app",
        host=config.server.host,
        port=config.server.port,
        reload=config.server.get("reload", False),
    )
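Once the server is up, the three POST endpoints take multipart uploads. A small client sketch follows; it assumes the server listens on localhost:8000 (the real host and port come from config.yaml) and uses the requests package, which is not pinned in requirements.txt, so treat it as an extra dependency:

# Example client for the /transcribe and /chat endpoints defined above.
import requests

BASE = "http://localhost:8000"  # assumption; use config.server.host / config.server.port

with open("my_audio.wav", "rb") as f:            # placeholder audio file
    r = requests.post(f"{BASE}/transcribe", files={"audio": f})
print(r.json()["text"])

with open("my_audio.wav", "rb") as f:
    r = requests.post(
        f"{BASE}/chat",
        files={"audio": f},
        data={"question": "What is happening in this audio?"},
    )
print(r.json()["answer"])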
utils.py
ADDED
@@ -0,0 +1,158 @@
# Copyright (2024) Tsinghua University, Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time

import torch
from torch.utils.data import DataLoader, DistributedSampler
import soundfile as sf
import numpy as np

from dist_utils import is_main_process, get_world_size, get_rank


def now():
    from datetime import datetime

    return datetime.now().strftime("%Y%m%d%H%M")


def setup_logger():
    logging.basicConfig(
        level=logging.INFO if is_main_process() else logging.WARN,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[logging.StreamHandler()],
    )


def get_dataloader(dataset, config, is_train=True, use_distributed=True):
    if use_distributed:
        sampler = DistributedSampler(
            dataset,
            shuffle=is_train,
            num_replicas=get_world_size(),
            rank=get_rank()
        )
    else:
        sampler = None

    loader = DataLoader(
        dataset,
        batch_size=config.batch_size_train if is_train else config.batch_size_eval,
        num_workers=config.num_workers,
        pin_memory=True,
        sampler=sampler,
        shuffle=sampler is None and is_train,
        collate_fn=dataset.collater,
        drop_last=is_train,
    )

    if is_train:
        loader = IterLoader(loader, use_distributed=use_distributed)

    return loader


def apply_to_sample(f, sample):
    if len(sample) == 0:
        return {}

    def _apply(x):
        if torch.is_tensor(x):
            return f(x)
        elif isinstance(x, dict):
            return {key: _apply(value) for key, value in x.items()}
        elif isinstance(x, list):
            return [_apply(x) for x in x]
        else:
            return x

    return _apply(sample)


def move_to_cuda(sample):
    def _move_to_cuda(tensor):
        return tensor.cuda()

    return apply_to_sample(_move_to_cuda, sample)


def prepare_sample(samples, cuda_enabled=True):
    if cuda_enabled:
        samples = move_to_cuda(samples)

    # TODO fp16 support

    return samples


class IterLoader:
    """
    A wrapper to convert DataLoader as an infinite iterator.

    Modified from:
        https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py
    """

    def __init__(self, dataloader: DataLoader, use_distributed: bool = False):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._use_distributed = use_distributed
        self._epoch = 0

    @property
    def epoch(self) -> int:
        return self._epoch

    def __next__(self):
        try:
            data = next(self.iter_loader)
        except StopIteration:
            self._epoch += 1
            if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed:
                self._dataloader.sampler.set_epoch(self._epoch)
            time.sleep(2)  # Prevent possible deadlock during epoch transition
            self.iter_loader = iter(self._dataloader)
            data = next(self.iter_loader)

        return data

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._dataloader)


def prepare_one_sample(wav_path, wav_processor, cuda_enabled=True):
    audio, sr = sf.read(wav_path)
    if len(audio.shape) == 2:  # stereo to mono
        audio = audio[:, 0]
    if len(audio) < sr:  # pad audio to at least 1s
        sil = np.zeros(sr - len(audio), dtype=float)
        audio = np.concatenate((audio, sil), axis=0)
    audio = audio[: sr * 30]  # truncate audio to at most 30s

    spectrogram = wav_processor(audio, sampling_rate=sr, return_tensors="pt", padding="max_length")["input_features"]

    samples = {
        "spectrogram": spectrogram,
        "raw_wav": torch.from_numpy(audio).unsqueeze(0),
        "padding_mask": torch.zeros(len(audio), dtype=torch.bool).unsqueeze(0),
    }
    if cuda_enabled:
        samples = move_to_cuda(samples)

    return samples
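prepare_one_sample produces exactly the spectrogram / raw_wav / padding_mask dictionary that generate() in models/salmonn.py consumes, so a direct, server-less pass can be sketched as below. This is an editor's illustration, not the packaged entry point (inference.py wraps the same steps); the class name, the Whisper feature extractor, the paths, and the prompt template are assumptions here.

# Sketch of direct inference with the pieces shown in this commit (assumptions noted above).
from transformers import WhisperFeatureExtractor

from models.salmonn import SALMONN
from utils import prepare_one_sample

wav_processor = WhisperFeatureExtractor.from_pretrained("/path/to/whisper-large")  # placeholder
model = SALMONN.from_config(model_config).cuda().eval()  # model_config as sketched after from_config above

samples = prepare_one_sample("/path/to/audio.wav", wav_processor)  # placeholder path
prompt = "<Speech><SpeechHere></Speech> Recognize the speech and give me the transcription."
prompts = ["USER: {}\nASSISTANT:".format(prompt.strip())]  # assumption: template applied as in config.yaml

text = model.generate(samples, {"max_new_tokens": 200, "num_beams": 4}, prompts=prompts)
print(text[0])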