Spaces: Running on Zero

Upload folder using huggingface_hub

Files changed:
- .gitattributes +4 -0
- .gitignore +121 -0
- LICENSE.txt +201 -0
- README.md +124 -7
- app.py +620 -0
- assets/M3CoT-25169-0.png +3 -0
- assets/method.png +3 -0
- assets/validation_Finance_2.mp4 +3 -0
- assets/yt--MAYaJ5cyOE_70.mp4 +3 -0
- requirements.txt +21 -0
- videoauto_r1/early_exit.py +174 -0
- videoauto_r1/modeling_qwen2_5_vl_patched.py +2018 -0
- videoauto_r1/modeling_qwen3_vl_patched.py +1824 -0
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/M3CoT-25169-0.png filter=lfs diff=lfs merge=lfs -text
+assets/method.png filter=lfs diff=lfs merge=lfs -text
+assets/validation_Finance_2.mp4 filter=lfs diff=lfs merge=lfs -text
+assets/yt--MAYaJ5cyOE_70.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED

# ignore folder
.vscode
.idea
.gradio
/experiments/
/logs/
/trash/

/data/*
!/data/data_config.yaml

dcgm
log
*.err
*.out
/wandb/
build/
dist/
.DS_Store

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
parts/
sdist/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
LICENSE.txt
ADDED

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md
CHANGED
@@ -1,12 +1,129 @@
 ---
-title: VideoAuto-
-colorFrom: blue
-colorTo: red
+title: VideoAuto-R1_Demo
+app_file: app.py
 sdk: gradio
 sdk_version: 6.2.0
-app_file: app.py
-pinned: false
 ---

# VideoAuto-R1: Video Auto Reasoning via Thinking Once, Answering Twice

<p align="left">
  <a href="https://arxiv.org/abs/2601.05175" alt="arXiv">
    <img src="https://img.shields.io/badge/arXiv-2601.05175-b31b1b.svg?style=flat" /></a>
  <a href='https://ivul-kaust.github.io/projects/videoauto-r1/'>
    <img src='https://img.shields.io/badge/Project%20Page-VideoAuto--R1-green'></a>
  <a href="https://huggingface.co/collections/IVUL-KAUST/videoauto-r1" alt="models">
    <img src="https://img.shields.io/badge/Models-HuggingFace-yellow.svg" /></a>
  <a href="https://huggingface.co/datasets/IVUL-KAUST/VideoAuto-R1-Data" alt="data">
    <img src="https://img.shields.io/badge/Data-HuggingFace-yellow.svg" /></a>
  <a href="https://github.com/IVUL-KAUST/VideoAuto-R1/blob/main/LICENSE.txt" alt="license">
    <img src="https://img.shields.io/badge/License-Apache_2.0-blue.svg" /></a>
  <a href="https://img.shields.io/github/stars/IVUL-KAUST/VideoAuto-R1" alt="stars">
    <img src="https://img.shields.io/github/stars/IVUL-KAUST/VideoAuto-R1" /></a>
</p>

## 📖 Overview
<div align="center">

![method](assets/method.png)

</div>

We propose VideoAuto-R1, a video understanding framework that adopts a "reason-when-necessary" strategy. During training, our approach follows a Thinking Once, Answering Twice paradigm: the model first generates an initial answer, then performs reasoning, and finally outputs a reviewed answer. Both answers are supervised via verifiable rewards. During inference, the model uses the confidence score of the initial answer to determine whether to proceed with reasoning.
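
At inference time, the gate itself is lightweight. Here is a minimal sketch of the decision logic (the function and variable names are illustrative; the actual implementation lives in `app.py` and `videoauto_r1/early_exit.py`, and 0.98 is the demo's default threshold):

```python
def adaptive_answer(initial_answer: str, confidence: float,
                    reviewed_answer: str, threshold: float = 0.98) -> str:
    """Keep the confident initial answer, otherwise fall back to the reviewed one."""
    if confidence >= threshold:
        # Early exit: the initial boxed answer is trusted as-is.
        return initial_answer
    # Low confidence: use the answer reviewed after the <think> pass.
    return reviewed_answer
```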

## 🔥 Updates
- **[2025-01-08]**: We have released the training code and data for VideoAuto-R1!

## Online Demo
xxxx

## Installation

```bash
git clone git@github.com:IVUL-KAUST/VideoAuto-R1.git
cd VideoAuto-R1

conda create -n videoauto-r1 python=3.12
source activate videoauto-r1

pip install -r requirements.txt

conda install "ffmpeg<8"
pip install flash-attn==2.8.0.post2 --no-build-isolation
```

The code is tested with Python 3.12, PyTorch 2.8, and CUDA 12.4 on Linux; it may also work with other versions.

## Training

Please download the data from [HuggingFace](https://huggingface.co/datasets/IVUL-KAUST/VideoAuto-R1-Data) and put it under the `data/` folder.

To train, run the following scripts:
```bash
# for Qwen2.5-VL
bash scripts/train/grpo_autothink/train_qwen2.5vl_grpo_auto_text_image_video.sh

# for Qwen3-VL
bash scripts/train/grpo_autothink/train_qwen3vl_grpo_auto_text_image_video.sh
```
Our models are trained on 32 H100 GPUs. You may need to adjust the batch size and gradient accumulation steps to match your hardware, as in the quick check below.
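
For example, to keep the effective batch size fixed when training on fewer GPUs (the numbers below are illustrative, not the values hard-coded in our scripts):

```python
# effective batch size = num_gpus * per-device batch * gradient accumulation steps
num_gpus = 8           # e.g., scaling down from 32 H100s
per_device_batch = 1
grad_accum_steps = 16  # raise this to compensate for fewer GPUs
print(num_gpus * per_device_batch * grad_accum_steps)  # 128
```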

## Evaluation

We use the [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval) framework to evaluate our models.

To evaluate the baseline Qwen models, run:
```bash
# for Qwen2.5-VL
bash scripts/eval/benchmark_qwen/eval_qwen2_5_vl_16k.sh

# for Qwen3-VL
bash scripts/eval/benchmark_qwen/eval_qwen3_vl_128k.sh
```

To evaluate our VideoAuto-R1 models, run:
```bash
# for Qwen2.5-VL
bash scripts/eval/grpo_autothink/eval_qwen2_5_vl_auto_16k.sh

# for Qwen3-VL
bash scripts/eval/grpo_autothink/eval_qwen3_vl_auto_128k.sh
```
Our models are evaluated on 8 H100 GPUs. You may need to adjust the settings to match your hardware.

Expected results:

| Benchmarks     | Qwen2.5-VL-7B | VideoAuto-R1-7B | Qwen3-VL-8B | VideoAuto-R1-8B |
| -------------- | ------------- | --------------- | ----------- | --------------- |
| VideoMME       | 66.0          | **67.3**        | 72.5        | 71.7            |
| MVBench        | 67.1          | **71.0**        | 69.4        | **72.0**        |
| LongVideoBench | 60.9          | 60.5            | 67.6        | 67.4            |
| MMVU           | 66.2          | **69.7**        | 69.9        | **71.1**        |
| VideoMMMU      | 54.7          | **58.6**        | 61.0        | **65.0**        |
| MVP            | 36.5          | **39.4**        | 40.5        | **43.0**        |
| Charades-STA   | 52.9          | **60.0**        | 44.6        | **63.7**        |
| ActivityNet-QA | 26.9          | **47.6**        | 36.1        | **51.9**        |
| Next-GQA       | 20.2          | **36.7**        | 37.1        | **44.2**        |

Depending on your environment and library versions, performance may vary slightly (±0.5%) from the results reported in the paper.

## Acknowledgement

This project builds upon the following excellent works: [Qwen-VL](https://github.com/QwenLM/Qwen3-VL), [TRL](https://github.com/huggingface/trl), [lmms-eval](https://github.com/EvolvingLMMs-Lab/lmms-eval), and others. We thank all researchers and developers who contributed to these foundational projects.

## Citation

If you use VideoAuto-R1 in your research, please cite:

```bibtex
@article{liu2026videoautor1,
  title={VideoAuto-R1: Video Auto Reasoning via Thinking Once, Answering Twice},
  author={Liu, Shuming and Zhuge, Mingchen and Zhao, Changsheng and Chen, Jun and Wu, Lemeng and Liu, Zechun and Zhu, Chenchen and Cai, Zhipeng and Zhou, Chong and Liu, Haozhe and Chang, Ernie and Suri, Saksham and Xu, Hongyu and Qian, Qi and Wen, Wei and Varadarajan, Balakrishnan and Liu, Zhuang and Xu, Hu and Bordes, Florian and Krishnamoorthi, Raghuraman and Ghanem, Bernard and Chandra, Vikas and Xiong, Yunyang},
  journal={arXiv preprint arXiv:2601.05175},
  year={2026}
}
```

This project is licensed under the Apache License 2.0. See the LICENSE file for details.

If you have any questions, please contact shuming.liu@kaust.edu.sa.
app.py
ADDED

"""
VideoAuto-R1 (Qwen3-VL) Demo
A Gradio-based chat interface for adaptive inference with image/video inputs.
"""

import os
import base64
from io import BytesIO

import torch
import gradio as gr
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer

from videoauto_r1.qwen_vl_utils.vision_process import process_vision_info
from videoauto_r1.modeling_qwen3_vl_patched import Qwen3VLForConditionalGeneration
from videoauto_r1.early_exit import compute_first_boxed_answer_probs


# ============================================================================
# Constants
# ============================================================================

COT_SYSTEM_PROMPT_ANSWER_TWICE = (
    "You are a helpful assistant.\n"
    "FIRST: Output your initial answer inside the first \\boxed{...} without any analysis or explanations. "
    "If you cannot determine the answer without reasoning, output \\boxed{Let's analyze the problem step by step.} instead.\n"
    "THEN: Think through the reasoning as an internal monologue enclosed within <think>...</think>.\n"
    "AT LAST: Output the final answer again inside \\boxed{...}. If you believe the previous answer was correct, repeat it; otherwise, correct it.\n"
    "Output format: \\boxed{...}<think>...</think>\\boxed{...}"
)
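
# An illustrative (hypothetical) response that follows the format above:
#   \boxed{B}<think>The person glides across an ice rink on skates, so the
#   action is skating ...</think>\boxed{B}
# `Qwen3VLAutoThinkDemo.generate` below splits on <think>/</think> to recover
# the initial answer, the reasoning trace, and the reviewed answer.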

VIDEO_EXTS = (".mp4", ".avi", ".mov", ".mkv", ".flv", ".wmv", ".webm")
IMAGE_EXTS = (".jpg", ".jpeg", ".png", ".bmp", ".gif", ".webp", ".tiff")

CUSTOM_CSS = """
#chatbot .message[class*="user"] {
    max-width: 50% !important;
}

#chatbot .message[class*="bot"],
#chatbot .message[class*="assistant"] {
    max-width: 60% !important;
}

#chatbot .message > div {
    width: 100% !important;
    max-width: 100% !important;
}
"""


# ============================================================================
# Utility Functions
# ============================================================================


def detect_media_type(file_path: str | None) -> str | None:
    """
    Detect media type from file extension.

    Args:
        file_path: Path to the media file

    Returns:
        'image', 'video', or None
    """
    if not file_path:
        return None

    p = file_path.lower()
    if p.endswith(VIDEO_EXTS):
        return "video"
    if p.endswith(IMAGE_EXTS):
        return "image"

    # Fallback: try to open as image
    try:
        Image.open(file_path)
        return "image"
    except Exception:
        return "video"


# ============================================================================
# Model Class
# ============================================================================


class Qwen3VLAutoThinkDemo:
    """Main model class for Qwen3-VL with adaptive inference."""

    def __init__(self, model_path="IVUL-KAUST/VideoAuto-R1-Qwen3-VL-8B"):
        """Initialize model, processor, and tokenizer."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Load model
        self.model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_path,
            dtype="bfloat16",
            device_map="auto",
            attn_implementation="sdpa",
        ).eval()

        self.processor = AutoProcessor.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.system_prompt = COT_SYSTEM_PROMPT_ANSWER_TWICE

    def process_image(
        self,
        image_path: str,
        image_min_pixels: int = 128 * 28 * 28,
        image_max_pixels: int = 16384 * 28 * 28,
    ) -> dict | None:
        """
        Process image file to base64 format.

        Args:
            image_path: Path to image file
            image_min_pixels: Minimum pixel count
            image_max_pixels: Maximum pixel count

        Returns:
            Dictionary with image data or None
        """
        if image_path is None:
            return None

        image = Image.open(image_path).convert("RGB")
        buffer = BytesIO()
        image.save(buffer, format="JPEG")
        base64_bytes = base64.b64encode(buffer.getvalue())
        base64_string = base64_bytes.decode("utf-8")

        return {
            "type": "image",
            "image": f"data:image/jpeg;base64,{base64_string}",
            "min_pixels": image_min_pixels,
            "max_pixels": image_max_pixels,
        }

    def process_video(
        self,
        video_path: str,
        video_min_pixels: int = 16 * 28 * 28,
        video_max_pixels: int = 768 * 28 * 28,
        video_total_pixels: int = 128000 * 28 * 28,
        min_frames: int = 4,
        max_frames: int = 64,
        fps: float = 2.0,
    ) -> dict | None:
        """
        Process video file configuration.

        Args:
            video_path: Path to video file
            video_min_pixels: Minimum pixels per frame
            video_max_pixels: Maximum pixels per frame
            video_total_pixels: Total pixels across all frames
            min_frames: Minimum number of frames
            max_frames: Maximum number of frames
            fps: Frames per second for sampling

        Returns:
            Dictionary with video configuration or None
        """
        if video_path is None:
            return None

        return {
            "type": "video",
            "video": video_path,
            "min_pixels": video_min_pixels,
            "max_pixels": video_max_pixels,
            "total_pixels": video_total_pixels,
            "min_frames": min_frames,
            "max_frames": max_frames,
            "fps": fps,
        }

    def generate(
        self,
        media_input: str | None,
        prompt: str,
        early_exit_thresh: float,
        temperature: float,
        max_new_tokens: int = 4096,
    ) -> dict:
        """
        Generate response with adaptive inference.

        Args:
            media_input: Path to media file
            prompt: Text prompt
            early_exit_thresh: Confidence threshold for early exit
            temperature: Sampling temperature
            max_new_tokens: Maximum tokens to generate

        Returns:
            Dictionary containing response and metadata
        """
        # Prepare message
        message = [{"role": "system", "content": self.system_prompt}]
        content_parts = []

        # Process media input
        if media_input is not None:
            media_type = detect_media_type(media_input)

            if media_type == "video":
                video_dict = self.process_video(media_input)
                if video_dict:
                    content_parts.append(video_dict)
            elif media_type == "image":
                image_dict = self.process_image(media_input)
                if image_dict:
                    content_parts.append(image_dict)

        # Add text prompt
        content_parts.append({"type": "text", "text": prompt})
        message.append({"role": "user", "content": content_parts})

        # Apply chat template
        text = self.processor.apply_chat_template([message], tokenize=False, add_generation_prompt=True)

        # Process vision inputs
        image_inputs, video_inputs, video_kwargs = process_vision_info(
            [message],
            image_patch_size=16,
            return_video_kwargs=True,
            return_video_metadata=True,
        )

        if video_inputs is not None:
            video_inputs, video_metadatas = zip(*video_inputs)
            video_inputs = list(video_inputs)
            video_metadatas = list(video_metadatas)
        else:
            video_metadatas = None

        # Prepare inputs
        inputs = self.processor(
            text=text,
            images=image_inputs,
            videos=video_inputs,
            video_metadata=video_metadatas,
            do_resize=False,
            padding=True,
            return_tensors="pt",
            **video_kwargs,
        )
        inputs = inputs.to(self.device)

        # Generation configuration
        gen_kwargs = {
            "max_new_tokens": max_new_tokens,
            "temperature": temperature if temperature > 0 else None,
            "do_sample": temperature > 0,
            "top_p": 0.9 if temperature > 0 else None,
            "num_beams": 1,
            "use_cache": True,
            "return_dict_in_generate": True,
            "output_scores": True,
        }

        # Generate response
        with torch.no_grad():
            gen_out = self.model.generate(
                **inputs,
                eos_token_id=self.tokenizer.eos_token_id,
                pad_token_id=self.tokenizer.pad_token_id,
                **gen_kwargs,
            )

        # Decode output
        generated_ids = gen_out.sequences[0][len(inputs.input_ids[0]) :]
        answer = self.processor.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)

        # Compute confidence of the initial boxed answer
        first_box_probs = compute_first_boxed_answer_probs(
            b=0,
            gen_ids=generated_ids,
            gen_out=gen_out,
            ans=answer,
            task="",
            tokenizer=self.tokenizer,
        )

        # Parse response
        first_answer = answer.split("<think>")[0]
        second_answer = answer.split("</think>")[-1] if "</think>" in answer else first_answer
        reasoning = answer.split("<think>")[-1].split("</think>")[0] if "<think>" in answer else "N/A"

        # Determine inference mode
        if first_box_probs >= early_exit_thresh:
            need_cot = False
            reasoning = "N/A"  # the reasoning trace is not surfaced on early exit
        else:
            need_cot = True

        return {
            "full_response": answer,
            "first_answer": first_answer,
            "confidence": f"{first_box_probs:.4f}",
            "need_cot": need_cot,
            "reasoning": reasoning,
            "second_answer": second_answer,
        }
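
# Minimal standalone usage sketch (illustrative; assumes a CUDA machine and
# the checkpoint above, while the Gradio callbacks below are the real entry
# point for this app):
#   model = Qwen3VLAutoThinkDemo()
#   out = model.generate("assets/yt--MAYaJ5cyOE_70.mp4",
#                        "Which action is shown? Put your final answer in \\boxed{}.",
#                        early_exit_thresh=0.98, temperature=0.0)
#   print(out["first_answer"], out["confidence"], out["need_cot"])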

# ============================================================================
# Gradio Callback Functions
# ============================================================================


def update_preview(file_path: str | None):
    """Update preview widgets based on media type."""
    mtype = detect_media_type(file_path)

    if mtype == "image":
        return (
            gr.update(value=file_path, visible=True),  # image_preview
            gr.update(value=None, visible=False),  # video_preview
        )
    elif mtype == "video":
        return (
            gr.update(value=None, visible=False),  # image_preview
            gr.update(value=file_path, visible=True),  # video_preview
        )
    else:
        return (
            gr.update(value=None, visible=False),
            gr.update(value=None, visible=False),
        )


def chat_generate(
    media_path,
    user_text,
    messages_state,
    chatbot_state,
    last_media_state,
    early_exit_thresh,
    temperature,
):
    """Handle chat message generation."""
    if user_text is None or str(user_text).strip() == "":
        raise gr.Error("Chat message cannot be empty.")

    # Clear history if media changed
    if (
        (media_path is not None)
        and (last_media_state is not None)
        and (os.path.basename(media_path) != os.path.basename(last_media_state))
    ):
        messages_state = []
        chatbot_state = []

    # Initialize system prompt
    if len(messages_state) == 0:
        messages_state.append({"role": "system", "content": demo_model.system_prompt})

    # Prepare user message
    content_parts = []
    if media_path is not None:
        mtype = detect_media_type(media_path)
        if mtype == "video":
            vd = demo_model.process_video(media_path)
            if vd:
                content_parts.append(vd)
        elif mtype == "image":
            imd = demo_model.process_image(media_path)
            if imd:
                content_parts.append(imd)

    content_parts.append({"type": "text", "text": user_text})
    messages_state.append({"role": "user", "content": content_parts})

    # Generate response
    result = demo_model.generate(media_path, user_text, early_exit_thresh, temperature)

    # Format assistant response
    first_ans = (result.get("first_answer") or "").strip()
    conf = result.get("confidence", "N/A")
    need_cot = result.get("need_cot", "")
    reasoning = result.get("reasoning", "")
    final_ans = (result.get("second_answer") or "").strip()

    if need_cot:
        decision_prompt = f"Continue CoT Reasoning (confidence = {conf})"
    else:
        decision_prompt = f"Early Exit (confidence = {conf})"

    assistant_display_1 = f"**Initial Answer:**\n{first_ans}\n\n" f"**{decision_prompt}**\n\n"

    # Update state
    messages_state.append({"role": "assistant", "content": assistant_display_1})
    chatbot_state.append({"role": "user", "content": user_text})
    chatbot_state.append({"role": "assistant", "content": assistant_display_1})

    if need_cot:
        assistant_display_2 = (
            f"\n\n**<think>**\n\n{reasoning}\n**</think>**\n\n" f"**Reviewed Answer:**\n{final_ans}\n\n"
        )

        messages_state.append({"role": "assistant", "content": assistant_display_2})
        chatbot_state.append({"role": "assistant", "content": assistant_display_2})

    # Disable textbox and send button after generation to prevent interleaved conversation
    return (
        messages_state,
        chatbot_state,
        media_path,
        gr.update(value="", interactive=False),  # disable and clear textbox
        gr.update(interactive=False),  # disable send button
    )


def clear_history():
    """Clear all chat history and reset the interface."""
    return (
        [],  # messages_state
        [],  # chatbot_state
        None,  # last_media_state
        gr.update(value=None),  # file
        gr.update(value=None, visible=False),  # image_preview
        gr.update(value=None, visible=False),  # video_preview
        gr.update(value="", interactive=True),  # re-enable and clear textbox
        gr.update(interactive=True),  # re-enable send button
    )


# ============================================================================
# Example Data
# ============================================================================

EXAMPLES = [
    [
        "assets/yt--MAYaJ5cyOE_70.mp4",
        "Question: Which one of these descriptions correctly matches the actions in the video?\nOptions:\n(A) officiating\n(B) skating\n(C) stopping\n(D) playing sports\nPut your final answer in \\boxed{}.",
        # GT is B
    ],
    [
        "assets/validation_Finance_2.mp4",
        "Using the Arbitrage Pricing Theory model shown above, calculate the expected return E(rp) if the risk-free rate increases to 5%. All other risk premiums (RP) and beta (\\beta) values remain unchanged.\nOptions:\nA. 13.4%\nB. 14.8%\nC. 15.6%\nD. 16.1%\nE. 16.5%\nF. 16.9%\nG. 17.5%\nH. 17.8%\nI. 17.2%\nJ. 18.1%\nPut your final answer in \\boxed{}.",
        # GT is I
    ],
    [
        "assets/M3CoT-25169-0.png",
        "Within the image, you'll notice several purchased items. And we assume that the water temperature is 4 °C at this time.\nWithin the image, can you identify the count of items among the provided options that will go below the waterline?\nA. 0\nB. 1\nC. 2\nD. 3\nPut your final answer in \\boxed{}.",
        # GT is B
    ],
    [
        None,
        "Determine the value of the parameter $m$ such that the equation $(m-2)x^2 + (m^2-4m+3)x - (6m^2-2) = 0$ has real solutions, and the sum of the cubes of these solutions is equal to zero.\nPut your final answer in \\boxed{}.",
        # GT is 3
    ],
]


# ============================================================================
# Gradio Interface
# ============================================================================


def create_demo():
    """Create and configure the Gradio interface."""
    # Custom CSS is applied at Blocks creation (gr.Blocks accepts `css`;
    # Blocks.launch does not).
    with gr.Blocks(title="VideoAuto-R1 Demo", css=CUSTOM_CSS) as demo:
        gr.Markdown("# VideoAuto-R1 (Qwen3-VL-8B) Demo")

        # Display system prompt
        with gr.Accordion("System Prompt", open=False):
            gr.Markdown(f"```\n{COT_SYSTEM_PROMPT_ANSWER_TWICE}\n```")

        # State variables
        messages_state = gr.State([])
        chatbot_state = gr.State([])
        last_media_state = gr.State(None)

        with gr.Row():
            # Left column: media input and settings
            with gr.Column(scale=3):
                media_input = gr.File(
                    label="Upload Image or Video",
                    file_types=["image", "video"],
                    type="filepath",
                )
                image_preview = gr.Image(label="Image Preview", visible=False)
                video_preview = gr.Video(label="Video Preview", visible=False)

                with gr.Accordion("Advanced Settings", open=True):
                    early_exit_thresh = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.98,
                        step=0.01,
                        label="Early Exit Threshold",
                    )
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=2.0,
                        value=0.0,
                        step=0.1,
                        label="Temperature",
                    )

            # Right column: chat interface
            with gr.Column(scale=7):
                chatbot = gr.Chatbot(
                    label="Chat",
                    elem_id="chatbot",
                    height=600,
                    sanitize_html=False,
                )
                textbox = gr.Textbox(
                    show_label=False,
                    placeholder="Enter text and press ENTER",
                    lines=2,
                )
                with gr.Row():
                    send_btn = gr.Button("Send", variant="primary")
                    clear_btn = gr.Button("Clear")

                gr.Markdown(
                    "Please click the **Clear** button before starting a new conversation or trying a new example."
                )

        # Event handlers
        media_input.change(
            fn=update_preview,
            inputs=[media_input],
            outputs=[image_preview, video_preview],
        )

        # Send button click: generate response and disable input controls
        send_btn.click(
            fn=chat_generate,
            inputs=[
                media_input,
                textbox,
                messages_state,
                chatbot_state,
                last_media_state,
                early_exit_thresh,
                temperature,
            ],
            outputs=[messages_state, chatbot_state, last_media_state, textbox, send_btn],
        ).then(
            fn=lambda cs: cs,
            inputs=[chatbot_state],
            outputs=[chatbot],
        )

        # Textbox submit: generate response and disable input controls
        textbox.submit(
            fn=chat_generate,
            inputs=[
                media_input,
                textbox,
                messages_state,
                chatbot_state,
                last_media_state,
                early_exit_thresh,
                temperature,
            ],
            outputs=[messages_state, chatbot_state, last_media_state, textbox, send_btn],
        ).then(
            fn=lambda cs: cs,
            inputs=[chatbot_state],
            outputs=[chatbot],
        )

        # Clear button: reset all states and re-enable input controls
        clear_btn.click(
            fn=clear_history,
            inputs=[],
            outputs=[
                messages_state,
                chatbot_state,
                last_media_state,
                media_input,
                image_preview,
                video_preview,
                textbox,
                send_btn,
            ],
        ).then(
            fn=lambda cs: cs,
            inputs=[chatbot_state],
            outputs=[chatbot],
        )

        gr.Examples(
            examples=EXAMPLES,
            inputs=[media_input, textbox],
            label="Examples",
            cache_examples=False,
        )

    return demo


# ============================================================================
# Main Entry Point
# ============================================================================

if __name__ == "__main__":
    # Initialize model
    demo_model = Qwen3VLAutoThinkDemo()

    # Create and launch demo
    demo = create_demo()
    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860,
        allowed_paths=["assets"],
        debug=True,
    )
assets/M3CoT-25169-0.png
ADDED (Git LFS)

assets/method.png
ADDED (Git LFS)

assets/validation_Finance_2.mp4
ADDED (Git LFS)
version https://git-lfs.github.com/spec/v1
oid sha256:5858c001c91b8c3c8f716c810fda90a6b3a2d97d74adc08c2b374c578877f86d
size 4029792

assets/yt--MAYaJ5cyOE_70.mp4
ADDED (Git LFS)
version https://git-lfs.github.com/spec/v1
oid sha256:54d8d3e3c4f3bbf135994bddccee34d7ad285f1097c59e81efadeeb2b2e7f2d1
size 361753
requirements.txt
ADDED

# videoauto-r1 dependencies
torch==2.8.0
torchvision==0.23.0
transformers==4.57.1
vllm==0.11.0
deepspeed==0.16.8
accelerate==1.11.0
trl==0.23.0
math-verify==0.8.0
torchcodec==0.7.0
tensorboardX
av

# lmms-eval dependencies
loguru
pytablewriter
sacrebleu>=1.5.0
evaluate>=0.4.0
sqlitedict==2.1.0
tenacity==8.3.0
decord==0.6.0
videoauto_r1/early_exit.py
ADDED

import re

import torch
import torch.nn.functional as F


def compute_first_boxed_answer_probs(b, gen_ids, gen_out, ans, task, tokenizer):
    # Extract the log-probability of each generated token from the per-step scores.
    cur_lp = []
    for t, tok_id in enumerate(gen_ids):
        if t >= len(gen_out.scores):
            break
        step_scores = gen_out.scores[t][b]  # [V]
        step_logprobs = F.log_softmax(step_scores, dim=-1)
        cur_lp.append(step_logprobs[tok_id.item()].unsqueeze(0))
    lp_vec = torch.cat(cur_lp, dim=0) if cur_lp else torch.empty(0)

    if task.startswith("mvp_"):
        ans = f"\\boxed{{Answer:{ans}"
        _prefix_ids_tensor = tokenizer(
            "\\boxed{Answer:",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids[0]
        prefix_ids = _prefix_ids_tensor.to(
            device=gen_ids.device,
            dtype=gen_ids.dtype,
        )
        fake_lp = torch.zeros(
            prefix_ids.shape[0],
            device=lp_vec.device,
            dtype=lp_vec.dtype,
        )
        gen_ids = torch.cat([prefix_ids, gen_ids], dim=0)
        lp_vec = torch.cat([fake_lp, lp_vec], dim=0)

    first = extract_first_boxed_content(ans)
    first_box_tok_logprobs = None

    if first:
        first_content, _, _ = first  # content only
        # Exclude cases starting with "Let's analyze" (ignoring leading whitespace).
        if not first_content.lstrip().startswith("Let's analyze"):
            # Normalize (normalize_text may clean up choices/letters to forms like 'H', etc.)
            first_box_norm = normalize_text(first_content)

            # Locate the normalized text within the token sequence
            span = find_token_span_for_text(
                gen_ids=gen_ids,
                text_piece=first_box_norm,
                tokenizer=tokenizer,
                decoded_answer=ans,
            )
            if span is not None and lp_vec.numel() > 0:
                s, e = span
                # Defensive clipping (shouldn't be necessary in theory)
                s = max(0, min(s, lp_vec.shape[0]))
                e = max(0, min(e, lp_vec.shape[0]))
                if e > s:
                    first_box_tok_logprobs = lp_vec[s:e]

    # For mvp, the answer looks like "Answer: A", so use the last token only
    # (guard against the case where no span was located).
    if task.startswith("mvp_") and first_box_tok_logprobs is not None:
        first_box_tok_logprobs = first_box_tok_logprobs[-1]

    if first_box_tok_logprobs is None:
        first_box_probs = -1
    else:
        first_box_probs = first_box_tok_logprobs.mean().exp().item()

    return first_box_probs
|
| 70 |
+
|
| 71 |
+
_PATTERN_BOXED = re.compile(r"\\boxed\{([^{}]*(?:\{(?:[^{}]+|\{[^{}]*\})*\}[^{}]*)*)\}")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def extract_first_boxed_content(text: str):
|
| 75 |
+
"""
|
| 76 |
+
Returns:
|
| 77 |
+
(content, inner_start, inner_end)
|
| 78 |
+
- content: inner text of the first \\boxed{...} (group 1)
|
| 79 |
+
- inner_start, inner_end: character indices of that inner content in `text` (end is exclusive)
|
| 80 |
+
|
| 81 |
+
Requirement: the text must contain at least two \\boxed{...} occurrences; otherwise return False.
|
| 82 |
+
"""
|
| 83 |
+
it = _PATTERN_BOXED.finditer(text)
|
| 84 |
+
m1 = next(it, None)
|
| 85 |
+
if m1 is None:
|
| 86 |
+
return False
|
| 87 |
+
if next(it, None) is None: # require at least two boxed occurrences
|
| 88 |
+
return False
|
| 89 |
+
content = m1.group(1)
|
| 90 |
+
inner_start, inner_end = m1.span(1) # return the span of the *inner* content only
|
| 91 |
+
return content, inner_start, inner_end
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _find_subsequence(haystack_ids, needle_ids):
|
| 95 |
+
"""
|
| 96 |
+
Return (start_idx, end_idx); return None if not found.
|
| 97 |
+
"""
|
| 98 |
+
if not needle_ids:
|
| 99 |
+
return None
|
| 100 |
+
n = len(needle_ids)
|
| 101 |
+
limit = len(haystack_ids) - n + 1
|
| 102 |
+
for i in range(max(0, 0), max(0, limit)):
|
| 103 |
+
if haystack_ids[i : i + n] == needle_ids:
|
| 104 |
+
return i, i + n
|
| 105 |
+
# Edge case: if the needle is longer than the haystack, fail directly
|
| 106 |
+
if limit <= 0 and haystack_ids == needle_ids:
|
| 107 |
+
return 0, len(haystack_ids)
|
| 108 |
+
return None
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _first_nonempty_find(text, variants):
|
| 112 |
+
"""Find the first occurring variant in `text` (in order). Return (variant, char_pos) or (None, -1)."""
|
| 113 |
+
for v in variants:
|
| 114 |
+
if not v:
|
| 115 |
+
continue
|
| 116 |
+
pos = text.find(v)
|
| 117 |
+
if pos != -1:
|
| 118 |
+
return v, pos
|
| 119 |
+
return None, -1
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def find_token_span_for_text(gen_ids, text_piece, tokenizer, decoded_answer):
|
| 123 |
+
"""
|
| 124 |
+
Goal: Given the decoded complete answer string `decoded_answer`, its generated token sequence `gen_ids`,
|
| 125 |
+
and a text fragment `text_piece`, find the corresponding token span for that fragment.
|
| 126 |
+
|
| 127 |
+
Strategy:
|
| 128 |
+
A) Encode `text_piece` into tokens and search it as a subsequence in `gen_ids`
|
| 129 |
+
using multiple textual variants: original / stripped / lstrip / prefixed with a space.
|
| 130 |
+
B) If (A) fails: locate the fragment via `str.find()` in `decoded_answer`, then
|
| 131 |
+
re-encode `decoded_answer[:pos]` and the chosen fragment to infer the token span by length.
|
| 132 |
+
|
| 133 |
+
Returns: (tok_start, tok_end) or None
|
| 134 |
+
"""
|
| 135 |
+
# Common variants: original, strip, lstrip, prefixed space
|
| 136 |
+
candidates_text = [
|
| 137 |
+
text_piece,
|
| 138 |
+
text_piece.strip(),
|
| 139 |
+
text_piece.lstrip(),
|
| 140 |
+
(" " + text_piece) if not text_piece.startswith(" ") else text_piece,
|
| 141 |
+
]
|
| 142 |
+
# (A) Direct token subsequence match
|
| 143 |
+
for cand in candidates_text:
|
| 144 |
+
cand_ids = tokenizer.encode(cand, add_special_tokens=False)
|
| 145 |
+
if not cand_ids:
|
| 146 |
+
continue
|
| 147 |
+
span = _find_subsequence(gen_ids, cand_ids)
|
| 148 |
+
if span is not None:
|
| 149 |
+
return span
|
| 150 |
+
|
| 151 |
+
# (B) Fallback: use character position + re-encoding to estimate the token span
|
| 152 |
+
chosen, pos = _first_nonempty_find(decoded_answer, candidates_text)
|
| 153 |
+
if chosen is not None:
|
| 154 |
+
prefix_ids = tokenizer.encode(decoded_answer[:pos], add_special_tokens=False)
|
| 155 |
+
chosen_ids = tokenizer.encode(chosen, add_special_tokens=False)
|
| 156 |
+
start = len(prefix_ids)
|
| 157 |
+
end = start + len(chosen_ids)
|
| 158 |
+
if end <= len(gen_ids):
|
| 159 |
+
return (start, end)
|
| 160 |
+
|
| 161 |
+
return None
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
_CHOICE_PAREN = re.compile(r"""^\s*[\(\[\{]\s*([A-Za-z])\s*[\)\]\}]\s*(?:[.)/:;\-]\s*)?""", re.X)
|
| 165 |
+
_CHOICE_BARE_WITH_DELIM = re.compile(r"""^\s*([A-Za-z])\s*[.)/:;\-]\s*""", re.X)
|
| 166 |
+
_CHOICE_SINGLE_LETTER = re.compile(r"""^\s*([A-Za-z])\s*[.]?\s*$""", re.X)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def normalize_text(s):
|
| 170 |
+
m = _CHOICE_PAREN.match(s) or _CHOICE_BARE_WITH_DELIM.match(s) or _CHOICE_SINGLE_LETTER.match(s)
|
| 171 |
+
if m:
|
| 172 |
+
return m.group(1)
|
| 173 |
+
else:
|
| 174 |
+
return s
|
videoauto_r1/modeling_qwen2_5_vl_patched.py
ADDED
|
@@ -0,0 +1,2018 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 2 |
+
# This file was automatically generated from src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py.
|
| 3 |
+
# Do NOT edit this file manually as any edits will be overwritten by the generation of
|
| 4 |
+
# the file from the modular. If any change should be done, please apply the change to the
|
| 5 |
+
# modular_qwen2_5_vl.py file directly. One of our CI enforces this.
|
| 6 |
+
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
| 7 |
+
# coding=utf-8
|
| 8 |
+
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
|
| 9 |
+
#
|
| 10 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
| 11 |
+
# and OPT implementations in this library. It has been modified from its
|
| 12 |
+
# original forms to accommodate minor architectural differences compared
|
| 13 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
| 14 |
+
#
|
| 15 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 16 |
+
# you may not use this file except in compliance with the License.
|
| 17 |
+
# You may obtain a copy of the License at
|
| 18 |
+
#
|
| 19 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 20 |
+
#
|
| 21 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 22 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 23 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 24 |
+
# See the License for the specific language governing permissions and
|
| 25 |
+
# limitations under the License.
|
| 26 |
+
|
| 27 |
+
from dataclasses import dataclass
|
| 28 |
+
from typing import Any, Callable, Optional, Union
|
| 29 |
+
|
| 30 |
+
import torch
|
| 31 |
+
import torch.nn as nn
|
| 32 |
+
import torch.nn.functional as F
|
| 33 |
+
|
| 34 |
+
from transformers.activations import ACT2FN
|
| 35 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 36 |
+
from transformers.generation import GenerationMixin
|
| 37 |
+
from transformers.masking_utils import (
|
| 38 |
+
create_causal_mask,
|
| 39 |
+
create_sliding_window_causal_mask,
|
| 40 |
+
)
|
| 41 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
| 42 |
+
from transformers.modeling_layers import GradientCheckpointingLayer
|
| 43 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
|
| 44 |
+
from transformers.modeling_rope_utils import dynamic_rope_update, ROPE_INIT_FUNCTIONS
|
| 45 |
+
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
|
| 46 |
+
|
| 47 |
+
from transformers.models.qwen2.modeling_qwen2 import Qwen2RMSNorm
|
| 48 |
+
from transformers.models.qwen2_5_vl.configuration_qwen2_5_vl import (
|
| 49 |
+
Qwen2_5_VLConfig,
|
| 50 |
+
Qwen2_5_VLTextConfig,
|
| 51 |
+
Qwen2_5_VLVisionConfig,
|
| 52 |
+
)
|
| 53 |
+
from transformers.processing_utils import Unpack
|
| 54 |
+
from transformers.utils import (
|
| 55 |
+
auto_docstring,
|
| 56 |
+
can_return_tuple,
|
| 57 |
+
is_torchdynamo_compiling,
|
| 58 |
+
logging,
|
| 59 |
+
TransformersKwargs,
|
| 60 |
+
)
|
| 61 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
logger = logging.get_logger(__name__)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class Qwen2_5_VLMLP(nn.Module):
|
| 68 |
+
def __init__(self, config, bias: bool = False):
|
| 69 |
+
super().__init__()
|
| 70 |
+
self.hidden_size = config.hidden_size
|
| 71 |
+
self.intermediate_size = config.intermediate_size
|
| 72 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
|
| 73 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
|
| 74 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias)
|
| 75 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 76 |
+
|
| 77 |
+
def forward(self, hidden_state):
|
| 78 |
+
return self.down_proj(
|
| 79 |
+
self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state)
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class Qwen2_5_VisionPatchEmbed(nn.Module):
|
| 84 |
+
def __init__(
|
| 85 |
+
self,
|
| 86 |
+
patch_size: int = 14,
|
| 87 |
+
temporal_patch_size: int = 2,
|
| 88 |
+
in_channels: int = 3,
|
| 89 |
+
embed_dim: int = 1152,
|
| 90 |
+
) -> None:
|
| 91 |
+
super().__init__()
|
| 92 |
+
self.patch_size = patch_size
|
| 93 |
+
self.temporal_patch_size = temporal_patch_size
|
| 94 |
+
self.in_channels = in_channels
|
| 95 |
+
self.embed_dim = embed_dim
|
| 96 |
+
|
| 97 |
+
kernel_size = [temporal_patch_size, patch_size, patch_size]
|
| 98 |
+
self.proj = nn.Conv3d(
|
| 99 |
+
in_channels,
|
| 100 |
+
embed_dim,
|
| 101 |
+
kernel_size=kernel_size,
|
| 102 |
+
stride=kernel_size,
|
| 103 |
+
bias=False,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| 107 |
+
target_dtype = self.proj.weight.dtype
|
| 108 |
+
hidden_states = hidden_states.view(
|
| 109 |
+
-1,
|
| 110 |
+
self.in_channels,
|
| 111 |
+
self.temporal_patch_size,
|
| 112 |
+
self.patch_size,
|
| 113 |
+
self.patch_size,
|
| 114 |
+
)
|
| 115 |
+
hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(
|
| 116 |
+
-1, self.embed_dim
|
| 117 |
+
)
|
| 118 |
+
return hidden_states
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class Qwen2_5_VisionRotaryEmbedding(nn.Module):
|
| 122 |
+
inv_freq: torch.Tensor # fix linting for `register_buffer`
|
| 123 |
+
|
| 124 |
+
def __init__(self, dim: int, theta: float = 10000.0) -> None:
|
| 125 |
+
super().__init__()
|
| 126 |
+
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
|
| 127 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 128 |
+
|
| 129 |
+
def forward(self, seqlen: int) -> torch.Tensor:
|
| 130 |
+
seq = torch.arange(
|
| 131 |
+
seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
|
| 132 |
+
)
|
| 133 |
+
freqs = torch.outer(seq, self.inv_freq)
|
| 134 |
+
return freqs
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class Qwen2_5_VLPatchMerger(nn.Module):
|
| 138 |
+
def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
|
| 139 |
+
super().__init__()
|
| 140 |
+
self.hidden_size = context_dim * (spatial_merge_size**2)
|
| 141 |
+
self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6)
|
| 142 |
+
self.mlp = nn.Sequential(
|
| 143 |
+
nn.Linear(self.hidden_size, self.hidden_size),
|
| 144 |
+
nn.GELU(),
|
| 145 |
+
nn.Linear(self.hidden_size, dim),
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 149 |
+
x = self.mlp(self.ln_q(x).view(-1, self.hidden_size))
|
| 150 |
+
return x
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def rotate_half(x):
|
| 154 |
+
"""Rotates half the hidden dims of the input."""
|
| 155 |
+
x1 = x[..., : x.shape[-1] // 2]
|
| 156 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
| 157 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def apply_rotary_pos_emb_vision(
|
| 161 |
+
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
|
| 162 |
+
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 163 |
+
orig_q_dtype = q.dtype
|
| 164 |
+
orig_k_dtype = k.dtype
|
| 165 |
+
q, k = q.float(), k.float()
|
| 166 |
+
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
|
| 167 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
| 168 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
| 169 |
+
q_embed = q_embed.to(orig_q_dtype)
|
| 170 |
+
k_embed = k_embed.to(orig_k_dtype)
|
| 171 |
+
return q_embed, k_embed
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 175 |
+
"""
|
| 176 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
| 177 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
| 178 |
+
"""
|
| 179 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| 180 |
+
if n_rep == 1:
|
| 181 |
+
return hidden_states
|
| 182 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(
|
| 183 |
+
batch, num_key_value_heads, n_rep, slen, head_dim
|
| 184 |
+
)
|
| 185 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def eager_attention_forward(
|
| 189 |
+
module: nn.Module,
|
| 190 |
+
query: torch.Tensor,
|
| 191 |
+
key: torch.Tensor,
|
| 192 |
+
value: torch.Tensor,
|
| 193 |
+
attention_mask: Optional[torch.Tensor],
|
| 194 |
+
scaling: float,
|
| 195 |
+
dropout: float = 0.0,
|
| 196 |
+
**kwargs,
|
| 197 |
+
):
|
| 198 |
+
key_states = repeat_kv(key, module.num_key_value_groups)
|
| 199 |
+
value_states = repeat_kv(value, module.num_key_value_groups)
|
| 200 |
+
|
| 201 |
+
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
|
| 202 |
+
if attention_mask is not None:
|
| 203 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
| 204 |
+
attn_weights = attn_weights + causal_mask
|
| 205 |
+
|
| 206 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
|
| 207 |
+
query.dtype
|
| 208 |
+
)
|
| 209 |
+
attn_weights = nn.functional.dropout(
|
| 210 |
+
attn_weights, p=dropout, training=module.training
|
| 211 |
+
)
|
| 212 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
| 213 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 214 |
+
|
| 215 |
+
return attn_output, attn_weights
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class Qwen2_5_VLVisionAttention(nn.Module):
|
| 219 |
+
def __init__(self, config: Qwen2_5_VLVisionConfig) -> None:
|
| 220 |
+
super().__init__()
|
| 221 |
+
self.dim = config.hidden_size
|
| 222 |
+
self.num_heads = config.num_heads
|
| 223 |
+
self.head_dim = self.dim // self.num_heads
|
| 224 |
+
self.num_key_value_groups = 1 # needed for eager attention
|
| 225 |
+
self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
|
| 226 |
+
self.proj = nn.Linear(self.dim, self.dim)
|
| 227 |
+
self.scaling = self.head_dim**-0.5
|
| 228 |
+
self.config = config
|
| 229 |
+
self.attention_dropout = 0.0
|
| 230 |
+
self.is_causal = False
|
| 231 |
+
|
| 232 |
+
def forward(
|
| 233 |
+
self,
|
| 234 |
+
hidden_states: torch.Tensor,
|
| 235 |
+
cu_seqlens: torch.Tensor,
|
| 236 |
+
rotary_pos_emb: Optional[torch.Tensor] = None,
|
| 237 |
+
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
|
| 238 |
+
**kwargs,
|
| 239 |
+
) -> torch.Tensor:
|
| 240 |
+
seq_length = hidden_states.shape[0]
|
| 241 |
+
query_states, key_states, value_states = (
|
| 242 |
+
self.qkv(hidden_states)
|
| 243 |
+
.reshape(seq_length, 3, self.num_heads, -1)
|
| 244 |
+
.permute(1, 0, 2, 3)
|
| 245 |
+
.unbind(0)
|
| 246 |
+
)
|
| 247 |
+
if position_embeddings is None:
|
| 248 |
+
logger.warning_once(
|
| 249 |
+
"The attention layers in this model are transitioning from computing the RoPE embeddings internally "
|
| 250 |
+
"through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed "
|
| 251 |
+
"`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be "
|
| 252 |
+
"removed and `position_embeddings` will be mandatory."
|
| 253 |
+
)
|
| 254 |
+
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
|
| 255 |
+
cos = emb.cos()
|
| 256 |
+
sin = emb.sin()
|
| 257 |
+
else:
|
| 258 |
+
cos, sin = position_embeddings
|
| 259 |
+
query_states, key_states = apply_rotary_pos_emb_vision(
|
| 260 |
+
query_states, key_states, cos, sin
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
query_states = query_states.transpose(0, 1).unsqueeze(0)
|
| 264 |
+
key_states = key_states.transpose(0, 1).unsqueeze(0)
|
| 265 |
+
value_states = value_states.transpose(0, 1).unsqueeze(0)
|
| 266 |
+
|
| 267 |
+
attention_interface: Callable = eager_attention_forward
|
| 268 |
+
if self.config._attn_implementation != "eager":
|
| 269 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[
|
| 270 |
+
self.config._attn_implementation
|
| 271 |
+
]
|
| 272 |
+
|
| 273 |
+
if self.config._attn_implementation == "flash_attention_2":
|
| 274 |
+
# Flash Attention 2: Use cu_seqlens for variable length attention
|
| 275 |
+
max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
|
| 276 |
+
attn_output, _ = attention_interface(
|
| 277 |
+
self,
|
| 278 |
+
query_states,
|
| 279 |
+
key_states,
|
| 280 |
+
value_states,
|
| 281 |
+
attention_mask=None,
|
| 282 |
+
scaling=self.scaling,
|
| 283 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
| 284 |
+
cu_seq_lens_q=cu_seqlens,
|
| 285 |
+
cu_seq_lens_k=cu_seqlens,
|
| 286 |
+
max_length_q=max_seqlen,
|
| 287 |
+
max_length_k=max_seqlen,
|
| 288 |
+
is_causal=False,
|
| 289 |
+
**kwargs,
|
| 290 |
+
)
|
| 291 |
+
else:
|
| 292 |
+
# Other implementations: Process each chunk separately
|
| 293 |
+
lengths = cu_seqlens[1:] - cu_seqlens[:-1]
|
| 294 |
+
splits = [
|
| 295 |
+
torch.split(tensor, lengths.tolist(), dim=2)
|
| 296 |
+
for tensor in (query_states, key_states, value_states)
|
| 297 |
+
]
|
| 298 |
+
|
| 299 |
+
attn_outputs = [
|
| 300 |
+
attention_interface(
|
| 301 |
+
self,
|
| 302 |
+
q,
|
| 303 |
+
k,
|
| 304 |
+
v,
|
| 305 |
+
attention_mask=None,
|
| 306 |
+
scaling=self.scaling,
|
| 307 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
| 308 |
+
is_causal=False,
|
| 309 |
+
**kwargs,
|
| 310 |
+
)[0]
|
| 311 |
+
for q, k, v in zip(*splits)
|
| 312 |
+
]
|
| 313 |
+
attn_output = torch.cat(attn_outputs, dim=1)
|
| 314 |
+
|
| 315 |
+
attn_output = attn_output.reshape(seq_length, -1).contiguous()
|
| 316 |
+
attn_output = self.proj(attn_output)
|
| 317 |
+
return attn_output
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class Qwen2_5_VLVisionBlock(GradientCheckpointingLayer):
|
| 321 |
+
def __init__(self, config, attn_implementation: str = "sdpa") -> None:
|
| 322 |
+
super().__init__()
|
| 323 |
+
self.norm1 = Qwen2RMSNorm(config.hidden_size, eps=1e-6)
|
| 324 |
+
self.norm2 = Qwen2RMSNorm(config.hidden_size, eps=1e-6)
|
| 325 |
+
self.attn = Qwen2_5_VLVisionAttention(config=config)
|
| 326 |
+
self.mlp = Qwen2_5_VLMLP(config, bias=True)
|
| 327 |
+
|
| 328 |
+
def forward(
|
| 329 |
+
self,
|
| 330 |
+
hidden_states: torch.Tensor,
|
| 331 |
+
cu_seqlens: torch.Tensor,
|
| 332 |
+
rotary_pos_emb: Optional[torch.Tensor] = None,
|
| 333 |
+
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
|
| 334 |
+
**kwargs,
|
| 335 |
+
) -> torch.Tensor:
|
| 336 |
+
hidden_states = hidden_states + self.attn(
|
| 337 |
+
self.norm1(hidden_states),
|
| 338 |
+
cu_seqlens=cu_seqlens,
|
| 339 |
+
rotary_pos_emb=rotary_pos_emb,
|
| 340 |
+
position_embeddings=position_embeddings,
|
| 341 |
+
**kwargs,
|
| 342 |
+
)
|
| 343 |
+
hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
|
| 344 |
+
return hidden_states
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
@auto_docstring
|
| 348 |
+
class Qwen2_5_VLPreTrainedModel(PreTrainedModel):
|
| 349 |
+
config: Qwen2_5_VLConfig
|
| 350 |
+
base_model_prefix = "model"
|
| 351 |
+
supports_gradient_checkpointing = True
|
| 352 |
+
_no_split_modules = ["Qwen2_5_VLDecoderLayer", "Qwen2_5_VLVisionBlock"]
|
| 353 |
+
_skip_keys_device_placement = "past_key_values"
|
| 354 |
+
_supports_flash_attn = True
|
| 355 |
+
_supports_sdpa = True
|
| 356 |
+
|
| 357 |
+
_can_compile_fullgraph = True
|
| 358 |
+
_supports_attention_backend = True
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class Qwen2_5_VisionTransformerPretrainedModel(Qwen2_5_VLPreTrainedModel):
|
| 362 |
+
config: Qwen2_5_VLVisionConfig
|
| 363 |
+
_no_split_modules = ["Qwen2_5_VLVisionBlock"]
|
| 364 |
+
|
| 365 |
+
def __init__(self, config, *inputs, **kwargs) -> None:
|
| 366 |
+
super().__init__(config, *inputs, **kwargs)
|
| 367 |
+
self.spatial_merge_size = config.spatial_merge_size
|
| 368 |
+
self.patch_size = config.patch_size
|
| 369 |
+
self.fullatt_block_indexes = config.fullatt_block_indexes
|
| 370 |
+
self.window_size = config.window_size
|
| 371 |
+
self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
|
| 372 |
+
|
| 373 |
+
self.patch_embed = Qwen2_5_VisionPatchEmbed(
|
| 374 |
+
patch_size=config.patch_size,
|
| 375 |
+
temporal_patch_size=config.temporal_patch_size,
|
| 376 |
+
in_channels=config.in_channels,
|
| 377 |
+
embed_dim=config.hidden_size,
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
head_dim = config.hidden_size // config.num_heads
|
| 381 |
+
self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2)
|
| 382 |
+
|
| 383 |
+
self.blocks = nn.ModuleList(
|
| 384 |
+
[Qwen2_5_VLVisionBlock(config) for _ in range(config.depth)]
|
| 385 |
+
)
|
| 386 |
+
self.merger = Qwen2_5_VLPatchMerger(
|
| 387 |
+
dim=config.out_hidden_size,
|
| 388 |
+
context_dim=config.hidden_size,
|
| 389 |
+
spatial_merge_size=config.spatial_merge_size,
|
| 390 |
+
)
|
| 391 |
+
self.gradient_checkpointing = False
|
| 392 |
+
|
| 393 |
+
def rot_pos_emb(self, grid_thw):
|
| 394 |
+
pos_ids = []
|
| 395 |
+
for t, h, w in grid_thw:
|
| 396 |
+
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
|
| 397 |
+
hpos_ids = hpos_ids.reshape(
|
| 398 |
+
h // self.spatial_merge_size,
|
| 399 |
+
self.spatial_merge_size,
|
| 400 |
+
w // self.spatial_merge_size,
|
| 401 |
+
self.spatial_merge_size,
|
| 402 |
+
)
|
| 403 |
+
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
|
| 404 |
+
hpos_ids = hpos_ids.flatten()
|
| 405 |
+
|
| 406 |
+
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
|
| 407 |
+
wpos_ids = wpos_ids.reshape(
|
| 408 |
+
h // self.spatial_merge_size,
|
| 409 |
+
self.spatial_merge_size,
|
| 410 |
+
w // self.spatial_merge_size,
|
| 411 |
+
self.spatial_merge_size,
|
| 412 |
+
)
|
| 413 |
+
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
|
| 414 |
+
wpos_ids = wpos_ids.flatten()
|
| 415 |
+
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
|
| 416 |
+
pos_ids = torch.cat(pos_ids, dim=0)
|
| 417 |
+
max_grid_size = grid_thw[:, 1:].max()
|
| 418 |
+
rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
|
| 419 |
+
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
|
| 420 |
+
return rotary_pos_emb
|
| 421 |
+
|
| 422 |
+
def get_window_index(self, grid_thw):
|
| 423 |
+
window_index: list = []
|
| 424 |
+
cu_window_seqlens: list = [0]
|
| 425 |
+
window_index_id = 0
|
| 426 |
+
vit_merger_window_size = (
|
| 427 |
+
self.window_size // self.spatial_merge_size // self.patch_size
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
for grid_t, grid_h, grid_w in grid_thw:
|
| 431 |
+
llm_grid_h, llm_grid_w = (
|
| 432 |
+
grid_h // self.spatial_merge_size,
|
| 433 |
+
grid_w // self.spatial_merge_size,
|
| 434 |
+
)
|
| 435 |
+
index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(
|
| 436 |
+
grid_t, llm_grid_h, llm_grid_w
|
| 437 |
+
)
|
| 438 |
+
pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
|
| 439 |
+
pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
|
| 440 |
+
num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
|
| 441 |
+
num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
|
| 442 |
+
index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
|
| 443 |
+
index_padded = index_padded.reshape(
|
| 444 |
+
grid_t,
|
| 445 |
+
num_windows_h,
|
| 446 |
+
vit_merger_window_size,
|
| 447 |
+
num_windows_w,
|
| 448 |
+
vit_merger_window_size,
|
| 449 |
+
)
|
| 450 |
+
index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
|
| 451 |
+
grid_t,
|
| 452 |
+
num_windows_h * num_windows_w,
|
| 453 |
+
vit_merger_window_size,
|
| 454 |
+
vit_merger_window_size,
|
| 455 |
+
)
|
| 456 |
+
seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
|
| 457 |
+
index_padded = index_padded.reshape(-1)
|
| 458 |
+
index_new = index_padded[index_padded != -100]
|
| 459 |
+
window_index.append(index_new + window_index_id)
|
| 460 |
+
cu_seqlens_tmp = (
|
| 461 |
+
seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1]
|
| 462 |
+
)
|
| 463 |
+
cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
|
| 464 |
+
window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
|
| 465 |
+
window_index = torch.cat(window_index, dim=0)
|
| 466 |
+
|
| 467 |
+
return window_index, cu_window_seqlens
|
| 468 |
+
|
| 469 |
+
def forward(
|
| 470 |
+
self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs
|
| 471 |
+
) -> torch.Tensor:
|
| 472 |
+
"""
|
| 473 |
+
Args:
|
| 474 |
+
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
|
| 475 |
+
The final hidden states of the model.
|
| 476 |
+
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
|
| 477 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 478 |
+
|
| 479 |
+
Returns:
|
| 480 |
+
`torch.Tensor`: hidden_states.
|
| 481 |
+
"""
|
| 482 |
+
hidden_states = self.patch_embed(hidden_states)
|
| 483 |
+
rotary_pos_emb = self.rot_pos_emb(grid_thw)
|
| 484 |
+
window_index, cu_window_seqlens = self.get_window_index(grid_thw)
|
| 485 |
+
cu_window_seqlens = torch.tensor(
|
| 486 |
+
cu_window_seqlens,
|
| 487 |
+
device=hidden_states.device,
|
| 488 |
+
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
|
| 489 |
+
)
|
| 490 |
+
cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
|
| 491 |
+
|
| 492 |
+
seq_len, _ = hidden_states.size()
|
| 493 |
+
hidden_states = hidden_states.reshape(
|
| 494 |
+
seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1
|
| 495 |
+
)
|
| 496 |
+
hidden_states = hidden_states[window_index, :, :]
|
| 497 |
+
hidden_states = hidden_states.reshape(seq_len, -1)
|
| 498 |
+
rotary_pos_emb = rotary_pos_emb.reshape(
|
| 499 |
+
seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1
|
| 500 |
+
)
|
| 501 |
+
rotary_pos_emb = rotary_pos_emb[window_index, :, :]
|
| 502 |
+
rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
|
| 503 |
+
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
|
| 504 |
+
position_embeddings = (emb.cos(), emb.sin())
|
| 505 |
+
|
| 506 |
+
cu_seqlens = torch.repeat_interleave(
|
| 507 |
+
grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
|
| 508 |
+
).cumsum(
|
| 509 |
+
dim=0,
|
| 510 |
+
# Select dtype based on the following factors:
|
| 511 |
+
# - FA2 requires that cu_seqlens_q must have dtype int32
|
| 512 |
+
# - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
|
| 513 |
+
# See https://github.com/huggingface/transformers/pull/34852 for more information
|
| 514 |
+
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
|
| 515 |
+
)
|
| 516 |
+
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
|
| 517 |
+
|
| 518 |
+
for layer_num, blk in enumerate(self.blocks):
|
| 519 |
+
if layer_num in self.fullatt_block_indexes:
|
| 520 |
+
cu_seqlens_now = cu_seqlens
|
| 521 |
+
else:
|
| 522 |
+
cu_seqlens_now = cu_window_seqlens
|
| 523 |
+
|
| 524 |
+
hidden_states = blk(
|
| 525 |
+
hidden_states,
|
| 526 |
+
cu_seqlens=cu_seqlens_now,
|
| 527 |
+
position_embeddings=position_embeddings,
|
| 528 |
+
**kwargs,
|
| 529 |
+
)
|
| 530 |
+
|
| 531 |
+
hidden_states = self.merger(hidden_states)
|
| 532 |
+
reverse_indices = torch.argsort(window_index)
|
| 533 |
+
hidden_states = hidden_states[reverse_indices, :]
|
| 534 |
+
|
| 535 |
+
return hidden_states
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
@dataclass
|
| 539 |
+
@auto_docstring(
|
| 540 |
+
custom_intro="""
|
| 541 |
+
Base class for Llava outputs, with hidden states and attentions.
|
| 542 |
+
"""
|
| 543 |
+
)
|
| 544 |
+
class Qwen2_5_VLModelOutputWithPast(ModelOutput):
|
| 545 |
+
r"""
|
| 546 |
+
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 547 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
| 548 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
|
| 549 |
+
|
| 550 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
|
| 551 |
+
`past_key_values` input) to speed up sequential decoding.
|
| 552 |
+
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
|
| 553 |
+
The rope index difference between sequence length and multimodal rope.
|
| 554 |
+
"""
|
| 555 |
+
|
| 556 |
+
last_hidden_state: torch.FloatTensor = None
|
| 557 |
+
past_key_values: Optional[list[torch.FloatTensor]] = None
|
| 558 |
+
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
| 559 |
+
attentions: Optional[tuple[torch.FloatTensor]] = None
|
| 560 |
+
rope_deltas: Optional[torch.LongTensor] = None
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class Qwen2_5_VLRotaryEmbedding(nn.Module):
|
| 564 |
+
inv_freq: torch.Tensor # fix linting for `register_buffer`
|
| 565 |
+
|
| 566 |
+
def __init__(self, config: Qwen2_5_VLTextConfig, device=None):
|
| 567 |
+
super().__init__()
|
| 568 |
+
# BC: "rope_type" was originally "type"
|
| 569 |
+
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
|
| 570 |
+
self.rope_type = config.rope_scaling.get(
|
| 571 |
+
"rope_type", config.rope_scaling.get("type")
|
| 572 |
+
)
|
| 573 |
+
else:
|
| 574 |
+
self.rope_type = "default"
|
| 575 |
+
self.max_seq_len_cached = config.max_position_embeddings
|
| 576 |
+
self.original_max_seq_len = config.max_position_embeddings
|
| 577 |
+
|
| 578 |
+
self.config = config
|
| 579 |
+
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
|
| 580 |
+
|
| 581 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
|
| 582 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
| 583 |
+
self.original_inv_freq = self.inv_freq
|
| 584 |
+
|
| 585 |
+
@torch.no_grad()
|
| 586 |
+
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
|
| 587 |
+
def forward(self, x, position_ids):
|
| 588 |
+
# In contrast to other models, Qwen2_5_VL has different position ids for the grids
|
| 589 |
+
# So we expand the inv_freq to shape (3, ...)
|
| 590 |
+
inv_freq_expanded = (
|
| 591 |
+
self.inv_freq[None, None, :, None]
|
| 592 |
+
.float()
|
| 593 |
+
.expand(3, position_ids.shape[1], -1, 1)
|
| 594 |
+
)
|
| 595 |
+
position_ids_expanded = position_ids[
|
| 596 |
+
:, :, None, :
|
| 597 |
+
].float() # shape (3, bs, 1, positions)
|
| 598 |
+
|
| 599 |
+
device_type = (
|
| 600 |
+
x.device.type
|
| 601 |
+
if isinstance(x.device.type, str) and x.device.type != "mps"
|
| 602 |
+
else "cpu"
|
| 603 |
+
)
|
| 604 |
+
with torch.autocast(device_type=device_type, enabled=False): # Force float32
|
| 605 |
+
freqs = (
|
| 606 |
+
inv_freq_expanded.float() @ position_ids_expanded.float()
|
| 607 |
+
).transpose(2, 3)
|
| 608 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
| 609 |
+
cos = emb.cos() * self.attention_scaling
|
| 610 |
+
sin = emb.sin() * self.attention_scaling
|
| 611 |
+
|
| 612 |
+
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
class Qwen2MLP(nn.Module):
|
| 616 |
+
def __init__(self, config):
|
| 617 |
+
super().__init__()
|
| 618 |
+
self.config = config
|
| 619 |
+
self.hidden_size = config.hidden_size
|
| 620 |
+
self.intermediate_size = config.intermediate_size
|
| 621 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 622 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| 623 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| 624 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
| 625 |
+
|
| 626 |
+
def forward(self, x):
|
| 627 |
+
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
| 628 |
+
return down_proj
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
|
| 632 |
+
"""Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/).
|
| 633 |
+
|
| 634 |
+
Explanation:
|
| 635 |
+
Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding
|
| 636 |
+
sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For
|
| 637 |
+
vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately.
|
| 638 |
+
Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding.
|
| 639 |
+
For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal,
|
| 640 |
+
height and width) of text embedding is always the same, so the text embedding rotary position embedding has no
|
| 641 |
+
difference with modern LLMs.
|
| 642 |
+
|
| 643 |
+
Args:
|
| 644 |
+
q (`torch.Tensor`): The query tensor.
|
| 645 |
+
k (`torch.Tensor`): The key tensor.
|
| 646 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
| 647 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
| 648 |
+
position_ids (`torch.Tensor`):
|
| 649 |
+
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
|
| 650 |
+
used to pass offsetted position ids when working with a KV-cache.
|
| 651 |
+
mrope_section(`List(int)`):
|
| 652 |
+
Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
|
| 653 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
| 654 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
| 655 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
| 656 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
| 657 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
| 658 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
| 659 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
| 660 |
+
Returns:
|
| 661 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
| 662 |
+
"""
|
| 663 |
+
mrope_section = mrope_section * 2
|
| 664 |
+
cos = torch.cat(
|
| 665 |
+
[m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1
|
| 666 |
+
).unsqueeze(unsqueeze_dim)
|
| 667 |
+
sin = torch.cat(
|
| 668 |
+
[m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1
|
| 669 |
+
).unsqueeze(unsqueeze_dim)
|
| 670 |
+
|
| 671 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
| 672 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
| 673 |
+
return q_embed, k_embed
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
class Qwen2_5_VLAttention(nn.Module):
|
| 677 |
+
"""
|
| 678 |
+
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
|
| 679 |
+
and "Generating Long Sequences with Sparse Transformers".
|
| 680 |
+
"""
|
| 681 |
+
|
| 682 |
+
def __init__(self, config: Qwen2_5_VLTextConfig, layer_idx: Optional[int] = None):
|
| 683 |
+
super().__init__()
|
| 684 |
+
self.config = config
|
| 685 |
+
self.layer_idx = layer_idx
|
| 686 |
+
if layer_idx is None:
|
| 687 |
+
logger.warning_once(
|
| 688 |
+
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
|
| 689 |
+
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
|
| 690 |
+
"when creating this class."
|
| 691 |
+
)
|
| 692 |
+
|
| 693 |
+
self.hidden_size = config.hidden_size
|
| 694 |
+
self.num_heads = config.num_attention_heads
|
| 695 |
+
self.head_dim = self.hidden_size // self.num_heads
|
| 696 |
+
self.num_key_value_heads = config.num_key_value_heads
|
| 697 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
| 698 |
+
self.is_causal = True
|
| 699 |
+
self.attention_dropout = config.attention_dropout
|
| 700 |
+
self.rope_scaling = config.rope_scaling
|
| 701 |
+
self.scaling = self.head_dim**-0.5
|
| 702 |
+
|
| 703 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
| 704 |
+
raise ValueError(
|
| 705 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
| 706 |
+
f" and `num_heads`: {self.num_heads})."
|
| 707 |
+
)
|
| 708 |
+
self.q_proj = nn.Linear(
|
| 709 |
+
self.hidden_size, self.num_heads * self.head_dim, bias=True
|
| 710 |
+
)
|
| 711 |
+
self.k_proj = nn.Linear(
|
| 712 |
+
self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True
|
| 713 |
+
)
|
| 714 |
+
self.v_proj = nn.Linear(
|
| 715 |
+
self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True
|
| 716 |
+
)
|
| 717 |
+
self.o_proj = nn.Linear(
|
| 718 |
+
self.num_heads * self.head_dim, self.hidden_size, bias=False
|
| 719 |
+
)
|
| 720 |
+
self.sliding_window = (
|
| 721 |
+
config.sliding_window
|
| 722 |
+
if config.layer_types[layer_idx] == "sliding_attention"
|
| 723 |
+
else None
|
| 724 |
+
)
|
| 725 |
+
|
| 726 |
+
self.rotary_emb = Qwen2_5_VLRotaryEmbedding(config=config)
|
| 727 |
+
|
| 728 |
+
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
|
| 729 |
+
def forward(
|
| 730 |
+
self,
|
| 731 |
+
hidden_states: torch.Tensor,
|
| 732 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 733 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 734 |
+
past_key_values: Optional[Cache] = None,
|
| 735 |
+
output_attentions: bool = False,
|
| 736 |
+
use_cache: bool = False,
|
| 737 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 738 |
+
position_embeddings: Optional[
|
| 739 |
+
tuple[torch.Tensor, torch.Tensor]
|
| 740 |
+
] = None, # necessary, but kept here for BC
|
| 741 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 742 |
+
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
|
| 743 |
+
bsz, q_len, _ = hidden_states.size()
|
| 744 |
+
|
| 745 |
+
query_states = self.q_proj(hidden_states)
|
| 746 |
+
key_states = self.k_proj(hidden_states)
|
| 747 |
+
value_states = self.v_proj(hidden_states)
|
| 748 |
+
|
| 749 |
+
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
|
| 750 |
+
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
|
| 751 |
+
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
|
| 752 |
+
|
| 753 |
+
cos, sin = position_embeddings
|
| 754 |
+
query_states, key_states = apply_multimodal_rotary_pos_emb(
|
| 755 |
+
query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
|
| 756 |
+
)
|
| 757 |
+
|
| 758 |
+
if past_key_values is not None:
|
| 759 |
+
cache_kwargs = {
|
| 760 |
+
"sin": sin,
|
| 761 |
+
"cos": cos,
|
| 762 |
+
"cache_position": cache_position,
|
| 763 |
+
} # Specific to RoPE models
|
| 764 |
+
key_states, value_states = past_key_values.update(
|
| 765 |
+
key_states, value_states, self.layer_idx, cache_kwargs
|
| 766 |
+
)
|
| 767 |
+
|
| 768 |
+
attention_interface: Callable = eager_attention_forward
|
| 769 |
+
if self.config._attn_implementation != "eager":
|
| 770 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[
|
| 771 |
+
self.config._attn_implementation
|
| 772 |
+
]
|
| 773 |
+
|
| 774 |
+
attn_output, attn_weights = attention_interface(
|
| 775 |
+
self,
|
| 776 |
+
query_states,
|
| 777 |
+
key_states,
|
| 778 |
+
value_states,
|
| 779 |
+
attention_mask,
|
| 780 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
| 781 |
+
scaling=self.scaling,
|
| 782 |
+
sliding_window=self.sliding_window,
|
| 783 |
+
position_ids=position_ids, # pass positions for FA2
|
| 784 |
+
**kwargs,
|
| 785 |
+
)
|
| 786 |
+
|
| 787 |
+
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
|
| 788 |
+
attn_output = self.o_proj(attn_output)
|
| 789 |
+
return attn_output, attn_weights
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
class Qwen2_5_VLDecoderLayer(GradientCheckpointingLayer):
|
| 793 |
+
def __init__(self, config: Qwen2_5_VLTextConfig, layer_idx: int):
|
| 794 |
+
super().__init__()
|
| 795 |
+
self.hidden_size = config.hidden_size
|
| 796 |
+
|
| 797 |
+
if (
|
| 798 |
+
config.use_sliding_window
|
| 799 |
+
and config._attn_implementation != "flash_attention_2"
|
| 800 |
+
):
|
| 801 |
+
logger.warning_once(
|
| 802 |
+
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
|
| 803 |
+
"unexpected results may be encountered."
|
| 804 |
+
)
|
| 805 |
+
self.self_attn = Qwen2_5_VLAttention(config, layer_idx)
|
| 806 |
+
|
| 807 |
+
self.mlp = Qwen2MLP(config)
|
| 808 |
+
self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 809 |
+
self.post_attention_layernorm = Qwen2RMSNorm(
|
| 810 |
+
config.hidden_size, eps=config.rms_norm_eps
|
| 811 |
+
)
|
| 812 |
+
self.attention_type = config.layer_types[layer_idx]
|
| 813 |
+
|
| 814 |
+
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
|
| 815 |
+
def forward(
|
| 816 |
+
self,
|
| 817 |
+
hidden_states: torch.Tensor,
|
| 818 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 819 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 820 |
+
past_key_values: Optional[tuple[torch.Tensor]] = None,
|
| 821 |
+
output_attentions: Optional[bool] = False,
|
| 822 |
+
use_cache: Optional[bool] = False,
|
| 823 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 824 |
+
position_embeddings: Optional[
|
| 825 |
+
tuple[torch.Tensor, torch.Tensor]
|
| 826 |
+
] = None, # necessary, but kept here for BC
|
| 827 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 828 |
+
) -> tuple[
|
| 829 |
+
torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]
|
| 830 |
+
]:
|
| 831 |
+
"""
|
| 832 |
+
Args:
|
| 833 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| 834 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
| 835 |
+
`(batch, sequence_length)` where padding elements are indicated by 0.
|
| 836 |
+
output_attentions (`bool`, *optional*):
|
| 837 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| 838 |
+
returned tensors for more detail.
|
| 839 |
+
use_cache (`bool`, *optional*):
|
| 840 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| 841 |
+
(see `past_key_values`).
|
| 842 |
+
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| 843 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
| 844 |
+
Indices depicting the position of the input sequence tokens in the sequence.
|
| 845 |
+
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
|
| 846 |
+
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
|
| 847 |
+
with `head_dim` being the embedding dimension of each attention head.
|
| 848 |
+
kwargs (`dict`, *optional*):
|
| 849 |
+
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
|
| 850 |
+
into the model
|
| 851 |
+
"""
|
| 852 |
+
|
| 853 |
+
residual = hidden_states
|
| 854 |
+
|
| 855 |
+
hidden_states = self.input_layernorm(hidden_states)
|
| 856 |
+
|
| 857 |
+
# Self Attention
|
| 858 |
+
hidden_states, self_attn_weights = self.self_attn(
|
| 859 |
+
hidden_states=hidden_states,
|
| 860 |
+
attention_mask=attention_mask,
|
| 861 |
+
position_ids=position_ids,
|
| 862 |
+
past_key_values=past_key_values,
|
| 863 |
+
output_attentions=output_attentions,
|
| 864 |
+
use_cache=use_cache,
|
| 865 |
+
cache_position=cache_position,
|
| 866 |
+
position_embeddings=position_embeddings,
|
| 867 |
+
**kwargs,
|
| 868 |
+
)
|
| 869 |
+
hidden_states = residual + hidden_states
|
| 870 |
+
|
| 871 |
+
# Fully Connected
|
| 872 |
+
residual = hidden_states
|
| 873 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
| 874 |
+
hidden_states = self.mlp(hidden_states)
|
| 875 |
+
hidden_states = residual + hidden_states
|
| 876 |
+
|
| 877 |
+
outputs = (hidden_states,)
|
| 878 |
+
|
| 879 |
+
if output_attentions:
|
| 880 |
+
outputs += (self_attn_weights,)
|
| 881 |
+
|
| 882 |
+
return outputs
|
| 883 |
+
|
| 884 |
+
|
| 885 |
+
@auto_docstring
|
| 886 |
+
class Qwen2_5_VLTextModel(Qwen2_5_VLPreTrainedModel):
|
| 887 |
+
config: Qwen2_5_VLTextConfig
|
| 888 |
+
|
| 889 |
+
def __init__(self, config: Qwen2_5_VLTextConfig):
|
| 890 |
+
super().__init__(config)
|
| 891 |
+
self.padding_idx = config.pad_token_id
|
| 892 |
+
self.vocab_size = config.vocab_size
|
| 893 |
+
|
| 894 |
+
self.embed_tokens = nn.Embedding(
|
| 895 |
+
config.vocab_size, config.hidden_size, self.padding_idx
|
| 896 |
+
)
|
| 897 |
+
self.layers = nn.ModuleList(
|
| 898 |
+
[
|
| 899 |
+
Qwen2_5_VLDecoderLayer(config, layer_idx)
|
| 900 |
+
for layer_idx in range(config.num_hidden_layers)
|
| 901 |
+
]
|
| 902 |
+
)
|
| 903 |
+
self._attn_implementation = config._attn_implementation
|
| 904 |
+
self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| 905 |
+
self.rotary_emb = Qwen2_5_VLRotaryEmbedding(config=config)
|
| 906 |
+
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
|
| 907 |
+
|
| 908 |
+
self.gradient_checkpointing = False
|
| 909 |
+
# Initialize weights and apply final processing
|
| 910 |
+
self.post_init()
|
| 911 |
+
|
| 912 |
+
@auto_docstring
|
| 913 |
+
def forward(
|
| 914 |
+
self,
|
| 915 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 916 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 917 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 918 |
+
past_key_values: Optional[Cache] = None,
|
| 919 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 920 |
+
use_cache: Optional[bool] = None,
|
| 921 |
+
output_attentions: Optional[bool] = None,
|
| 922 |
+
output_hidden_states: Optional[bool] = None,
|
| 923 |
+
return_dict: Optional[bool] = None,
|
| 924 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 925 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
| 926 |
+
) -> Union[tuple, BaseModelOutputWithPast]:
|
| 927 |
+
output_attentions = (
|
| 928 |
+
output_attentions
|
| 929 |
+
if output_attentions is not None
|
| 930 |
+
else self.config.output_attentions
|
| 931 |
+
)
|
| 932 |
+
output_hidden_states = (
|
| 933 |
+
output_hidden_states
|
| 934 |
+
if output_hidden_states is not None
|
| 935 |
+
else self.config.output_hidden_states
|
| 936 |
+
)
|
| 937 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 938 |
+
|
| 939 |
+
return_dict = (
|
| 940 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
| 941 |
+
)
|
| 942 |
+
|
| 943 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
| 944 |
+
raise ValueError(
|
| 945 |
+
"You must specify exactly one of input_ids or inputs_embeds"
|
| 946 |
+
)
|
| 947 |
+
|
| 948 |
+
if self.gradient_checkpointing and self.training:
|
| 949 |
+
if use_cache:
|
| 950 |
+
logger.warning_once(
|
| 951 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| 952 |
+
)
|
| 953 |
+
use_cache = False
|
| 954 |
+
|
| 955 |
+
# torch.jit.trace() doesn't support cache objects in the output
|
| 956 |
+
if use_cache and past_key_values is None and not torch.jit.is_tracing():
|
| 957 |
+
past_key_values = DynamicCache()
|
| 958 |
+
|
| 959 |
+
if inputs_embeds is None:
|
| 960 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
| 961 |
+
|
| 962 |
+
if cache_position is None:
|
| 963 |
+
past_seen_tokens = (
|
| 964 |
+
past_key_values.get_seq_length() if past_key_values is not None else 0
|
| 965 |
+
)
|
| 966 |
+
cache_position = torch.arange(
|
| 967 |
+
past_seen_tokens,
|
| 968 |
+
past_seen_tokens + inputs_embeds.shape[1],
|
| 969 |
+
device=inputs_embeds.device,
|
| 970 |
+
)
|
| 971 |
+
|
| 972 |
+
# the hard coded `3` is for temporal, height and width.
|
| 973 |
+
if position_ids is None:
|
| 974 |
+
position_ids = cache_position.view(1, 1, -1).expand(
|
| 975 |
+
3, inputs_embeds.shape[0], -1
|
| 976 |
+
)
|
| 977 |
+
elif position_ids.ndim == 2:
|
| 978 |
+
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
|
| 979 |
+
|
| 980 |
+
# NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
|
| 981 |
+
# where each dim indicates visual spatial positions for temporal/height/width grids.
|
| 982 |
+
# There are two scenarios when FA2-like packed masking might be activated.
|
| 983 |
+
# 1. User specifically passed packed `position_ids` and no attention mask.
|
| 984 |
+
# In this case we expect the user to create correct position ids for all 3 grids
|
| 985 |
+
# and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
|
| 986 |
+
# 2. User runs forward with no attention mask and no position ids. In this case, position ids
|
| 987 |
+
# are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
|
| 988 |
+
# prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
|
| 989 |
+
# text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
|
| 990 |
+
if position_ids.ndim == 3 and position_ids.shape[0] == 4:
|
| 991 |
+
text_position_ids = position_ids[0]
|
| 992 |
+
position_ids = position_ids[1:]
|
| 993 |
+
else:
|
| 994 |
+
text_position_ids = None
|
| 995 |
+
|
| 996 |
+
# It may already have been prepared by e.g. `generate`
|
| 997 |
+
if not isinstance(causal_mask_mapping := attention_mask, dict):
|
| 998 |
+
# Prepare mask arguments
|
| 999 |
+
mask_kwargs = {
|
| 1000 |
+
"config": self.config,
|
| 1001 |
+
"input_embeds": inputs_embeds,
|
| 1002 |
+
"attention_mask": attention_mask,
|
| 1003 |
+
"cache_position": cache_position,
|
| 1004 |
+
"past_key_values": past_key_values,
|
| 1005 |
+
"position_ids": text_position_ids,
|
| 1006 |
+
}
|
| 1007 |
+
# Create the masks
|
| 1008 |
+
causal_mask_mapping = {
|
| 1009 |
+
"full_attention": create_causal_mask(**mask_kwargs),
|
| 1010 |
+
}
|
| 1011 |
+
# The sliding window alternating layers are not always activated depending on the config
|
| 1012 |
+
if self.has_sliding_layers:
|
| 1013 |
+
causal_mask_mapping["sliding_attention"] = (
|
| 1014 |
+
create_sliding_window_causal_mask(**mask_kwargs)
|
| 1015 |
+
)
|
| 1016 |
+
|
| 1017 |
+
hidden_states = inputs_embeds
|
| 1018 |
+
|
| 1019 |
+
# create position embeddings to be shared across the decoder layers
|
| 1020 |
+
position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
| 1021 |
+
|
| 1022 |
+
# decoder layers
|
| 1023 |
+
all_hidden_states = () if output_hidden_states else None
|
| 1024 |
+
all_self_attns = () if output_attentions else None
|
| 1025 |
+
|
| 1026 |
+
for decoder_layer in self.layers:
|
| 1027 |
+
if output_hidden_states:
|
| 1028 |
+
all_hidden_states += (hidden_states,)
|
| 1029 |
+
|
| 1030 |
+
layer_outputs = decoder_layer(
|
| 1031 |
+
hidden_states,
|
| 1032 |
+
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
|
| 1033 |
+
position_ids=text_position_ids,
|
| 1034 |
+
past_key_values=past_key_values,
|
| 1035 |
+
output_attentions=output_attentions,
|
| 1036 |
+
use_cache=use_cache,
|
| 1037 |
+
cache_position=cache_position,
|
| 1038 |
+
position_embeddings=position_embeddings,
|
| 1039 |
+
**kwargs,
|
| 1040 |
+
)
|
| 1041 |
+
|
| 1042 |
+
hidden_states = layer_outputs[0]
|
| 1043 |
+
|
| 1044 |
+
if output_attentions:
|
| 1045 |
+
all_self_attns += (layer_outputs[1],)
|
| 1046 |
+
|
| 1047 |
+
hidden_states = self.norm(hidden_states)
|
| 1048 |
+
|
| 1049 |
+
# add hidden states from the last decoder layer
|
| 1050 |
+
if output_hidden_states:
|
| 1051 |
+
all_hidden_states += (hidden_states,)
|
| 1052 |
+
|
| 1053 |
+
if not return_dict:
|
| 1054 |
+
return tuple(
|
| 1055 |
+
v
|
| 1056 |
+
for v in [
|
| 1057 |
+
hidden_states,
|
| 1058 |
+
past_key_values,
|
| 1059 |
+
all_hidden_states,
|
| 1060 |
+
all_self_attns,
|
| 1061 |
+
]
|
| 1062 |
+
if v is not None
|
| 1063 |
+
)
|
| 1064 |
+
return BaseModelOutputWithPast(
|
| 1065 |
+
last_hidden_state=hidden_states,
|
| 1066 |
+
past_key_values=past_key_values,
|
| 1067 |
+
hidden_states=all_hidden_states,
|
| 1068 |
+
attentions=all_self_attns,
|
| 1069 |
+
)
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
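# NOTE: illustrative sketch, not part of the upstream model file. It shows how a
# packed `[4, bs, seq_len]` position-id tensor, as described in the packing comment
# inside `Qwen2_5_VLTextModel.forward` above, can be assembled: channel 0 carries the
# plain text positions and channels 1..3 carry the temporal/height/width grids. The
# values here are toy numbers; real vision positions come from `get_rope_index`.
def _sketch_packed_position_ids(batch_size: int = 1, seq_len: int = 5):
    import torch

    text_positions = torch.arange(seq_len).view(1, 1, -1).expand(1, batch_size, -1)
    # toy 3D (t/h/w) positions with the same layout as the model's vision positions
    vision_positions = torch.arange(seq_len).view(1, 1, -1).expand(3, batch_size, -1)
    packed = torch.cat([text_positions, vision_positions], dim=0)  # [4, bs, seq_len]
    assert packed.shape == (4, batch_size, seq_len)
    return packed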
@auto_docstring
|
| 1073 |
+
class Qwen2_5_VLModel(Qwen2_5_VLPreTrainedModel):
|
| 1074 |
+
base_model_prefix = ""
|
| 1075 |
+
_checkpoint_conversion_mapping = {"^model": "language_model"}
|
| 1076 |
+
# Reference: fix gemma3 grad acc #37208
|
| 1077 |
+
accepts_loss_kwargs = False
|
| 1078 |
+
config: Qwen2_5_VLConfig
|
| 1079 |
+
_no_split_modules = ["Qwen2_5_VLDecoderLayer", "Qwen2_5_VLVisionBlock"]
|
| 1080 |
+
|
| 1081 |
+
def __init__(self, config):
|
| 1082 |
+
super().__init__(config)
|
| 1083 |
+
self.visual = Qwen2_5_VisionTransformerPretrainedModel._from_config(
|
| 1084 |
+
config.vision_config
|
| 1085 |
+
)
|
| 1086 |
+
self.language_model = Qwen2_5_VLTextModel._from_config(config.text_config)
|
| 1087 |
+
self.rope_deltas = None # cache rope_deltas here
|
| 1088 |
+
|
| 1089 |
+
# Initialize weights and apply final processing
|
| 1090 |
+
self.post_init()
|
| 1091 |
+
|
| 1092 |
+
def get_input_embeddings(self):
|
| 1093 |
+
return self.language_model.get_input_embeddings()
|
| 1094 |
+
|
| 1095 |
+
def set_input_embeddings(self, value):
|
| 1096 |
+
self.language_model.set_input_embeddings(value)
|
| 1097 |
+
|
| 1098 |
+
def set_decoder(self, decoder):
|
| 1099 |
+
self.language_model = decoder
|
| 1100 |
+
|
| 1101 |
+
def get_decoder(self):
|
| 1102 |
+
return self.language_model
|
| 1103 |
+
|
| 1104 |
+
def get_rope_index(
|
| 1105 |
+
self,
|
| 1106 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1107 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1108 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1109 |
+
second_per_grid_ts: Optional[torch.Tensor] = None,
|
| 1110 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1111 |
+
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 1112 |
+
"""
|
| 1113 |
+
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
|
| 1114 |
+
|
| 1115 |
+
Explanation:
|
| 1116 |
+
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
|
| 1117 |
+
|
| 1118 |
+
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
|
| 1119 |
+
Examples:
|
| 1120 |
+
input_ids: [T T T T T], here T is for text.
|
| 1121 |
+
temporal position_ids: [0, 1, 2, 3, 4]
|
| 1122 |
+
height position_ids: [0, 1, 2, 3, 4]
|
| 1123 |
+
width position_ids: [0, 1, 2, 3, 4]
|
| 1124 |
+
|
| 1125 |
+
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
|
| 1126 |
+
and 1D rotary position embedding for text part.
|
| 1127 |
+
Examples:
|
| 1128 |
+
Temporal (Time): 3 patches, representing different segments of the video in time.
|
| 1129 |
+
Height: 2 patches, dividing each frame vertically.
|
| 1130 |
+
Width: 2 patches, dividing each frame horizontally.
|
| 1131 |
+
We also have some important parameters:
|
| 1132 |
+
fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
|
| 1133 |
+
tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
|
| 1134 |
+
temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
|
| 1135 |
+
interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs.
|
| 1136 |
+
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
|
| 1137 |
+
vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
|
| 1138 |
+
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
|
| 1139 |
+
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
|
| 1140 |
+
text temporal position_ids: [101, 102, 103, 104, 105]
|
| 1141 |
+
text height position_ids: [101, 102, 103, 104, 105]
|
| 1142 |
+
text width position_ids: [101, 102, 103, 104, 105]
|
| 1143 |
+
Here we calculate the text start position_ids as the max vision position_ids plus 1.
|
| 1144 |
+
|
| 1145 |
+
Args:
|
| 1146 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 1147 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| 1148 |
+
it.
|
| 1149 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1150 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1151 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1152 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1153 |
+
second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
|
| 1154 |
+
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
|
| 1155 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1156 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1157 |
+
|
| 1158 |
+
- 1 for tokens that are **not masked**,
|
| 1159 |
+
- 0 for tokens that are **masked**.
|
| 1160 |
+
|
| 1161 |
+
Returns:
|
| 1162 |
+
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
|
| 1163 |
+
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
|
| 1164 |
+
"""
|
| 1165 |
+
spatial_merge_size = self.config.vision_config.spatial_merge_size
|
| 1166 |
+
image_token_id = self.config.image_token_id
|
| 1167 |
+
video_token_id = self.config.video_token_id
|
| 1168 |
+
vision_start_token_id = self.config.vision_start_token_id
|
| 1169 |
+
mrope_position_deltas = []
|
| 1170 |
+
if input_ids is not None and (
|
| 1171 |
+
image_grid_thw is not None or video_grid_thw is not None
|
| 1172 |
+
):
|
| 1173 |
+
total_input_ids = input_ids
|
| 1174 |
+
if attention_mask is None:
|
| 1175 |
+
attention_mask = torch.ones_like(total_input_ids)
|
| 1176 |
+
position_ids = torch.ones(
|
| 1177 |
+
3,
|
| 1178 |
+
input_ids.shape[0],
|
| 1179 |
+
input_ids.shape[1],
|
| 1180 |
+
dtype=input_ids.dtype,
|
| 1181 |
+
device=input_ids.device,
|
| 1182 |
+
)
|
| 1183 |
+
image_index, video_index = 0, 0
|
| 1184 |
+
attention_mask = attention_mask.to(total_input_ids.device)
|
| 1185 |
+
for i, input_ids in enumerate(total_input_ids):
|
| 1186 |
+
input_ids = input_ids[attention_mask[i] == 1]
|
| 1187 |
+
image_nums, video_nums = 0, 0
|
| 1188 |
+
vision_start_indices = torch.argwhere(
|
| 1189 |
+
input_ids == vision_start_token_id
|
| 1190 |
+
).squeeze(1)
|
| 1191 |
+
vision_tokens = input_ids[vision_start_indices + 1]
|
| 1192 |
+
image_nums = (vision_tokens == image_token_id).sum()
|
| 1193 |
+
video_nums = (vision_tokens == video_token_id).sum()
|
| 1194 |
+
input_tokens = input_ids.tolist()
|
| 1195 |
+
llm_pos_ids_list: list = []
|
| 1196 |
+
st = 0
|
| 1197 |
+
remain_images, remain_videos = image_nums, video_nums
|
| 1198 |
+
for _ in range(image_nums + video_nums):
|
| 1199 |
+
if image_token_id in input_tokens and remain_images > 0:
|
| 1200 |
+
ed_image = input_tokens.index(image_token_id, st)
|
| 1201 |
+
else:
|
| 1202 |
+
ed_image = len(input_tokens) + 1
|
| 1203 |
+
if video_token_id in input_tokens and remain_videos > 0:
|
| 1204 |
+
ed_video = input_tokens.index(video_token_id, st)
|
| 1205 |
+
else:
|
| 1206 |
+
ed_video = len(input_tokens) + 1
|
| 1207 |
+
if ed_image < ed_video:
|
| 1208 |
+
t, h, w = (
|
| 1209 |
+
image_grid_thw[image_index][0],
|
| 1210 |
+
image_grid_thw[image_index][1],
|
| 1211 |
+
image_grid_thw[image_index][2],
|
| 1212 |
+
)
|
| 1213 |
+
second_per_grid_t = 0
|
| 1214 |
+
image_index += 1
|
| 1215 |
+
remain_images -= 1
|
| 1216 |
+
ed = ed_image
|
| 1217 |
+
|
| 1218 |
+
else:
|
| 1219 |
+
t, h, w = (
|
| 1220 |
+
video_grid_thw[video_index][0],
|
| 1221 |
+
video_grid_thw[video_index][1],
|
| 1222 |
+
video_grid_thw[video_index][2],
|
| 1223 |
+
)
|
| 1224 |
+
if second_per_grid_ts is not None:
|
| 1225 |
+
second_per_grid_t = second_per_grid_ts[video_index]
|
| 1226 |
+
else:
|
| 1227 |
+
second_per_grid_t = 1.0
|
| 1228 |
+
video_index += 1
|
| 1229 |
+
remain_videos -= 1
|
| 1230 |
+
ed = ed_video
|
| 1231 |
+
llm_grid_t, llm_grid_h, llm_grid_w = (
|
| 1232 |
+
t.item(),
|
| 1233 |
+
h.item() // spatial_merge_size,
|
| 1234 |
+
w.item() // spatial_merge_size,
|
| 1235 |
+
)
|
| 1236 |
+
text_len = ed - st
|
| 1237 |
+
|
| 1238 |
+
st_idx = (
|
| 1239 |
+
llm_pos_ids_list[-1].max() + 1
|
| 1240 |
+
if len(llm_pos_ids_list) > 0
|
| 1241 |
+
else 0
|
| 1242 |
+
)
|
| 1243 |
+
llm_pos_ids_list.append(
|
| 1244 |
+
torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
|
| 1245 |
+
)
|
| 1246 |
+
|
| 1247 |
+
range_tensor = torch.arange(llm_grid_t).view(-1, 1)
|
| 1248 |
+
expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w)
|
| 1249 |
+
|
| 1250 |
+
## normalize type, send to device.
|
| 1251 |
+
second_per_grid_t = torch.as_tensor(
|
| 1252 |
+
second_per_grid_t,
|
| 1253 |
+
dtype=range_tensor.dtype,
|
| 1254 |
+
device=range_tensor.device,
|
| 1255 |
+
)
|
| 1256 |
+
|
| 1257 |
+
time_tensor = (
|
| 1258 |
+
expanded_range
|
| 1259 |
+
* second_per_grid_t
|
| 1260 |
+
* self.config.vision_config.tokens_per_second
|
| 1261 |
+
)
|
| 1262 |
+
|
| 1263 |
+
time_tensor_long = time_tensor.long()
|
| 1264 |
+
t_index = time_tensor_long.flatten()
|
| 1265 |
+
|
| 1266 |
+
h_index = (
|
| 1267 |
+
torch.arange(llm_grid_h)
|
| 1268 |
+
.view(1, -1, 1)
|
| 1269 |
+
.expand(llm_grid_t, -1, llm_grid_w)
|
| 1270 |
+
.flatten()
|
| 1271 |
+
)
|
| 1272 |
+
w_index = (
|
| 1273 |
+
torch.arange(llm_grid_w)
|
| 1274 |
+
.view(1, 1, -1)
|
| 1275 |
+
.expand(llm_grid_t, llm_grid_h, -1)
|
| 1276 |
+
.flatten()
|
| 1277 |
+
)
|
| 1278 |
+
llm_pos_ids_list.append(
|
| 1279 |
+
torch.stack([t_index, h_index, w_index]) + text_len + st_idx
|
| 1280 |
+
)
|
| 1281 |
+
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
|
| 1282 |
+
|
| 1283 |
+
if st < len(input_tokens):
|
| 1284 |
+
st_idx = (
|
| 1285 |
+
llm_pos_ids_list[-1].max() + 1
|
| 1286 |
+
if len(llm_pos_ids_list) > 0
|
| 1287 |
+
else 0
|
| 1288 |
+
)
|
| 1289 |
+
text_len = len(input_tokens) - st
|
| 1290 |
+
llm_pos_ids_list.append(
|
| 1291 |
+
torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
|
| 1292 |
+
)
|
| 1293 |
+
|
| 1294 |
+
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
|
| 1295 |
+
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(
|
| 1296 |
+
position_ids.device
|
| 1297 |
+
)
|
| 1298 |
+
mrope_position_deltas.append(
|
| 1299 |
+
llm_positions.max() + 1 - len(total_input_ids[i])
|
| 1300 |
+
)
|
| 1301 |
+
mrope_position_deltas = torch.tensor(
|
| 1302 |
+
mrope_position_deltas, device=input_ids.device
|
| 1303 |
+
).unsqueeze(1)
|
| 1304 |
+
return position_ids, mrope_position_deltas
|
| 1305 |
+
else:
|
| 1306 |
+
if attention_mask is not None:
|
| 1307 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1308 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1309 |
+
position_ids = (
|
| 1310 |
+
position_ids.unsqueeze(0)
|
| 1311 |
+
.expand(3, -1, -1)
|
| 1312 |
+
.to(attention_mask.device)
|
| 1313 |
+
)
|
| 1314 |
+
max_position_ids = position_ids.max(0, keepdim=False)[0].max(
|
| 1315 |
+
-1, keepdim=True
|
| 1316 |
+
)[0]
|
| 1317 |
+
mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
|
| 1318 |
+
else:
|
| 1319 |
+
position_ids = (
|
| 1320 |
+
torch.arange(input_ids.shape[1], device=input_ids.device)
|
| 1321 |
+
.view(1, 1, -1)
|
| 1322 |
+
.expand(3, input_ids.shape[0], -1)
|
| 1323 |
+
)
|
| 1324 |
+
mrope_position_deltas = torch.zeros(
|
| 1325 |
+
[input_ids.shape[0], 1],
|
| 1326 |
+
device=input_ids.device,
|
| 1327 |
+
dtype=input_ids.dtype,
|
| 1328 |
+
)
|
| 1329 |
+
|
| 1330 |
+
return position_ids, mrope_position_deltas
|
| 1331 |
+
|
| 1332 |
+
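# NOTE: illustrative sketch, not part of the upstream model file. It reproduces the
# worked example in the `get_rope_index` docstring above: with tokens_per_second=25,
# temporal_patch_size=2 and fps=1, the temporal stride between patches is
# 25 * 2 / 1 = 50, so a 3x2x2 video grid gets temporal ids [0, 50, 100] repeated over
# the 2x2 spatial grid, and the text positions continue from max(vision position) + 1.
def _sketch_mrope_temporal_ids():
    import torch

    tokens_per_second, temporal_patch_size, fps = 25, 2, 1
    interval = tokens_per_second * temporal_patch_size / fps  # 50
    llm_grid_t, llm_grid_h, llm_grid_w = 3, 2, 2

    t_index = (
        torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w) * interval
    ).long().flatten()
    # -> tensor([0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100])
    text_start = int(t_index.max()) + 1  # 101, as in the docstring example
    return t_index, text_start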
def get_video_features(
|
| 1333 |
+
self,
|
| 1334 |
+
pixel_values_videos: torch.FloatTensor,
|
| 1335 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1336 |
+
):
|
| 1337 |
+
"""
|
| 1338 |
+
Encodes videos into continuous embeddings that can be forwarded to the language model.
|
| 1339 |
+
|
| 1340 |
+
Args:
|
| 1341 |
+
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
|
| 1342 |
+
The tensors corresponding to the input videos.
|
| 1343 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1344 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1345 |
+
"""
|
| 1346 |
+
pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
|
| 1347 |
+
video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
|
| 1348 |
+
split_sizes = (
|
| 1349 |
+
video_grid_thw.prod(-1) // self.visual.spatial_merge_size**2
|
| 1350 |
+
).tolist()
|
| 1351 |
+
video_embeds = torch.split(video_embeds, split_sizes)
|
| 1352 |
+
return video_embeds
|
| 1353 |
+
|
| 1354 |
+
def get_image_features(
|
| 1355 |
+
self,
|
| 1356 |
+
pixel_values: torch.FloatTensor,
|
| 1357 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1358 |
+
):
|
| 1359 |
+
"""
|
| 1360 |
+
Encodes images into continuous embeddings that can be forwarded to the language model.
|
| 1361 |
+
|
| 1362 |
+
Args:
|
| 1363 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
|
| 1364 |
+
The tensors corresponding to the input images.
|
| 1365 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1366 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1367 |
+
"""
|
| 1368 |
+
pixel_values = pixel_values.type(self.visual.dtype)
|
| 1369 |
+
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
|
| 1370 |
+
split_sizes = (
|
| 1371 |
+
image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2
|
| 1372 |
+
).tolist()
|
| 1373 |
+
image_embeds = torch.split(image_embeds, split_sizes)
|
| 1374 |
+
return image_embeds
|
| 1375 |
+
|
| 1376 |
+
def get_placeholder_mask(
|
| 1377 |
+
self,
|
| 1378 |
+
input_ids: torch.LongTensor,
|
| 1379 |
+
inputs_embeds: torch.FloatTensor,
|
| 1380 |
+
image_features: torch.FloatTensor = None,
|
| 1381 |
+
video_features: torch.FloatTensor = None,
|
| 1382 |
+
):
|
| 1383 |
+
"""
|
| 1384 |
+
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
|
| 1385 |
+
equal to the length of multimodal features. If the lengths are different, an error is raised.
|
| 1386 |
+
"""
|
| 1387 |
+
if input_ids is None:
|
| 1388 |
+
special_image_mask = inputs_embeds == self.get_input_embeddings()(
|
| 1389 |
+
torch.tensor(
|
| 1390 |
+
self.config.image_token_id,
|
| 1391 |
+
dtype=torch.long,
|
| 1392 |
+
device=inputs_embeds.device,
|
| 1393 |
+
)
|
| 1394 |
+
)
|
| 1395 |
+
special_image_mask = special_image_mask.all(-1)
|
| 1396 |
+
special_video_mask = inputs_embeds == self.get_input_embeddings()(
|
| 1397 |
+
torch.tensor(
|
| 1398 |
+
self.config.video_token_id,
|
| 1399 |
+
dtype=torch.long,
|
| 1400 |
+
device=inputs_embeds.device,
|
| 1401 |
+
)
|
| 1402 |
+
)
|
| 1403 |
+
special_video_mask = special_video_mask.all(-1)
|
| 1404 |
+
else:
|
| 1405 |
+
special_image_mask = input_ids == self.config.image_token_id
|
| 1406 |
+
special_video_mask = input_ids == self.config.video_token_id
|
| 1407 |
+
|
| 1408 |
+
n_image_tokens = special_image_mask.sum()
|
| 1409 |
+
special_image_mask = (
|
| 1410 |
+
special_image_mask.unsqueeze(-1)
|
| 1411 |
+
.expand_as(inputs_embeds)
|
| 1412 |
+
.to(inputs_embeds.device)
|
| 1413 |
+
)
|
| 1414 |
+
if (
|
| 1415 |
+
image_features is not None
|
| 1416 |
+
and inputs_embeds[special_image_mask].numel() != image_features.numel()
|
| 1417 |
+
):
|
| 1418 |
+
raise ValueError(
|
| 1419 |
+
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
|
| 1420 |
+
)
|
| 1421 |
+
|
| 1422 |
+
n_video_tokens = special_video_mask.sum()
|
| 1423 |
+
special_video_mask = (
|
| 1424 |
+
special_video_mask.unsqueeze(-1)
|
| 1425 |
+
.expand_as(inputs_embeds)
|
| 1426 |
+
.to(inputs_embeds.device)
|
| 1427 |
+
)
|
| 1428 |
+
if (
|
| 1429 |
+
video_features is not None
|
| 1430 |
+
and inputs_embeds[special_video_mask].numel() != video_features.numel()
|
| 1431 |
+
):
|
| 1432 |
+
raise ValueError(
|
| 1433 |
+
f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
|
| 1434 |
+
)
|
| 1435 |
+
|
| 1436 |
+
return special_image_mask, special_video_mask
|
| 1437 |
+
|
| 1438 |
+
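# NOTE: illustrative sketch, not part of the upstream model file. It shows how the
# masks returned by `get_placeholder_mask` above are consumed in `forward` below:
# the boolean mask is expanded to the embedding dimension and `masked_scatter` drops
# the vision features into the slots occupied by placeholder tokens. Token id 7 is a
# made-up placeholder id used only for this example.
def _sketch_scatter_image_features():
    import torch

    image_token_id = 7
    input_ids = torch.tensor([[1, 7, 7, 2]])          # two image placeholder tokens
    inputs_embeds = torch.zeros(1, 4, 3)              # (bs, seq, hidden)
    image_features = torch.ones(2, 3)                 # one row per placeholder token

    mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
    inputs_embeds = inputs_embeds.masked_scatter(mask, image_features)
    return inputs_embeds  # rows 1 and 2 now hold the image features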
@auto_docstring
|
| 1439 |
+
def forward(
|
| 1440 |
+
self,
|
| 1441 |
+
input_ids: torch.LongTensor = None,
|
| 1442 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1443 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1444 |
+
past_key_values: Optional[Cache] = None,
|
| 1445 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1446 |
+
use_cache: Optional[bool] = None,
|
| 1447 |
+
output_attentions: Optional[bool] = None,
|
| 1448 |
+
output_hidden_states: Optional[bool] = None,
|
| 1449 |
+
return_dict: Optional[bool] = None,
|
| 1450 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1451 |
+
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
| 1452 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1453 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1454 |
+
rope_deltas: Optional[torch.LongTensor] = None,
|
| 1455 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1456 |
+
second_per_grid_ts: Optional[torch.Tensor] = None,
|
| 1457 |
+
**kwargs: Unpack[TransformersKwargs],
|
| 1458 |
+
) -> Union[tuple, Qwen2_5_VLModelOutputWithPast]:
|
| 1459 |
+
r"""
|
| 1460 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1461 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1462 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1463 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1464 |
+
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
|
| 1465 |
+
The rope index difference between sequence length and multimodal rope.
|
| 1466 |
+
second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
|
| 1467 |
+
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
|
| 1468 |
+
"""
|
| 1469 |
+
|
| 1470 |
+
output_attentions = (
|
| 1471 |
+
output_attentions
|
| 1472 |
+
if output_attentions is not None
|
| 1473 |
+
else self.config.output_attentions
|
| 1474 |
+
)
|
| 1475 |
+
output_hidden_states = (
|
| 1476 |
+
output_hidden_states
|
| 1477 |
+
if output_hidden_states is not None
|
| 1478 |
+
else self.config.output_hidden_states
|
| 1479 |
+
)
|
| 1480 |
+
return_dict = (
|
| 1481 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
| 1482 |
+
)
|
| 1483 |
+
|
| 1484 |
+
if inputs_embeds is None:
|
| 1485 |
+
inputs_embeds = self.get_input_embeddings()(input_ids)
|
| 1486 |
+
|
| 1487 |
+
if pixel_values is not None:
|
| 1488 |
+
image_embeds = self.get_image_features(pixel_values, image_grid_thw)
|
| 1489 |
+
image_embeds = torch.cat(image_embeds, dim=0).to(
|
| 1490 |
+
inputs_embeds.device, inputs_embeds.dtype
|
| 1491 |
+
)
|
| 1492 |
+
image_mask, _ = self.get_placeholder_mask(
|
| 1493 |
+
input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
|
| 1494 |
+
)
|
| 1495 |
+
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
|
| 1496 |
+
|
| 1497 |
+
if pixel_values_videos is not None:
|
| 1498 |
+
video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
|
| 1499 |
+
video_embeds = torch.cat(video_embeds, dim=0).to(
|
| 1500 |
+
inputs_embeds.device, inputs_embeds.dtype
|
| 1501 |
+
)
|
| 1502 |
+
_, video_mask = self.get_placeholder_mask(
|
| 1503 |
+
input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
|
| 1504 |
+
)
|
| 1505 |
+
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
|
| 1506 |
+
|
| 1507 |
+
if position_ids is None:
|
| 1508 |
+
# Calculate RoPE index once per generation in the pre-fill stage only.
|
| 1509 |
+
# When compiling, we can't check tensor values thus we check only input length
|
| 1510 |
+
# It is safe to assume that `length!=1` means we're in pre-fill because compiled
|
| 1511 |
+
# models currently cannot do assisted decoding
|
| 1512 |
+
prefill_compiled_stage = is_torchdynamo_compiling() and (
|
| 1513 |
+
(input_ids is not None and input_ids.shape[1] != 1)
|
| 1514 |
+
or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
|
| 1515 |
+
)
|
| 1516 |
+
prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
|
| 1517 |
+
(cache_position is not None and cache_position[0] == 0)
|
| 1518 |
+
or (past_key_values is None or past_key_values.get_seq_length() == 0)
|
| 1519 |
+
)
|
| 1520 |
+
if (
|
| 1521 |
+
prefill_compiled_stage or prefill_noncompiled_stage
|
| 1522 |
+
) or self.rope_deltas is None:
|
| 1523 |
+
position_ids, rope_deltas = self.get_rope_index(
|
| 1524 |
+
input_ids,
|
| 1525 |
+
image_grid_thw,
|
| 1526 |
+
video_grid_thw,
|
| 1527 |
+
second_per_grid_ts=second_per_grid_ts,
|
| 1528 |
+
attention_mask=attention_mask,
|
| 1529 |
+
)
|
| 1530 |
+
self.rope_deltas = rope_deltas
|
| 1531 |
+
else:
|
| 1532 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
| 1533 |
+
position_ids = torch.arange(seq_length, device=inputs_embeds.device)
|
| 1534 |
+
position_ids = position_ids.view(1, 1, -1).expand(3, batch_size, -1)
|
| 1535 |
+
if cache_position is not None:
|
| 1536 |
+
delta = (cache_position[0] + self.rope_deltas).to(
|
| 1537 |
+
inputs_embeds.device
|
| 1538 |
+
)
|
| 1539 |
+
else:
|
| 1540 |
+
delta = torch.zeros(
|
| 1541 |
+
(batch_size, seq_length), device=inputs_embeds.device
|
| 1542 |
+
)
|
| 1543 |
+
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=1)
|
| 1544 |
+
position_ids = position_ids + delta.to(position_ids.device)
|
| 1545 |
+
|
| 1546 |
+
outputs = self.language_model(
|
| 1547 |
+
input_ids=None,
|
| 1548 |
+
position_ids=position_ids,
|
| 1549 |
+
attention_mask=attention_mask,
|
| 1550 |
+
past_key_values=past_key_values,
|
| 1551 |
+
inputs_embeds=inputs_embeds,
|
| 1552 |
+
use_cache=use_cache,
|
| 1553 |
+
output_attentions=output_attentions,
|
| 1554 |
+
output_hidden_states=output_hidden_states,
|
| 1555 |
+
return_dict=True,
|
| 1556 |
+
cache_position=cache_position,
|
| 1557 |
+
**kwargs,
|
| 1558 |
+
)
|
| 1559 |
+
|
| 1560 |
+
output = Qwen2_5_VLModelOutputWithPast(
|
| 1561 |
+
last_hidden_state=outputs.last_hidden_state,
|
| 1562 |
+
past_key_values=outputs.past_key_values,
|
| 1563 |
+
hidden_states=outputs.hidden_states,
|
| 1564 |
+
attentions=outputs.attentions,
|
| 1565 |
+
rope_deltas=self.rope_deltas,
|
| 1566 |
+
)
|
| 1567 |
+
return output if return_dict else output.to_tuple()
|
| 1568 |
+
|
| 1569 |
+
|
| 1570 |
+
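# NOTE: illustrative sketch, not part of the upstream model file. It mimics the
# decode-step branch of `Qwen2_5_VLModel.forward` above: after prefill has cached
# `rope_deltas`, the per-step 3D position ids are a plain arange shifted by
# `cache_position[0] + rope_deltas`. The numbers below are toy values.
def _sketch_decode_step_positions():
    import torch

    batch_size, seq_length = 1, 1            # a single decode step
    cache_position = torch.tensor([42])      # index of the token being decoded
    rope_deltas = torch.tensor([[-7]])       # cached during prefill, shape (bs, 1)

    position_ids = torch.arange(seq_length).view(1, 1, -1).expand(3, batch_size, -1)
    delta = cache_position[0] + rope_deltas                      # (bs, 1)
    delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=1)
    position_ids = position_ids + delta.to(position_ids.device)  # (3, bs, seq), all 35
    return position_ids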
@dataclass
|
| 1571 |
+
@auto_docstring(
|
| 1572 |
+
custom_intro="""
|
| 1573 |
+
Base class for Qwen2_5_VL causal language model (or autoregressive) outputs.
|
| 1574 |
+
"""
|
| 1575 |
+
)
|
| 1576 |
+
class Qwen2_5_VLCausalLMOutputWithPast(ModelOutput):
|
| 1577 |
+
r"""
|
| 1578 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 1579 |
+
Language modeling loss (for next-token prediction).
|
| 1580 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 1581 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 1582 |
+
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 1583 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
| 1584 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
|
| 1585 |
+
|
| 1586 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
|
| 1587 |
+
`past_key_values` input) to speed up sequential decoding.
|
| 1588 |
+
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
|
| 1589 |
+
The rope index difference between sequence length and multimodal rope.
|
| 1590 |
+
"""
|
| 1591 |
+
|
| 1592 |
+
loss: Optional[torch.FloatTensor] = None
|
| 1593 |
+
logits: Optional[torch.FloatTensor] = None
|
| 1594 |
+
past_key_values: Optional[list[torch.FloatTensor]] = None
|
| 1595 |
+
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
| 1596 |
+
attentions: Optional[tuple[torch.FloatTensor]] = None
|
| 1597 |
+
rope_deltas: Optional[torch.LongTensor] = None
|
| 1598 |
+
|
| 1599 |
+
|
| 1600 |
+
class Qwen2_5_VLForConditionalGeneration(Qwen2_5_VLPreTrainedModel, GenerationMixin):
|
| 1601 |
+
_checkpoint_conversion_mapping = {
|
| 1602 |
+
"^visual": "model.visual",
|
| 1603 |
+
r"^model(?!\.(language_model|visual))": "model.language_model",
|
| 1604 |
+
}
|
| 1605 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 1606 |
+
# Reference: fix gemma3 grad acc #37208
|
| 1607 |
+
accepts_loss_kwargs = False
|
| 1608 |
+
|
| 1609 |
+
def __init__(self, config):
|
| 1610 |
+
super().__init__(config)
|
| 1611 |
+
self.model = Qwen2_5_VLModel(config)
|
| 1612 |
+
self.lm_head = nn.Linear(
|
| 1613 |
+
config.text_config.hidden_size, config.text_config.vocab_size, bias=False
|
| 1614 |
+
)
|
| 1615 |
+
|
| 1616 |
+
self.post_init()
|
| 1617 |
+
|
| 1618 |
+
def get_input_embeddings(self):
|
| 1619 |
+
return self.model.get_input_embeddings()
|
| 1620 |
+
|
| 1621 |
+
def set_input_embeddings(self, value):
|
| 1622 |
+
self.model.set_input_embeddings(value)
|
| 1623 |
+
|
| 1624 |
+
def set_decoder(self, decoder):
|
| 1625 |
+
self.model.set_decoder(decoder)
|
| 1626 |
+
|
| 1627 |
+
def get_decoder(self):
|
| 1628 |
+
return self.model.get_decoder()
|
| 1629 |
+
|
| 1630 |
+
def get_video_features(
|
| 1631 |
+
self,
|
| 1632 |
+
pixel_values_videos: torch.FloatTensor,
|
| 1633 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1634 |
+
):
|
| 1635 |
+
return self.model.get_video_features(pixel_values_videos, video_grid_thw)
|
| 1636 |
+
|
| 1637 |
+
def get_image_features(
|
| 1638 |
+
self,
|
| 1639 |
+
pixel_values: torch.FloatTensor,
|
| 1640 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1641 |
+
):
|
| 1642 |
+
return self.model.get_image_features(pixel_values, image_grid_thw)
|
| 1643 |
+
|
| 1644 |
+
# Make modules available through conditional class for BC
|
| 1645 |
+
@property
|
| 1646 |
+
def language_model(self):
|
| 1647 |
+
return self.model.language_model
|
| 1648 |
+
|
| 1649 |
+
@property
|
| 1650 |
+
def visual(self):
|
| 1651 |
+
return self.model.visual
|
| 1652 |
+
|
| 1653 |
+
@can_return_tuple
|
| 1654 |
+
@auto_docstring
|
| 1655 |
+
def forward(
|
| 1656 |
+
self,
|
| 1657 |
+
input_ids: torch.LongTensor = None,
|
| 1658 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1659 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1660 |
+
past_key_values: Optional[Cache] = None,
|
| 1661 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1662 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1663 |
+
use_cache: Optional[bool] = None,
|
| 1664 |
+
output_attentions: Optional[bool] = None,
|
| 1665 |
+
output_hidden_states: Optional[bool] = None,
|
| 1666 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1667 |
+
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
| 1668 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1669 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1670 |
+
rope_deltas: Optional[torch.LongTensor] = None,
|
| 1671 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1672 |
+
second_per_grid_ts: Optional[torch.Tensor] = None,
|
| 1673 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 1674 |
+
**kwargs: Unpack[TransformersKwargs],
|
| 1675 |
+
) -> Union[tuple, Qwen2_5_VLCausalLMOutputWithPast]:
|
| 1676 |
+
r"""
|
| 1677 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1678 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 1679 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 1680 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1681 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1682 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1683 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1684 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1685 |
+
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
|
| 1686 |
+
The rope index difference between sequence length and multimodal rope.
|
| 1687 |
+
second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
|
| 1688 |
+
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
|
| 1689 |
+
|
| 1690 |
+
Example:
|
| 1691 |
+
|
| 1692 |
+
```python
|
| 1693 |
+
>>> from PIL import Image
|
| 1694 |
+
>>> import requests
|
| 1695 |
+
>>> from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
|
| 1696 |
+
|
| 1697 |
+
>>> model = Qwen2_5_VLForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
|
| 1698 |
+
>>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
|
| 1699 |
+
|
| 1700 |
+
>>> messages = [
|
| 1701 |
+
{
|
| 1702 |
+
"role": "user",
|
| 1703 |
+
"content": [
|
| 1704 |
+
{"type": "image"},
|
| 1705 |
+
{"type": "text", "text": "What is shown in this image?"},
|
| 1706 |
+
],
|
| 1707 |
+
},
|
| 1708 |
+
]
|
| 1709 |
+
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
|
| 1710 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
| 1711 |
+
|
| 1712 |
+
>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 1713 |
+
>>> inputs = processor(text=[text], images=[image])
|
| 1714 |
+
|
| 1715 |
+
>>> # Generate
|
| 1716 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| 1717 |
+
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| 1718 |
+
"The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ..."
|
| 1719 |
+
```"""
|
| 1720 |
+
|
| 1721 |
+
output_attentions = (
|
| 1722 |
+
output_attentions
|
| 1723 |
+
if output_attentions is not None
|
| 1724 |
+
else self.config.output_attentions
|
| 1725 |
+
)
|
| 1726 |
+
output_hidden_states = (
|
| 1727 |
+
output_hidden_states
|
| 1728 |
+
if output_hidden_states is not None
|
| 1729 |
+
else self.config.output_hidden_states
|
| 1730 |
+
)
|
| 1731 |
+
|
| 1732 |
+
outputs = self.model(
|
| 1733 |
+
input_ids=input_ids,
|
| 1734 |
+
pixel_values=pixel_values,
|
| 1735 |
+
pixel_values_videos=pixel_values_videos,
|
| 1736 |
+
image_grid_thw=image_grid_thw,
|
| 1737 |
+
video_grid_thw=video_grid_thw,
|
| 1738 |
+
second_per_grid_ts=second_per_grid_ts,
|
| 1739 |
+
position_ids=position_ids,
|
| 1740 |
+
attention_mask=attention_mask,
|
| 1741 |
+
past_key_values=past_key_values,
|
| 1742 |
+
inputs_embeds=inputs_embeds,
|
| 1743 |
+
use_cache=use_cache,
|
| 1744 |
+
output_attentions=output_attentions,
|
| 1745 |
+
output_hidden_states=output_hidden_states,
|
| 1746 |
+
return_dict=True,
|
| 1747 |
+
cache_position=cache_position,
|
| 1748 |
+
**kwargs,
|
| 1749 |
+
)
|
| 1750 |
+
|
| 1751 |
+
hidden_states = outputs[0]
|
| 1752 |
+
|
| 1753 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 1754 |
+
slice_indices = (
|
| 1755 |
+
slice(-logits_to_keep, None)
|
| 1756 |
+
if isinstance(logits_to_keep, int)
|
| 1757 |
+
else logits_to_keep
|
| 1758 |
+
)
|
| 1759 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 1760 |
+
|
| 1761 |
+
loss = None
|
| 1762 |
+
if labels is not None:
|
| 1763 |
+
loss = self.loss_function(
|
| 1764 |
+
logits=logits,
|
| 1765 |
+
labels=labels,
|
| 1766 |
+
vocab_size=self.config.text_config.vocab_size,
|
| 1767 |
+
**kwargs,
|
| 1768 |
+
)
|
| 1769 |
+
|
| 1770 |
+
return Qwen2_5_VLCausalLMOutputWithPast(
|
| 1771 |
+
loss=loss,
|
| 1772 |
+
logits=logits,
|
| 1773 |
+
past_key_values=outputs.past_key_values,
|
| 1774 |
+
hidden_states=outputs.hidden_states,
|
| 1775 |
+
attentions=outputs.attentions,
|
| 1776 |
+
rope_deltas=outputs.rope_deltas,
|
| 1777 |
+
)
|
| 1778 |
+
|
| 1779 |
+
def prepare_inputs_for_generation(
|
| 1780 |
+
self,
|
| 1781 |
+
input_ids,
|
| 1782 |
+
past_key_values=None,
|
| 1783 |
+
attention_mask=None,
|
| 1784 |
+
inputs_embeds=None,
|
| 1785 |
+
cache_position=None,
|
| 1786 |
+
position_ids=None,
|
| 1787 |
+
use_cache=True,
|
| 1788 |
+
pixel_values=None,
|
| 1789 |
+
pixel_values_videos=None,
|
| 1790 |
+
image_grid_thw=None,
|
| 1791 |
+
video_grid_thw=None,
|
| 1792 |
+
second_per_grid_ts=None,
|
| 1793 |
+
**kwargs,
|
| 1794 |
+
):
|
| 1795 |
+
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
|
| 1796 |
+
|
| 1797 |
+
model_inputs = super().prepare_inputs_for_generation(
|
| 1798 |
+
input_ids,
|
| 1799 |
+
past_key_values=past_key_values,
|
| 1800 |
+
attention_mask=attention_mask,
|
| 1801 |
+
inputs_embeds=inputs_embeds,
|
| 1802 |
+
cache_position=cache_position,
|
| 1803 |
+
position_ids=position_ids,
|
| 1804 |
+
pixel_values=pixel_values,
|
| 1805 |
+
pixel_values_videos=pixel_values_videos,
|
| 1806 |
+
image_grid_thw=image_grid_thw,
|
| 1807 |
+
video_grid_thw=video_grid_thw,
|
| 1808 |
+
second_per_grid_ts=second_per_grid_ts,
|
| 1809 |
+
use_cache=use_cache,
|
| 1810 |
+
**kwargs,
|
| 1811 |
+
)
|
| 1812 |
+
|
| 1813 |
+
# Qwen2-5-VL position_ids are prepared with rope_deltas
|
| 1814 |
+
if position_ids is None:
|
| 1815 |
+
# Calculate RoPE index once per generation in the pre-fill stage only.
|
| 1816 |
+
# When compiling, we can't check tensor values thus we check only input length
|
| 1817 |
+
# It is safe to assume that `length!=1` means we're in pre-fill because compiled
|
| 1818 |
+
# models currently cannot do assisted decoding
|
| 1819 |
+
if cache_position[0] == 0 or self.model.rope_deltas is None:
|
| 1820 |
+
vision_positions, rope_deltas = self.model.get_rope_index(
|
| 1821 |
+
model_inputs.get("input_ids", None),
|
| 1822 |
+
image_grid_thw=image_grid_thw,
|
| 1823 |
+
video_grid_thw=video_grid_thw,
|
| 1824 |
+
second_per_grid_ts=second_per_grid_ts,
|
| 1825 |
+
attention_mask=attention_mask,
|
| 1826 |
+
)
|
| 1827 |
+
self.model.rope_deltas = rope_deltas
|
| 1828 |
+
# then use the prev pre-calculated rope-deltas to get the correct position ids
|
| 1829 |
+
elif "position_ids" in model_inputs:
|
| 1830 |
+
batch_size, seq_length = model_inputs["position_ids"].shape
|
| 1831 |
+
device = model_inputs["position_ids"].device
|
| 1832 |
+
position_ids = torch.arange(seq_length, device=device)
|
| 1833 |
+
position_ids = position_ids.view(1, 1, -1).expand(3, batch_size, -1)
|
| 1834 |
+
delta = cache_position[0] + self.model.rope_deltas
|
| 1835 |
+
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
|
| 1836 |
+
vision_positions = position_ids + delta.expand_as(position_ids)
|
| 1837 |
+
|
| 1838 |
+
# Concatenate "text + vision" positions into [4, bs, seq-len]
|
| 1839 |
+
text_positions = model_inputs["position_ids"][None, ...]
|
| 1840 |
+
model_inputs["position_ids"] = torch.cat(
|
| 1841 |
+
[text_positions, vision_positions], dim=0
|
| 1842 |
+
)
|
| 1843 |
+
|
| 1844 |
+
if cache_position[0] != 0:
|
| 1845 |
+
model_inputs["pixel_values"] = None
|
| 1846 |
+
model_inputs["pixel_values_videos"] = None
|
| 1847 |
+
|
| 1848 |
+
return model_inputs
|
| 1849 |
+
|
| 1850 |
+
def _get_image_nums_and_video_nums(
|
| 1851 |
+
self,
|
| 1852 |
+
input_ids: Optional[torch.LongTensor],
|
| 1853 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 1854 |
+
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 1855 |
+
"""
|
| 1856 |
+
Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
|
| 1857 |
+
These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
|
| 1858 |
+
|
| 1859 |
+
Args:
|
| 1860 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 1861 |
+
Indices of input sequence tokens in the vocabulary.
|
| 1862 |
+
|
| 1863 |
+
Returns:
|
| 1864 |
+
image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
|
| 1865 |
+
video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
|
| 1866 |
+
"""
|
| 1867 |
+
image_token_id = self.config.image_token_id
|
| 1868 |
+
video_token_id = self.config.video_token_id
|
| 1869 |
+
vision_start_token_id = self.config.vision_start_token_id
|
| 1870 |
+
|
| 1871 |
+
if inputs_embeds is not None:
|
| 1872 |
+
vision_start_mask = (
|
| 1873 |
+
inputs_embeds
|
| 1874 |
+
== self.get_input_embeddings()(
|
| 1875 |
+
torch.tensor(
|
| 1876 |
+
vision_start_token_id,
|
| 1877 |
+
dtype=torch.long,
|
| 1878 |
+
device=inputs_embeds.device,
|
| 1879 |
+
)
|
| 1880 |
+
)
|
| 1881 |
+
)[..., 0]
|
| 1882 |
+
image_mask = (
|
| 1883 |
+
inputs_embeds
|
| 1884 |
+
== self.get_input_embeddings()(
|
| 1885 |
+
torch.tensor(
|
| 1886 |
+
image_token_id, dtype=torch.long, device=inputs_embeds.device
|
| 1887 |
+
)
|
| 1888 |
+
)
|
| 1889 |
+
)[..., 0]
|
| 1890 |
+
video_mask = (
|
| 1891 |
+
inputs_embeds
|
| 1892 |
+
== self.get_input_embeddings()(
|
| 1893 |
+
torch.tensor(
|
| 1894 |
+
video_token_id, dtype=torch.long, device=inputs_embeds.device
|
| 1895 |
+
)
|
| 1896 |
+
)
|
| 1897 |
+
)[..., 0]
|
| 1898 |
+
else:
|
| 1899 |
+
vision_start_mask = input_ids == vision_start_token_id
|
| 1900 |
+
image_mask = input_ids == image_token_id
|
| 1901 |
+
video_mask = input_ids == video_token_id
|
| 1902 |
+
|
| 1903 |
+
vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
|
| 1904 |
+
image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
|
| 1905 |
+
video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
|
| 1906 |
+
|
| 1907 |
+
return image_nums, video_nums
|
| 1908 |
+
|
| 1909 |
+
def _expand_inputs_for_generation(
|
| 1910 |
+
self,
|
| 1911 |
+
expand_size: int = 1,
|
| 1912 |
+
is_encoder_decoder: bool = False,
|
| 1913 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1914 |
+
**model_kwargs,
|
| 1915 |
+
) -> tuple[torch.LongTensor, dict[str, Any]]:
|
| 1916 |
+
# Overwritten -- Support for expanding tensors without a batch size dimension
|
| 1917 |
+
# e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
|
| 1918 |
+
# pixel_values.shape[0] is sum(seqlen_images for samples)
|
| 1919 |
+
# image_grid_thw.shape[0] is sum(num_images for samples)
|
| 1920 |
+
|
| 1921 |
+
if expand_size == 1:
|
| 1922 |
+
return input_ids, model_kwargs
|
| 1923 |
+
|
| 1924 |
+
visual_keys = [
|
| 1925 |
+
"pixel_values",
|
| 1926 |
+
"image_grid_thw",
|
| 1927 |
+
"pixel_values_videos",
|
| 1928 |
+
"video_grid_thw",
|
| 1929 |
+
"second_per_grid_ts",
|
| 1930 |
+
]
|
| 1931 |
+
|
| 1932 |
+
def _expand_dict_for_generation_visual(dict_to_expand):
|
| 1933 |
+
image_grid_thw = model_kwargs.get("image_grid_thw", None)
|
| 1934 |
+
video_grid_thw = model_kwargs.get("video_grid_thw", None)
|
| 1935 |
+
image_nums, video_nums = self._get_image_nums_and_video_nums(
|
| 1936 |
+
input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
|
| 1937 |
+
)
|
| 1938 |
+
|
| 1939 |
+
def _repeat_interleave_samples(x, lengths, repeat_times):
|
| 1940 |
+
samples = torch.split(x, lengths)
|
| 1941 |
+
repeat_args = [repeat_times] + [1] * (x.dim() - 1)
|
| 1942 |
+
result = torch.cat(
|
| 1943 |
+
[sample.repeat(*repeat_args) for sample in samples], dim=0
|
| 1944 |
+
)
|
| 1945 |
+
return result
|
| 1946 |
+
|
| 1947 |
+
for key in dict_to_expand:
|
| 1948 |
+
if key == "pixel_values":
|
| 1949 |
+
# split images into samples
|
| 1950 |
+
samples = torch.split(image_grid_thw, list(image_nums))
|
| 1951 |
+
# compute the sequence length of images for each sample
|
| 1952 |
+
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
|
| 1953 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1954 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1955 |
+
)
|
| 1956 |
+
elif key == "image_grid_thw":
|
| 1957 |
+
# get the num of images for each sample
|
| 1958 |
+
lengths = list(image_nums)
|
| 1959 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1960 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1961 |
+
)
|
| 1962 |
+
elif key == "pixel_values_videos":
|
| 1963 |
+
samples = torch.split(video_grid_thw, list(video_nums))
|
| 1964 |
+
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
|
| 1965 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1966 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1967 |
+
)
|
| 1968 |
+
elif key == "video_grid_thw":
|
| 1969 |
+
lengths = list(video_nums)
|
| 1970 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1971 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1972 |
+
)
|
| 1973 |
+
elif key == "second_per_grid_ts":
|
| 1974 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1975 |
+
dict_to_expand[key],
|
| 1976 |
+
lengths=list(video_nums),
|
| 1977 |
+
repeat_times=expand_size,
|
| 1978 |
+
)
|
| 1979 |
+
return dict_to_expand
|
| 1980 |
+
|
| 1981 |
+
def _expand_dict_for_generation(dict_to_expand):
|
| 1982 |
+
for key in dict_to_expand:
|
| 1983 |
+
if (
|
| 1984 |
+
key != "cache_position"
|
| 1985 |
+
and dict_to_expand[key] is not None
|
| 1986 |
+
and isinstance(dict_to_expand[key], torch.Tensor)
|
| 1987 |
+
and key not in visual_keys
|
| 1988 |
+
):
|
| 1989 |
+
dict_to_expand[key] = dict_to_expand[key].repeat_interleave(
|
| 1990 |
+
expand_size, dim=0
|
| 1991 |
+
)
|
| 1992 |
+
return dict_to_expand
|
| 1993 |
+
|
| 1994 |
+
model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
|
| 1995 |
+
|
| 1996 |
+
if input_ids is not None:
|
| 1997 |
+
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
|
| 1998 |
+
|
| 1999 |
+
model_kwargs = _expand_dict_for_generation(model_kwargs)
|
| 2000 |
+
|
| 2001 |
+
if is_encoder_decoder:
|
| 2002 |
+
if model_kwargs.get("encoder_outputs") is None:
|
| 2003 |
+
raise ValueError(
|
| 2004 |
+
"If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
|
| 2005 |
+
)
|
| 2006 |
+
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(
|
| 2007 |
+
model_kwargs["encoder_outputs"]
|
| 2008 |
+
)
|
| 2009 |
+
|
| 2010 |
+
return input_ids, model_kwargs
|
| 2011 |
+
|
| 2012 |
+
|
| 2013 |
+
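# NOTE: illustrative sketch, not part of the upstream model file. It shows the
# per-sample repetition used by `_expand_inputs_for_generation` above: visual tensors
# have no batch dimension, so each sample's slice (its length derived from the grid
# sizes) is repeated `expand_size` times instead of a plain `repeat_interleave` over
# dim 0. The lengths below are toy values.
def _sketch_repeat_interleave_samples(expand_size: int = 2):
    import torch

    x = torch.arange(6).view(6, 1)   # rows 0..2 belong to sample A, rows 3..5 to sample B
    lengths = [3, 3]
    samples = torch.split(x, lengths)
    repeat_args = [expand_size] + [1] * (x.dim() - 1)
    out = torch.cat([s.repeat(*repeat_args) for s in samples], dim=0)
    return out  # row order: A A B B -> [0,1,2, 0,1,2, 3,4,5, 3,4,5]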
__all__ = [
|
| 2014 |
+
"Qwen2_5_VLForConditionalGeneration",
|
| 2015 |
+
"Qwen2_5_VLModel",
|
| 2016 |
+
"Qwen2_5_VLPreTrainedModel",
|
| 2017 |
+
"Qwen2_5_VLTextModel",
|
| 2018 |
+
]
|
videoauto_r1/modeling_qwen3_vl_patched.py
ADDED
|
@@ -0,0 +1,1824 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/qwen3_vl/modular_qwen3_vl.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_qwen3_vl.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_kernel_forward_from_hub
from transformers.masking_utils import create_causal_mask

from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
from transformers.modeling_rope_utils import dynamic_rope_update, ROPE_INIT_FUNCTIONS
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel

from transformers.models.qwen3_vl.configuration_qwen3_vl import (
    Qwen3VLConfig,
    Qwen3VLTextConfig,
    Qwen3VLVisionConfig,
)
from transformers.processing_utils import Unpack
from transformers.utils import (
    auto_docstring,
    is_torchdynamo_compiling,
    TransformersKwargs,
)
from transformers.utils.deprecation import deprecate_kwarg
from transformers.utils.generic import check_model_inputs

class Qwen3VLVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.linear_fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
        self.linear_fc2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        return self.linear_fc2(self.act_fn(self.linear_fc1(hidden_state)))


class Qwen3VLVisionPatchEmbed(nn.Module):
    def __init__(self, config) -> None:
        super().__init__()
        self.patch_size = config.patch_size
        self.temporal_patch_size = config.temporal_patch_size
        self.in_channels = config.in_channels
        self.embed_dim = config.hidden_size

        kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
        self.proj = nn.Conv3d(
            self.in_channels,
            self.embed_dim,
            kernel_size=kernel_size,
            stride=kernel_size,
            bias=True,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        target_dtype = self.proj.weight.dtype
        hidden_states = hidden_states.view(
            -1,
            self.in_channels,
            self.temporal_patch_size,
            self.patch_size,
            self.patch_size,
        )
        hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(
            -1, self.embed_dim
        )
        return hidden_states

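# Shape sketch (illustrative only; the concrete config values are assumptions):
# with in_channels=3, temporal_patch_size=2 and patch_size=16, each flattened
# patch vector of length 3 * 2 * 16 * 16 = 1536 is viewed as (1, 3, 2, 16, 16);
# because the Conv3d kernel equals its stride, the convolution emits exactly
# one embed_dim vector per patch, i.e. a non-overlapping "patchify" projection.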
class Qwen3VLVisionRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, seqlen: int) -> torch.Tensor:
        seq = torch.arange(
            seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
        )
        freqs = torch.outer(seq, self.inv_freq)
        return freqs

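# Illustrative note (not in the upstream file): forward(seqlen) returns a
# (seqlen, dim // 2) table with freqs[p, i] = p * theta**(-2i / dim), i.e. the
# phase angle of position p at frequency channel i; callers later take cos/sin
# of these angles to build the rotary position embeddings.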
class Qwen3VLVisionPatchMerger(nn.Module):
    def __init__(self, config: Qwen3VLVisionConfig, use_postshuffle_norm=False) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
        self.use_postshuffle_norm = use_postshuffle_norm
        self.norm = nn.LayerNorm(
            self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6
        )
        self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
        self.act_fn = nn.GELU()
        self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.norm(
            x.view(-1, self.hidden_size) if self.use_postshuffle_norm else x
        ).view(-1, self.hidden_size)
        x = self.linear_fc2(self.act_fn(self.linear_fc1(x)))
        return x

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

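# Worked example (illustrative only): for x = [a, b, c, d], rotate_half yields
# [-c, -d, a, b], so (x * cos) + (rotate_half(x) * sin) applies the planar
# rotation [a*cos - c*sin, b*cos - d*sin, c*cos + a*sin, d*cos + b*sin],
# pairing each channel in the first half with its partner in the second half.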
def apply_rotary_pos_emb_vision(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    orig_q_dtype = q.dtype
    orig_k_dtype = k.dtype
    q, k = q.float(), k.float()
    cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    q_embed = q_embed.to(orig_q_dtype)
    k_embed = k_embed.to(orig_k_dtype)
    return q_embed, k_embed

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

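# Grouped-query attention sketch (illustrative head counts, not from this file):
# with 32 query heads and 8 KV heads, n_rep = 32 // 8 = 4, so each KV head is
# broadcast to serve 4 query heads, taking (B, 8, S, D) to (B, 32, S, D)
# without allocating additional KV projection weights.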
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
        query.dtype
    )
    attn_weights = nn.functional.dropout(
        attn_weights, p=dropout, training=module.training
    )
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

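# In equation form this is softmax(Q K^T * scaling + mask) V with
# scaling = head_dim**-0.5; the additive mask carries large negative values
# (not zeros) at disallowed positions, so they vanish after the softmax,
# which is computed in float32 for numerical stability before casting back.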
class Qwen3VLVisionAttention(nn.Module):
    def __init__(self, config: Qwen3VLVisionConfig) -> None:
        super().__init__()
        self.dim = config.hidden_size
        self.num_heads = config.num_heads
        self.head_dim = self.dim // self.num_heads
        self.num_key_value_groups = 1  # needed for eager attention
        self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
        self.proj = nn.Linear(self.dim, self.dim)
        self.scaling = self.head_dim**-0.5
        self.config = config
        self.attention_dropout = 0.0
        self.is_causal = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: Optional[torch.Tensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> torch.Tensor:
        seq_length = hidden_states.shape[0]
        query_states, key_states, value_states = (
            self.qkv(hidden_states)
            .reshape(seq_length, 3, self.num_heads, -1)
            .permute(1, 0, 2, 3)
            .unbind(0)
        )
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb_vision(
            query_states, key_states, cos, sin
        )

        query_states = query_states.transpose(0, 1).unsqueeze(0)
        key_states = key_states.transpose(0, 1).unsqueeze(0)
        value_states = value_states.transpose(0, 1).unsqueeze(0)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[
                self.config._attn_implementation
            ]

        if self.config._attn_implementation == "flash_attention_2":
            # Flash Attention 2: Use cu_seqlens for variable length attention
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
            attn_output, _ = attention_interface(
                self,
                query_states,
                key_states,
                value_states,
                attention_mask=None,
                scaling=self.scaling,
                dropout=0.0 if not self.training else self.attention_dropout,
                cu_seq_lens_q=cu_seqlens,
                cu_seq_lens_k=cu_seqlens,
                max_length_q=max_seqlen,
                max_length_k=max_seqlen,
                is_causal=False,
                **kwargs,
            )
        else:
            # Other implementations: Process each chunk separately
            lengths = cu_seqlens[1:] - cu_seqlens[:-1]
            splits = [
                torch.split(tensor, lengths.tolist(), dim=2)
                for tensor in (query_states, key_states, value_states)
            ]

            attn_outputs = [
                attention_interface(
                    self,
                    q,
                    k,
                    v,
                    attention_mask=None,
                    scaling=self.scaling,
                    dropout=0.0 if not self.training else self.attention_dropout,
                    is_causal=False,
                    **kwargs,
                )[0]
                for q, k, v in zip(*splits)
            ]
            attn_output = torch.cat(attn_outputs, dim=1)

        attn_output = attn_output.reshape(seq_length, -1).contiguous()
        attn_output = self.proj(attn_output)
        return attn_output

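# cu_seqlens sketch (illustrative only): the vision tokens of all images and
# frames are packed into one long sequence, and cu_seqlens marks the cumulative
# boundaries; e.g. cu_seqlens = [0, 4, 9] packs a 4-patch image followed by a
# 5-patch image, and attention is restricted to each [start, end) slice so
# patches from different images never attend to one another.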
class Qwen3VLVisionBlock(GradientCheckpointingLayer):
    def __init__(self, config, attn_implementation: str = "sdpa") -> None:
        super().__init__()
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.attn = Qwen3VLVisionAttention(config=config)
        self.mlp = Qwen3VLVisionMLP(config=config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: Optional[torch.Tensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> torch.Tensor:
        hidden_states = hidden_states + self.attn(
            self.norm1(hidden_states),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
        return hidden_states

class Qwen3VLTextRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: Qwen3VLTextConfig, device=None):
        super().__init__()
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", "default")
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

        self.mrope_section = config.rope_scaling.get("mrope_section", [24, 20, 20])

    def apply_interleaved_mrope(self, freqs, mrope_section):
        """Apply interleaved MRoPE to 3D rotary embeddings.
        Reorganizes frequency layout from chunked [TTT...HHH...WWW] to
        interleaved [THTHWHTHW...TT], preserving frequency continuity.
        args:
            x: (3, bs, seq_len, head_dim // 2)
            mrope_section: (3,)
        returns:
            x_t: (bs, seq_len, head_dim // 2)
        """
        freqs_t = freqs[0]  # just overwrite the first dimension T
        for dim, offset in enumerate((1, 2), start=1):  # H, W
            length = mrope_section[dim] * 3
            idx = slice(offset, length, 3)
            freqs_t[..., idx] = freqs[dim, ..., idx]
        return freqs_t

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        # In contrast to other models, Qwen3VL has different position ids for the grids
        # So we expand the inv_freq to shape (3, ...)
        if position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
        inv_freq_expanded = (
            self.inv_freq[None, None, :, None]
            .float()
            .expand(3, position_ids.shape[1], -1, 1)
        )
        position_ids_expanded = position_ids[
            :, :, None, :
        ].float()  # shape (3, bs, 1, positions)

        device_type = (
            x.device.type
            if isinstance(x.device.type, str) and x.device.type != "mps"
            else "cpu"
        )
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (
                inv_freq_expanded.float() @ position_ids_expanded.float()
            ).transpose(2, 3)
            freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

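# MRoPE layout sketch (illustrative only): with mrope_section = [24, 20, 20]
# and head_dim // 2 = 64, channels 1, 4, ..., 58 (stride 3, offset 1) take
# their phase from the height position ids and channels 2, 5, ..., 59 from the
# width ids; every remaining channel, including the tail past index 59, keeps
# the temporal/text position, so one (bs, seq, head_dim // 2) table fuses t/h/w.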
@use_kernel_forward_from_hub("RMSNorm")
class Qwen3VLTextRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Qwen3VLTextRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"

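# In formula form: y = w * x / sqrt(mean(x**2) + eps). Unlike LayerNorm there
# is no mean subtraction and no bias; the reduction runs in float32 before
# casting back, which keeps the normalization stable under bf16/fp16 inference.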
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

class Qwen3VLTextAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Qwen3VLTextConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(
            config, "head_dim", config.hidden_size // config.num_attention_heads
        )
        self.num_key_value_groups = (
            config.num_attention_heads // config.num_key_value_heads
        )
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size,
            config.num_attention_heads * self.head_dim,
            bias=config.attention_bias,
        )
        self.k_proj = nn.Linear(
            config.hidden_size,
            config.num_key_value_heads * self.head_dim,
            bias=config.attention_bias,
        )
        self.v_proj = nn.Linear(
            config.hidden_size,
            config.num_key_value_heads * self.head_dim,
            bias=config.attention_bias,
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim,
            config.hidden_size,
            bias=config.attention_bias,
        )
        self.q_norm = Qwen3VLTextRMSNorm(
            self.head_dim, eps=config.rms_norm_eps
        )  # unlike olmo, only on the head dim!
        self.k_norm = Qwen3VLTextRMSNorm(
            self.head_dim, eps=config.rms_norm_eps
        )  # thus post q_norm does not need reshape

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(
            self.q_proj(hidden_states).view(hidden_shape)
        ).transpose(1, 2)
        key_states = self.k_norm(
            self.k_proj(hidden_states).view(hidden_shape)
        ).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(
            query_states, key_states, cos, sin
        )

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(
                key_states, value_states, self.layer_idx, cache_kwargs
            )

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[
                self.config._attn_implementation
            ]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

class Qwen3VLTextMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

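# This is the SwiGLU-style gated MLP: down_proj(act(gate_proj(x)) * up_proj(x)).
# For a hidden state x of shape (B, S, hidden_size), both projections expand to
# intermediate_size, are combined elementwise, and are then projected back down.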
class Qwen3VLTextDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Qwen3VLTextConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = Qwen3VLTextAttention(config=config, layer_idx=layer_idx)

        self.mlp = Qwen3VLTextMLP(config)
        self.input_layernorm = Qwen3VLTextRMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.post_attention_layernorm = Qwen3VLTextRMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states

@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Qwen3VL outputs, with hidden states and attentions.
    """
)
class Qwen3VLModelOutputWithPast(ModelOutput):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
        The rope index difference between sequence length and multimodal rope.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    rope_deltas: Optional[torch.LongTensor] = None

@auto_docstring
class Qwen3VLPreTrainedModel(PreTrainedModel):
    config: Qwen3VLConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True

    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Qwen3VLTextDecoderLayer,
        "attentions": Qwen3VLTextAttention,
    }

class Qwen3VLVisionModel(Qwen3VLPreTrainedModel):
    config: Qwen3VLVisionConfig
    _no_split_modules = ["Qwen3VLVisionBlock"]

    def __init__(self, config, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.spatial_merge_size = config.spatial_merge_size
        self.patch_size = config.patch_size
        self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size

        self.patch_embed = Qwen3VLVisionPatchEmbed(
            config=config,
        )

        self.pos_embed = nn.Embedding(
            config.num_position_embeddings, config.hidden_size
        )
        self.num_grid_per_side = int(config.num_position_embeddings**0.5)

        head_dim = config.hidden_size // config.num_heads
        self.rotary_pos_emb = Qwen3VLVisionRotaryEmbedding(head_dim // 2)

        self.blocks = nn.ModuleList(
            [Qwen3VLVisionBlock(config) for _ in range(config.depth)]
        )
        self.merger = Qwen3VLVisionPatchMerger(
            config=config,
            use_postshuffle_norm=False,
        )

        self.deepstack_visual_indexes = config.deepstack_visual_indexes
        self.deepstack_merger_list = nn.ModuleList(
            [
                Qwen3VLVisionPatchMerger(
                    config=config,
                    use_postshuffle_norm=True,
                )
                for _ in range(len(config.deepstack_visual_indexes))
            ]
        )

        self.gradient_checkpointing = False

    def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
        merge_size = self.spatial_merge_size

        max_hw = int(grid_thw[:, 1:].max().item())
        freq_table = self.rotary_pos_emb(max_hw)  # (max_hw, dim // 2)
        device = freq_table.device

        total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
        pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)

        offset = 0
        for num_frames, height, width in grid_thw:
            merged_h, merged_w = height // merge_size, width // merge_size

            block_rows = torch.arange(merged_h, device=device)  # block row indices
            block_cols = torch.arange(merged_w, device=device)  # block col indices
            intra_row = torch.arange(
                merge_size, device=device
            )  # intra-block row offsets
            intra_col = torch.arange(
                merge_size, device=device
            )  # intra-block col offsets

            # Compute full-resolution positions
            row_idx = (
                block_rows[:, None, None, None] * merge_size
                + intra_row[None, None, :, None]
            )
            col_idx = (
                block_cols[None, :, None, None] * merge_size
                + intra_col[None, None, None, :]
            )

            row_idx = row_idx.expand(
                merged_h, merged_w, merge_size, merge_size
            ).reshape(-1)
            col_idx = col_idx.expand(
                merged_h, merged_w, merge_size, merge_size
            ).reshape(-1)

            coords = torch.stack((row_idx, col_idx), dim=-1)

            if num_frames > 1:
                coords = coords.repeat(num_frames, 1)

            num_tokens = coords.shape[0]
            pos_ids[offset : offset + num_tokens] = coords
            offset += num_tokens

        embeddings = freq_table[pos_ids]  # lookup rotary embeddings
        embeddings = embeddings.flatten(1)
        return embeddings

    def fast_pos_embed_interpolate(self, grid_thw):
        grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]

        idx_list = [[] for _ in range(4)]
        weight_list = [[] for _ in range(4)]

        for t, h, w in zip(grid_ts, grid_hs, grid_ws):
            h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
            w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)

            h_idxs_floor = h_idxs.int()
            w_idxs_floor = w_idxs.int()
            h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
            w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)

            dh = h_idxs - h_idxs_floor
            dw = w_idxs - w_idxs_floor

            base_h = h_idxs_floor * self.num_grid_per_side
            base_h_ceil = h_idxs_ceil * self.num_grid_per_side

            indices = [
                (base_h[None].T + w_idxs_floor[None]).flatten(),
                (base_h[None].T + w_idxs_ceil[None]).flatten(),
                (base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
                (base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
            ]

            weights = [
                ((1 - dh)[None].T * (1 - dw)[None]).flatten(),
                ((1 - dh)[None].T * dw[None]).flatten(),
                (dh[None].T * (1 - dw)[None]).flatten(),
                (dh[None].T * dw[None]).flatten(),
            ]

            for i in range(4):
                idx_list[i].extend(indices[i].tolist())
                weight_list[i].extend(weights[i].tolist())

        idx_tensor = torch.tensor(
            idx_list, dtype=torch.long, device=self.pos_embed.weight.device
        )
        weight_tensor = torch.tensor(
            weight_list,
            dtype=self.pos_embed.weight.dtype,
            device=self.pos_embed.weight.device,
        )
        pos_embeds = self.pos_embed(idx_tensor) * weight_tensor[:, :, None]
        patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]

        patch_pos_embeds = patch_pos_embeds.split(
            [h * w for h, w in zip(grid_hs, grid_ws)]
        )

        patch_pos_embeds_permute = []
        merge_size = self.config.spatial_merge_size
        for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
            pos_embed = pos_embed.repeat(t, 1)
            pos_embed = (
                pos_embed.view(
                    t, h // merge_size, merge_size, w // merge_size, merge_size, -1
                )
                .permute(0, 1, 3, 2, 4, 5)
                .flatten(0, 4)
            )
            patch_pos_embeds_permute.append(pos_embed)
        patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
        return patch_pos_embeds

    def forward(
        self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
                The final hidden states of the model.
            grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
                The temporal, height and width of feature shape of each image in LLM.

        Returns:
            `torch.Tensor`: hidden_states.
        """
        hidden_states = self.patch_embed(hidden_states)

        pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
        hidden_states = hidden_states + pos_embeds

        rotary_pos_emb = self.rot_pos_emb(grid_thw)

        seq_len, _ = hidden_states.size()
        hidden_states = hidden_states.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
        position_embeddings = (emb.cos(), emb.sin())

        cu_seqlens = torch.repeat_interleave(
            grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
        ).cumsum(
            dim=0,
            # Select dtype based on the following factors:
            #  - FA2 requires that cu_seqlens_q must have dtype int32
            #  - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
            # See https://github.com/huggingface/transformers/pull/34852 for more information
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)

        deepstack_feature_lists = []
        for layer_num, blk in enumerate(self.blocks):
            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            if layer_num in self.deepstack_visual_indexes:
                deepstack_feature = self.deepstack_merger_list[
                    self.deepstack_visual_indexes.index(layer_num)
                ](hidden_states)
                deepstack_feature_lists.append(deepstack_feature)

        hidden_states = self.merger(hidden_states)

        return hidden_states, deepstack_feature_lists

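# fast_pos_embed_interpolate in a nutshell (illustrative only): the learned
# pos_embed is a num_grid_per_side x num_grid_per_side table; for an h x w
# patch grid, each target cell is bilinearly interpolated from its four
# nearest table entries with weights (1-dh)(1-dw), (1-dh)dw, dh(1-dw), dh*dw,
# so arbitrary input resolutions reuse one fixed-size embedding table.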
@auto_docstring(
    custom_intro=(
        "Text part of Qwen3VL, "
        "not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
    )
)
class Qwen3VLTextModel(Qwen3VLPreTrainedModel):
    config: Qwen3VLTextConfig
    _no_split_modules = ["Qwen3VLTextDecoderLayer"]

    def __init__(self, config: Qwen3VLTextConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(
            config.vocab_size, config.hidden_size, self.padding_idx
        )
        self.layers = nn.ModuleList(
            [
                Qwen3VLTextDecoderLayer(config, layer_idx)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Qwen3VLTextRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        # args for deepstack
        visual_pos_masks: Optional[torch.Tensor] = None,
        deepstack_visual_embeds: Optional[list[torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[tuple, BaseModelOutputWithPast]:
        r"""
        visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):
            The mask of the visual positions.
        deepstack_visual_embeds (`list[torch.Tensor]`, *optional*):
            The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim).
            The feature is extracted from the different visual encoder layers, and fed to the decoder
            hidden states. It's from the paper DeepStack (https://arxiv.org/abs/2406.04334).
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You must specify exactly one of input_ids or inputs_embeds"
            )

        # torch.jit.trace() doesn't support cache objects in the output
        if use_cache and past_key_values is None and not torch.jit.is_tracing():
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = (
                past_key_values.get_seq_length() if past_key_values is not None else 0
            )
            cache_position = torch.arange(
                past_seen_tokens,
                past_seen_tokens + inputs_embeds.shape[1],
                device=inputs_embeds.device,
            )

        # the hard coded `3` is for temporal, height and width.
        if position_ids is None:
            position_ids = cache_position.view(1, 1, -1).expand(
                3, inputs_embeds.shape[0], -1
            )
        elif position_ids.ndim == 2:
            position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)

        if position_ids.ndim == 3 and position_ids.shape[0] == 4:
            text_position_ids = position_ids[0]
            position_ids = position_ids[1:]
        else:
            text_position_ids = position_ids[0]

        attention_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=text_position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        for layer_idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=text_position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            hidden_states = layer_outputs

            # add visual features to the hidden states of first several layers
            if deepstack_visual_embeds is not None and layer_idx in range(
                len(deepstack_visual_embeds)
            ):
                hidden_states = self._deepstack_process(
                    hidden_states,
                    visual_pos_masks,
                    deepstack_visual_embeds[layer_idx],
                )

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )

    def _deepstack_process(
        self,
        hidden_states: torch.Tensor,
        visual_pos_masks: torch.Tensor,
        visual_embeds: torch.Tensor,
    ):
        visual_pos_masks = visual_pos_masks.to(hidden_states.device)
        visual_embeds = visual_embeds.to(hidden_states.device, hidden_states.dtype)
        local_this = hidden_states[visual_pos_masks, :].clone() + visual_embeds
        hidden_states[visual_pos_masks, :] = local_this
        return hidden_states

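# DeepStack sketch (illustrative only; the layer indices are assumptions): if
# the vision tower exports features from encoder layers [2, 5, 8], then decoder
# layer 0 adds the layer-2 features, decoder layer 1 the layer-5 features, and
# decoder layer 2 the layer-8 features, each added only at the positions where
# visual_pos_masks is True, so text positions are left untouched.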
@auto_docstring
class Qwen3VLModel(Qwen3VLPreTrainedModel):
    base_model_prefix = ""
    _checkpoint_conversion_mapping = {}
    # Reference: fix gemma3 grad acc #37208
    accepts_loss_kwargs = False
    config: Qwen3VLConfig
    _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]

    def __init__(self, config):
        super().__init__(config)
        self.visual = Qwen3VLVisionModel._from_config(config.vision_config)
        self.language_model = Qwen3VLTextModel._from_config(config.text_config)
        self.rope_deltas = None  # cache rope_deltas here

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def set_decoder(self, decoder):
        self.language_model = decoder

    def get_decoder(self):
        return self.language_model

    def get_rope_index(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
        video_grid_thw: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Different from the original implementation, Qwen3VL uses timestamps rather than absolute time position ids."""

        # Since we use timestamps to separate videos, like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>, the video_grid_thw should also be split
        if video_grid_thw is not None:
            video_grid_thw = torch.repeat_interleave(
                video_grid_thw, video_grid_thw[:, 0], dim=0
            )
            video_grid_thw[:, 0] = 1

        spatial_merge_size = self.config.vision_config.spatial_merge_size
        image_token_id = self.config.image_token_id
        video_token_id = self.config.video_token_id
        vision_start_token_id = self.config.vision_start_token_id
        mrope_position_deltas = []
        if input_ids is not None and (
            image_grid_thw is not None or video_grid_thw is not None
        ):
            total_input_ids = input_ids
            if attention_mask is None:
                attention_mask = torch.ones_like(total_input_ids)
            position_ids = torch.ones(
                3,
                input_ids.shape[0],
                input_ids.shape[1],
                dtype=input_ids.dtype,
                device=input_ids.device,
            )
            image_index, video_index = 0, 0
            attention_mask = attention_mask.to(total_input_ids.device)
            for i, input_ids in enumerate(total_input_ids):
                input_ids = input_ids[attention_mask[i] == 1]
                image_nums, video_nums = 0, 0
                vision_start_indices = torch.argwhere(
                    input_ids == vision_start_token_id
                ).squeeze(1)
                vision_tokens = input_ids[vision_start_indices + 1]
                image_nums = (vision_tokens == image_token_id).sum()
                video_nums = (vision_tokens == video_token_id).sum()
                input_tokens = input_ids.tolist()
                llm_pos_ids_list: list = []
                st = 0
                remain_images, remain_videos = image_nums, video_nums
                for _ in range(image_nums + video_nums):
                    if image_token_id in input_tokens and remain_images > 0:
                        ed_image = input_tokens.index(image_token_id, st)
                    else:
                        ed_image = len(input_tokens) + 1
                    if video_token_id in input_tokens and remain_videos > 0:
                        ed_video = input_tokens.index(video_token_id, st)
                    else:
                        ed_video = len(input_tokens) + 1
                    if ed_image < ed_video:
                        t, h, w = (
                            image_grid_thw[image_index][0],
                            image_grid_thw[image_index][1],
                            image_grid_thw[image_index][2],
                        )
                        image_index += 1
                        remain_images -= 1
                        ed = ed_image

                    else:
                        t, h, w = (
                            video_grid_thw[video_index][0],
                            video_grid_thw[video_index][1],
                            video_grid_thw[video_index][2],
                        )
                        video_index += 1
                        remain_videos -= 1
                        ed = ed_video
                    llm_grid_t, llm_grid_h, llm_grid_w = (
                        t.item(),
                        h.item() // spatial_merge_size,
                        w.item() // spatial_merge_size,
                    )
                    text_len = ed - st

                    st_idx = (
                        llm_pos_ids_list[-1].max() + 1
                        if len(llm_pos_ids_list) > 0
                        else 0
                    )
                    llm_pos_ids_list.append(
                        torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
                    )

                    # t_index is always 0 because llm_grid_t is always 1 (we use timestamps to encode the temporal information for videos)
                    t_index = (
                        torch.arange(llm_grid_t)
                        .view(-1, 1)
                        .expand(-1, llm_grid_h * llm_grid_w)
                        .flatten()
                    )
                    h_index = (
                        torch.arange(llm_grid_h)
                        .view(1, -1, 1)
                        .expand(llm_grid_t, -1, llm_grid_w)
                        .flatten()
                    )
                    w_index = (
                        torch.arange(llm_grid_w)
                        .view(1, 1, -1)
                        .expand(llm_grid_t, llm_grid_h, -1)
                        .flatten()
                    )
                    llm_pos_ids_list.append(
                        torch.stack([t_index, h_index, w_index]) + text_len + st_idx
                    )
                    st = ed + llm_grid_t * llm_grid_h * llm_grid_w

                if st < len(input_tokens):
                    st_idx = (
                        llm_pos_ids_list[-1].max() + 1
                        if len(llm_pos_ids_list) > 0
                        else 0
                    )
                    text_len = len(input_tokens) - st
                    llm_pos_ids_list.append(
                        torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
                    )

                llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
                position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(
                    position_ids.device
                )
                mrope_position_deltas.append(
                    llm_positions.max() + 1 - len(total_input_ids[i])
                )
            mrope_position_deltas = torch.tensor(
                mrope_position_deltas, device=input_ids.device
            ).unsqueeze(1)
            return position_ids, mrope_position_deltas
        else:
            if attention_mask is not None:
                position_ids = attention_mask.long().cumsum(-1) - 1
                position_ids.masked_fill_(attention_mask == 0, 1)
                position_ids = (
                    position_ids.unsqueeze(0)
                    .expand(3, -1, -1)
                    .to(attention_mask.device)
                )
                max_position_ids = position_ids.max(0, keepdim=False)[0].max(
                    -1, keepdim=True
                )[0]
                mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
            else:
                position_ids = (
                    torch.arange(input_ids.shape[1], device=input_ids.device)
|
| 1202 |
+
.view(1, 1, -1)
|
| 1203 |
+
.expand(3, input_ids.shape[0], -1)
|
| 1204 |
+
)
|
| 1205 |
+
mrope_position_deltas = torch.zeros(
|
| 1206 |
+
[input_ids.shape[0], 1],
|
| 1207 |
+
device=input_ids.device,
|
| 1208 |
+
dtype=input_ids.dtype,
|
| 1209 |
+
)
|
| 1210 |
+
|
| 1211 |
+
return position_ids, mrope_position_deltas
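As a rough illustration of what get_rope_index computes above (a standalone sketch, not part of the patched model): for three text tokens followed by a single frame whose merged grid is 2x2, text tokens share one index across all three rope axes, while the vision tokens keep the temporal index fixed (time is carried by the timestamp tokens in the sequence) and enumerate the height/width grid.

import torch

text_len, grid_t, grid_h, grid_w = 3, 1, 2, 2
text_pos = torch.arange(text_len).view(1, -1).expand(3, -1)  # t = h = w for text tokens
t_idx = torch.arange(grid_t).view(-1, 1).expand(-1, grid_h * grid_w).flatten()
h_idx = torch.arange(grid_h).view(1, -1, 1).expand(grid_t, -1, grid_w).flatten()
w_idx = torch.arange(grid_w).view(1, 1, -1).expand(grid_t, grid_h, -1).flatten()
vis_pos = torch.stack([t_idx, h_idx, w_idx]) + text_len      # temporal index stays at 3
positions = torch.cat([text_pos, vis_pos], dim=1)
# positions[0] (t): [0, 1, 2, 3, 3, 3, 3]
# positions[1] (h): [0, 1, 2, 3, 3, 4, 4]
# positions[2] (w): [0, 1, 2, 3, 4, 3, 4]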
|
| 1212 |
+
|
| 1213 |
+
def get_video_features(
|
| 1214 |
+
self,
|
| 1215 |
+
pixel_values_videos: torch.FloatTensor,
|
| 1216 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1217 |
+
):
|
| 1218 |
+
"""
|
| 1219 |
+
Encodes videos into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
|
| 1220 |
+
|
| 1221 |
+
Args:
|
| 1222 |
+
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
|
| 1223 |
+
The tensors corresponding to the input videos.
|
| 1224 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1225 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1226 |
+
"""
|
| 1227 |
+
# Same implementation as for images
|
| 1228 |
+
return self.get_image_features(pixel_values_videos, video_grid_thw)
|
| 1229 |
+
|
| 1230 |
+
def get_image_features(
|
| 1231 |
+
self,
|
| 1232 |
+
pixel_values: torch.FloatTensor,
|
| 1233 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1234 |
+
):
|
| 1235 |
+
"""
|
| 1236 |
+
Encodes images into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
|
| 1237 |
+
|
| 1238 |
+
Args:
|
| 1239 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
|
| 1240 |
+
The tensors corresponding to the input images.
|
| 1241 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1242 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1243 |
+
"""
|
| 1244 |
+
pixel_values = pixel_values.type(self.visual.dtype)
|
| 1245 |
+
image_embeds, deepstack_image_embeds = self.visual(
|
| 1246 |
+
pixel_values, grid_thw=image_grid_thw
|
| 1247 |
+
)
|
| 1248 |
+
split_sizes = (
|
| 1249 |
+
image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2
|
| 1250 |
+
).tolist()
|
| 1251 |
+
image_embeds = torch.split(image_embeds, split_sizes)
|
| 1252 |
+
return image_embeds, deepstack_image_embeds
|
| 1253 |
+
|
| 1254 |
+
def get_placeholder_mask(
|
| 1255 |
+
self,
|
| 1256 |
+
input_ids: torch.LongTensor,
|
| 1257 |
+
inputs_embeds: torch.FloatTensor,
|
| 1258 |
+
image_features: Optional[torch.FloatTensor] = None,
|
| 1259 |
+
video_features: Optional[torch.FloatTensor] = None,
|
| 1260 |
+
):
|
| 1261 |
+
"""
|
| 1262 |
+
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
|
| 1263 |
+
equal to the length of multimodal features. If the lengths are different, an error is raised.
|
| 1264 |
+
"""
|
| 1265 |
+
if input_ids is None:
|
| 1266 |
+
special_image_mask = inputs_embeds == self.get_input_embeddings()(
|
| 1267 |
+
torch.tensor(
|
| 1268 |
+
self.config.image_token_id,
|
| 1269 |
+
dtype=torch.long,
|
| 1270 |
+
device=inputs_embeds.device,
|
| 1271 |
+
)
|
| 1272 |
+
)
|
| 1273 |
+
special_image_mask = special_image_mask.all(-1)
|
| 1274 |
+
special_video_mask = inputs_embeds == self.get_input_embeddings()(
|
| 1275 |
+
torch.tensor(
|
| 1276 |
+
self.config.video_token_id,
|
| 1277 |
+
dtype=torch.long,
|
| 1278 |
+
device=inputs_embeds.device,
|
| 1279 |
+
)
|
| 1280 |
+
)
|
| 1281 |
+
special_video_mask = special_video_mask.all(-1)
|
| 1282 |
+
else:
|
| 1283 |
+
special_image_mask = input_ids == self.config.image_token_id
|
| 1284 |
+
special_video_mask = input_ids == self.config.video_token_id
|
| 1285 |
+
|
| 1286 |
+
n_image_tokens = special_image_mask.sum()
|
| 1287 |
+
special_image_mask = (
|
| 1288 |
+
special_image_mask.unsqueeze(-1)
|
| 1289 |
+
.expand_as(inputs_embeds)
|
| 1290 |
+
.to(inputs_embeds.device)
|
| 1291 |
+
)
|
| 1292 |
+
if (
|
| 1293 |
+
image_features is not None
|
| 1294 |
+
and inputs_embeds[special_image_mask].numel() != image_features.numel()
|
| 1295 |
+
):
|
| 1296 |
+
raise ValueError(
|
| 1297 |
+
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
|
| 1298 |
+
)
|
| 1299 |
+
|
| 1300 |
+
n_video_tokens = special_video_mask.sum()
|
| 1301 |
+
special_video_mask = (
|
| 1302 |
+
special_video_mask.unsqueeze(-1)
|
| 1303 |
+
.expand_as(inputs_embeds)
|
| 1304 |
+
.to(inputs_embeds.device)
|
| 1305 |
+
)
|
| 1306 |
+
if (
|
| 1307 |
+
video_features is not None
|
| 1308 |
+
and inputs_embeds[special_video_mask].numel() != video_features.numel()
|
| 1309 |
+
):
|
| 1310 |
+
raise ValueError(
|
| 1311 |
+
f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
|
| 1312 |
+
)
|
| 1313 |
+
|
| 1314 |
+
return special_image_mask, special_video_mask
|
| 1315 |
+
|
| 1316 |
+
@auto_docstring
|
| 1317 |
+
@check_model_inputs
|
| 1318 |
+
def forward(
|
| 1319 |
+
self,
|
| 1320 |
+
input_ids: torch.LongTensor = None,
|
| 1321 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1322 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1323 |
+
past_key_values: Optional[Cache] = None,
|
| 1324 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1325 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1326 |
+
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
| 1327 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1328 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1329 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1330 |
+
**kwargs: Unpack[TransformersKwargs],
|
| 1331 |
+
) -> Union[tuple, Qwen3VLModelOutputWithPast]:
|
| 1332 |
+
r"""
|
| 1333 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1334 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1335 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1336 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1337 |
+
"""
|
| 1338 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
| 1339 |
+
raise ValueError(
|
| 1340 |
+
"You must specify exactly one of input_ids or inputs_embeds"
|
| 1341 |
+
)
|
| 1342 |
+
|
| 1343 |
+
if inputs_embeds is None:
|
| 1344 |
+
inputs_embeds = self.get_input_embeddings()(input_ids)
|
| 1345 |
+
|
| 1346 |
+
image_mask = None
|
| 1347 |
+
video_mask = None
|
| 1348 |
+
|
| 1349 |
+
if pixel_values is not None:
|
| 1350 |
+
image_embeds, deepstack_image_embeds = self.get_image_features(
|
| 1351 |
+
pixel_values, image_grid_thw
|
| 1352 |
+
)
|
| 1353 |
+
image_embeds = torch.cat(image_embeds, dim=0).to(
|
| 1354 |
+
inputs_embeds.device, inputs_embeds.dtype
|
| 1355 |
+
)
|
| 1356 |
+
image_mask, _ = self.get_placeholder_mask(
|
| 1357 |
+
input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
|
| 1358 |
+
)
|
| 1359 |
+
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
|
| 1360 |
+
|
| 1361 |
+
if pixel_values_videos is not None:
|
| 1362 |
+
video_embeds, deepstack_video_embeds = self.get_video_features(
|
| 1363 |
+
pixel_values_videos, video_grid_thw
|
| 1364 |
+
)
|
| 1365 |
+
video_embeds = torch.cat(video_embeds, dim=0).to(
|
| 1366 |
+
inputs_embeds.device, inputs_embeds.dtype
|
| 1367 |
+
)
|
| 1368 |
+
_, video_mask = self.get_placeholder_mask(
|
| 1369 |
+
input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
|
| 1370 |
+
)
|
| 1371 |
+
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
|
| 1372 |
+
|
| 1373 |
+
visual_pos_masks = None
|
| 1374 |
+
deepstack_visual_embeds = None
|
| 1375 |
+
if image_mask is not None and video_mask is not None:
|
| 1376 |
+
# aggregate visual_pos_masks and deepstack_visual_embeds
|
| 1377 |
+
image_mask = image_mask[..., 0]
|
| 1378 |
+
video_mask = video_mask[..., 0]
|
| 1379 |
+
visual_pos_masks = image_mask | video_mask
|
| 1380 |
+
deepstack_visual_embeds = []
|
| 1381 |
+
image_mask_joint = image_mask[visual_pos_masks]
|
| 1382 |
+
video_mask_joint = video_mask[visual_pos_masks]
|
| 1383 |
+
for img_embed, vid_embed in zip(
|
| 1384 |
+
deepstack_image_embeds, deepstack_video_embeds
|
| 1385 |
+
):
|
| 1386 |
+
embed_joint = img_embed.new_zeros(
|
| 1387 |
+
visual_pos_masks.sum(), img_embed.shape[-1]
|
| 1388 |
+
).to(img_embed.device)
|
| 1389 |
+
embed_joint[image_mask_joint, :] = img_embed
|
| 1390 |
+
embed_joint[video_mask_joint, :] = vid_embed
|
| 1391 |
+
deepstack_visual_embeds.append(embed_joint)
|
| 1392 |
+
elif image_mask is not None:
|
| 1393 |
+
image_mask = image_mask[..., 0]
|
| 1394 |
+
visual_pos_masks = image_mask
|
| 1395 |
+
deepstack_visual_embeds = deepstack_image_embeds
|
| 1396 |
+
elif video_mask is not None:
|
| 1397 |
+
video_mask = video_mask[..., 0]
|
| 1398 |
+
visual_pos_masks = video_mask
|
| 1399 |
+
deepstack_visual_embeds = deepstack_video_embeds
|
| 1400 |
+
|
| 1401 |
+
if position_ids is None:
|
| 1402 |
+
attention_mask_tensor = (
|
| 1403 |
+
attention_mask
|
| 1404 |
+
if not isinstance(attention_mask, dict)
|
| 1405 |
+
else attention_mask["full_attention"]
|
| 1406 |
+
)
|
| 1407 |
+
if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
|
| 1408 |
+
attention_mask_tensor = torch.diagonal(
|
| 1409 |
+
attention_mask_tensor[:, 0], dim1=1, dim2=2
|
| 1410 |
+
)
|
| 1411 |
+
# Only apply conversion for floating point tensors (inverted masks)
|
| 1412 |
+
if attention_mask_tensor.dtype.is_floating_point:
|
| 1413 |
+
attention_mask_tensor = (
|
| 1414 |
+
attention_mask_tensor
|
| 1415 |
+
/ torch.finfo(attention_mask_tensor.dtype).min
|
| 1416 |
+
)
|
| 1417 |
+
attention_mask_tensor = (1.0 - attention_mask_tensor).int()
|
| 1418 |
+
|
| 1419 |
+
# Calculate RoPE index once per generation in the pre-fill stage only.
|
| 1420 |
+
# When compiling, we can't check tensor values thus we check only input length
|
| 1421 |
+
# It is safe to assume that `length!=1` means we're in pre-fill because compiled
|
| 1422 |
+
# models currently cannot do assisted decoding
|
| 1423 |
+
prefill_compiled_stage = is_torchdynamo_compiling() and (
|
| 1424 |
+
(input_ids is not None and input_ids.shape[1] != 1)
|
| 1425 |
+
or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
|
| 1426 |
+
)
|
| 1427 |
+
prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
|
| 1428 |
+
(cache_position is not None and cache_position[0] == 0)
|
| 1429 |
+
or (past_key_values is None or past_key_values.get_seq_length() == 0)
|
| 1430 |
+
)
|
| 1431 |
+
if (
|
| 1432 |
+
prefill_compiled_stage or prefill_noncompiled_stage
|
| 1433 |
+
) or self.rope_deltas is None:
|
| 1434 |
+
position_ids, rope_deltas = self.get_rope_index(
|
| 1435 |
+
input_ids,
|
| 1436 |
+
image_grid_thw,
|
| 1437 |
+
video_grid_thw,
|
| 1438 |
+
attention_mask=attention_mask_tensor,
|
| 1439 |
+
)
|
| 1440 |
+
self.rope_deltas = rope_deltas
|
| 1441 |
+
# then use the prev pre-calculated rope-deltas to get the correct position ids
|
| 1442 |
+
else:
|
| 1443 |
+
batch_size, seq_length, _ = inputs_embeds.shape
|
| 1444 |
+
delta = (
|
| 1445 |
+
(cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
|
| 1446 |
+
if cache_position is not None
|
| 1447 |
+
else 0
|
| 1448 |
+
)
|
| 1449 |
+
position_ids = torch.arange(seq_length, device=inputs_embeds.device)
|
| 1450 |
+
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
|
| 1451 |
+
if cache_position is not None: # otherwise `deltas` is an int `0`
|
| 1452 |
+
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
|
| 1453 |
+
position_ids = position_ids.add(delta)
|
| 1454 |
+
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
|
| 1455 |
+
|
| 1456 |
+
outputs = self.language_model(
|
| 1457 |
+
input_ids=None,
|
| 1458 |
+
position_ids=position_ids,
|
| 1459 |
+
attention_mask=attention_mask,
|
| 1460 |
+
past_key_values=past_key_values,
|
| 1461 |
+
inputs_embeds=inputs_embeds,
|
| 1462 |
+
cache_position=cache_position,
|
| 1463 |
+
visual_pos_masks=visual_pos_masks,
|
| 1464 |
+
deepstack_visual_embeds=deepstack_visual_embeds,
|
| 1465 |
+
**kwargs,
|
| 1466 |
+
)
|
| 1467 |
+
|
| 1468 |
+
return Qwen3VLModelOutputWithPast(
|
| 1469 |
+
last_hidden_state=outputs.last_hidden_state,
|
| 1470 |
+
past_key_values=outputs.past_key_values,
|
| 1471 |
+
rope_deltas=self.rope_deltas,
|
| 1472 |
+
)
|
| 1473 |
+
|
| 1474 |
+
|
| 1475 |
+
@dataclass
|
| 1476 |
+
@auto_docstring(
|
| 1477 |
+
custom_intro="""
|
| 1478 |
+
Base class for Qwen3VL causal language model (or autoregressive) outputs.
|
| 1479 |
+
"""
|
| 1480 |
+
)
|
| 1481 |
+
class Qwen3VLCausalLMOutputWithPast(ModelOutput):
|
| 1482 |
+
r"""
|
| 1483 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 1484 |
+
Language modeling loss (for next-token prediction).
|
| 1485 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 1486 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 1487 |
+
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
| 1488 |
+
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
|
| 1489 |
+
|
| 1490 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
|
| 1491 |
+
`past_key_values` input) to speed up sequential decoding.
|
| 1492 |
+
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
|
| 1493 |
+
The rope index difference between sequence length and multimodal rope.
|
| 1494 |
+
"""
|
| 1495 |
+
|
| 1496 |
+
loss: Optional[torch.FloatTensor] = None
|
| 1497 |
+
logits: Optional[torch.FloatTensor] = None
|
| 1498 |
+
past_key_values: Optional[Cache] = None
|
| 1499 |
+
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
| 1500 |
+
attentions: Optional[tuple[torch.FloatTensor]] = None
|
| 1501 |
+
rope_deltas: Optional[torch.LongTensor] = None
|
| 1502 |
+
|
| 1503 |
+
|
| 1504 |
+
class Qwen3VLForConditionalGeneration(Qwen3VLPreTrainedModel, GenerationMixin):
|
| 1505 |
+
_checkpoint_conversion_mapping = {}
|
| 1506 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 1507 |
+
# Reference: fix gemma3 grad acc #37208
|
| 1508 |
+
accepts_loss_kwargs = False
|
| 1509 |
+
config: Qwen3VLConfig
|
| 1510 |
+
|
| 1511 |
+
def __init__(self, config):
|
| 1512 |
+
super().__init__(config)
|
| 1513 |
+
self.model = Qwen3VLModel(config)
|
| 1514 |
+
self.lm_head = nn.Linear(
|
| 1515 |
+
config.text_config.hidden_size, config.text_config.vocab_size, bias=False
|
| 1516 |
+
)
|
| 1517 |
+
|
| 1518 |
+
self.post_init()
|
| 1519 |
+
|
| 1520 |
+
def get_input_embeddings(self):
|
| 1521 |
+
return self.model.get_input_embeddings()
|
| 1522 |
+
|
| 1523 |
+
def set_input_embeddings(self, value):
|
| 1524 |
+
self.model.set_input_embeddings(value)
|
| 1525 |
+
|
| 1526 |
+
def set_decoder(self, decoder):
|
| 1527 |
+
self.model.set_decoder(decoder)
|
| 1528 |
+
|
| 1529 |
+
def get_decoder(self):
|
| 1530 |
+
return self.model.get_decoder()
|
| 1531 |
+
|
| 1532 |
+
def get_video_features(
|
| 1533 |
+
self,
|
| 1534 |
+
pixel_values_videos: torch.FloatTensor,
|
| 1535 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1536 |
+
):
|
| 1537 |
+
return self.model.get_video_features(pixel_values_videos, video_grid_thw)
|
| 1538 |
+
|
| 1539 |
+
def get_image_features(
|
| 1540 |
+
self,
|
| 1541 |
+
pixel_values: torch.FloatTensor,
|
| 1542 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1543 |
+
):
|
| 1544 |
+
return self.model.get_image_features(pixel_values, image_grid_thw)
|
| 1545 |
+
|
| 1546 |
+
# Make modules available through conditional class for BC
|
| 1547 |
+
@property
|
| 1548 |
+
def language_model(self):
|
| 1549 |
+
return self.model.language_model
|
| 1550 |
+
|
| 1551 |
+
@property
|
| 1552 |
+
def visual(self):
|
| 1553 |
+
return self.model.visual
|
| 1554 |
+
|
| 1555 |
+
@check_model_inputs
|
| 1556 |
+
def forward(
|
| 1557 |
+
self,
|
| 1558 |
+
input_ids: torch.LongTensor = None,
|
| 1559 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1560 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1561 |
+
past_key_values: Optional[Cache] = None,
|
| 1562 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1563 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1564 |
+
pixel_values: Optional[torch.Tensor] = None,
|
| 1565 |
+
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
| 1566 |
+
image_grid_thw: Optional[torch.LongTensor] = None,
|
| 1567 |
+
video_grid_thw: Optional[torch.LongTensor] = None,
|
| 1568 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1569 |
+
logits_to_keep: Union[int, torch.Tensor] = 0,
|
| 1570 |
+
**kwargs: Unpack[TransformersKwargs],
|
| 1571 |
+
) -> Union[tuple, Qwen3VLCausalLMOutputWithPast]:
|
| 1572 |
+
r"""
|
| 1573 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1574 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 1575 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 1576 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 1577 |
+
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
|
| 1578 |
+
The temporal, height and width of feature shape of each image in LLM.
|
| 1579 |
+
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
|
| 1580 |
+
The temporal, height and width of feature shape of each video in LLM.
|
| 1581 |
+
"""
|
| 1582 |
+
outputs = self.model(
|
| 1583 |
+
input_ids=input_ids,
|
| 1584 |
+
pixel_values=pixel_values,
|
| 1585 |
+
pixel_values_videos=pixel_values_videos,
|
| 1586 |
+
image_grid_thw=image_grid_thw,
|
| 1587 |
+
video_grid_thw=video_grid_thw,
|
| 1588 |
+
position_ids=position_ids,
|
| 1589 |
+
attention_mask=attention_mask,
|
| 1590 |
+
past_key_values=past_key_values,
|
| 1591 |
+
inputs_embeds=inputs_embeds,
|
| 1592 |
+
cache_position=cache_position,
|
| 1593 |
+
**kwargs,
|
| 1594 |
+
)
|
| 1595 |
+
|
| 1596 |
+
hidden_states = outputs[0]
|
| 1597 |
+
|
| 1598 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 1599 |
+
slice_indices = (
|
| 1600 |
+
slice(-logits_to_keep, None)
|
| 1601 |
+
if isinstance(logits_to_keep, int)
|
| 1602 |
+
else logits_to_keep
|
| 1603 |
+
)
|
| 1604 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 1605 |
+
|
| 1606 |
+
loss = None
|
| 1607 |
+
if labels is not None:
|
| 1608 |
+
loss = self.loss_function(
|
| 1609 |
+
logits=logits,
|
| 1610 |
+
labels=labels,
|
| 1611 |
+
vocab_size=self.config.text_config.vocab_size,
|
| 1612 |
+
)
|
| 1613 |
+
|
| 1614 |
+
return Qwen3VLCausalLMOutputWithPast(
|
| 1615 |
+
loss=loss,
|
| 1616 |
+
logits=logits,
|
| 1617 |
+
past_key_values=outputs.past_key_values,
|
| 1618 |
+
rope_deltas=outputs.rope_deltas,
|
| 1619 |
+
)
|
| 1620 |
+
|
| 1621 |
+
def prepare_inputs_for_generation(
|
| 1622 |
+
self,
|
| 1623 |
+
input_ids,
|
| 1624 |
+
past_key_values=None,
|
| 1625 |
+
attention_mask=None,
|
| 1626 |
+
inputs_embeds=None,
|
| 1627 |
+
cache_position=None,
|
| 1628 |
+
position_ids=None,
|
| 1629 |
+
use_cache=True,
|
| 1630 |
+
pixel_values=None,
|
| 1631 |
+
pixel_values_videos=None,
|
| 1632 |
+
image_grid_thw=None,
|
| 1633 |
+
video_grid_thw=None,
|
| 1634 |
+
**kwargs,
|
| 1635 |
+
):
|
| 1636 |
+
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
|
| 1637 |
+
|
| 1638 |
+
model_inputs = super().prepare_inputs_for_generation(
|
| 1639 |
+
input_ids,
|
| 1640 |
+
past_key_values=past_key_values,
|
| 1641 |
+
attention_mask=attention_mask,
|
| 1642 |
+
inputs_embeds=inputs_embeds,
|
| 1643 |
+
cache_position=cache_position,
|
| 1644 |
+
position_ids=position_ids,
|
| 1645 |
+
pixel_values=pixel_values,
|
| 1646 |
+
pixel_values_videos=pixel_values_videos,
|
| 1647 |
+
image_grid_thw=image_grid_thw,
|
| 1648 |
+
video_grid_thw=video_grid_thw,
|
| 1649 |
+
use_cache=use_cache,
|
| 1650 |
+
**kwargs,
|
| 1651 |
+
)
|
| 1652 |
+
|
| 1653 |
+
# Qwen3VL position_ids are prepared with rope_deltas in forward
|
| 1654 |
+
model_inputs["position_ids"] = None
|
| 1655 |
+
|
| 1656 |
+
if cache_position[0] != 0:
|
| 1657 |
+
model_inputs["pixel_values"] = None
|
| 1658 |
+
model_inputs["pixel_values_videos"] = None
|
| 1659 |
+
|
| 1660 |
+
return model_inputs
|
| 1661 |
+
|
| 1662 |
+
def _get_image_nums_and_video_nums(
|
| 1663 |
+
self,
|
| 1664 |
+
input_ids: Optional[torch.LongTensor],
|
| 1665 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
| 1666 |
+
) -> tuple[torch.Tensor, torch.Tensor]:
|
| 1667 |
+
"""
|
| 1668 |
+
Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
|
| 1669 |
+
These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
|
| 1670 |
+
|
| 1671 |
+
Args:
|
| 1672 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 1673 |
+
Indices of input sequence tokens in the vocabulary.
|
| 1674 |
+
|
| 1675 |
+
Returns:
|
| 1676 |
+
image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
|
| 1677 |
+
video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
|
| 1678 |
+
"""
|
| 1679 |
+
image_token_id = self.config.image_token_id
|
| 1680 |
+
video_token_id = self.config.video_token_id
|
| 1681 |
+
vision_start_token_id = self.config.vision_start_token_id
|
| 1682 |
+
|
| 1683 |
+
if inputs_embeds is not None:
|
| 1684 |
+
vision_start_mask = (
|
| 1685 |
+
inputs_embeds
|
| 1686 |
+
== self.get_input_embeddings()(
|
| 1687 |
+
torch.tensor(
|
| 1688 |
+
vision_start_token_id,
|
| 1689 |
+
dtype=torch.long,
|
| 1690 |
+
device=inputs_embeds.device,
|
| 1691 |
+
)
|
| 1692 |
+
)
|
| 1693 |
+
)[..., 0]
|
| 1694 |
+
image_mask = (
|
| 1695 |
+
inputs_embeds
|
| 1696 |
+
== self.get_input_embeddings()(
|
| 1697 |
+
torch.tensor(
|
| 1698 |
+
image_token_id, dtype=torch.long, device=inputs_embeds.device
|
| 1699 |
+
)
|
| 1700 |
+
)
|
| 1701 |
+
)[..., 0]
|
| 1702 |
+
video_mask = (
|
| 1703 |
+
inputs_embeds
|
| 1704 |
+
== self.get_input_embeddings()(
|
| 1705 |
+
torch.tensor(
|
| 1706 |
+
video_token_id, dtype=torch.long, device=inputs_embeds.device
|
| 1707 |
+
)
|
| 1708 |
+
)
|
| 1709 |
+
)[..., 0]
|
| 1710 |
+
else:
|
| 1711 |
+
vision_start_mask = input_ids == vision_start_token_id
|
| 1712 |
+
image_mask = input_ids == image_token_id
|
| 1713 |
+
video_mask = input_ids == video_token_id
|
| 1714 |
+
|
| 1715 |
+
vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
|
| 1716 |
+
image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
|
| 1717 |
+
video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
|
| 1718 |
+
|
| 1719 |
+
return image_nums, video_nums
|
| 1720 |
+
|
| 1721 |
+
def _expand_inputs_for_generation(
|
| 1722 |
+
self,
|
| 1723 |
+
expand_size: int = 1,
|
| 1724 |
+
is_encoder_decoder: bool = False,
|
| 1725 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1726 |
+
**model_kwargs,
|
| 1727 |
+
) -> tuple[torch.LongTensor, dict[str, Any]]:
|
| 1728 |
+
# Overwritten -- Support for expanding tensors without a batch size dimension
|
| 1729 |
+
# e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
|
| 1730 |
+
# pixel_values.shape[0] is sum(seqlen_images for samples)
|
| 1731 |
+
# image_grid_thw.shape[0] is sum(num_images for samples)
|
| 1732 |
+
|
| 1733 |
+
if expand_size == 1:
|
| 1734 |
+
return input_ids, model_kwargs
|
| 1735 |
+
|
| 1736 |
+
visual_keys = [
|
| 1737 |
+
"pixel_values",
|
| 1738 |
+
"image_grid_thw",
|
| 1739 |
+
"pixel_values_videos",
|
| 1740 |
+
"video_grid_thw",
|
| 1741 |
+
]
|
| 1742 |
+
|
| 1743 |
+
def _expand_dict_for_generation_visual(dict_to_expand):
|
| 1744 |
+
image_grid_thw = model_kwargs.get("image_grid_thw", None)
|
| 1745 |
+
video_grid_thw = model_kwargs.get("video_grid_thw", None)
|
| 1746 |
+
image_nums, video_nums = self._get_image_nums_and_video_nums(
|
| 1747 |
+
input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
|
| 1748 |
+
)
|
| 1749 |
+
|
| 1750 |
+
def _repeat_interleave_samples(x, lengths, repeat_times):
|
| 1751 |
+
samples = torch.split(x, lengths)
|
| 1752 |
+
repeat_args = [repeat_times] + [1] * (x.dim() - 1)
|
| 1753 |
+
result = torch.cat(
|
| 1754 |
+
[sample.repeat(*repeat_args) for sample in samples], dim=0
|
| 1755 |
+
)
|
| 1756 |
+
return result
|
| 1757 |
+
|
| 1758 |
+
for key in dict_to_expand:
|
| 1759 |
+
if key == "pixel_values":
|
| 1760 |
+
# split images into samples
|
| 1761 |
+
samples = torch.split(image_grid_thw, list(image_nums))
|
| 1762 |
+
# compute the sequence length of images for each sample
|
| 1763 |
+
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
|
| 1764 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1765 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1766 |
+
)
|
| 1767 |
+
elif key == "image_grid_thw":
|
| 1768 |
+
# get the num of images for each sample
|
| 1769 |
+
lengths = list(image_nums)
|
| 1770 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1771 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1772 |
+
)
|
| 1773 |
+
elif key == "pixel_values_videos":
|
| 1774 |
+
samples = torch.split(video_grid_thw, list(video_nums))
|
| 1775 |
+
lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
|
| 1776 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1777 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1778 |
+
)
|
| 1779 |
+
elif key == "video_grid_thw":
|
| 1780 |
+
lengths = list(video_nums)
|
| 1781 |
+
dict_to_expand[key] = _repeat_interleave_samples(
|
| 1782 |
+
dict_to_expand[key], lengths=lengths, repeat_times=expand_size
|
| 1783 |
+
)
|
| 1784 |
+
return dict_to_expand
|
| 1785 |
+
|
| 1786 |
+
def _expand_dict_for_generation(dict_to_expand):
|
| 1787 |
+
for key in dict_to_expand:
|
| 1788 |
+
if (
|
| 1789 |
+
key != "cache_position"
|
| 1790 |
+
and dict_to_expand[key] is not None
|
| 1791 |
+
and isinstance(dict_to_expand[key], torch.Tensor)
|
| 1792 |
+
and key not in visual_keys
|
| 1793 |
+
):
|
| 1794 |
+
dict_to_expand[key] = dict_to_expand[key].repeat_interleave(
|
| 1795 |
+
expand_size, dim=0
|
| 1796 |
+
)
|
| 1797 |
+
return dict_to_expand
|
| 1798 |
+
|
| 1799 |
+
model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
|
| 1800 |
+
|
| 1801 |
+
if input_ids is not None:
|
| 1802 |
+
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
|
| 1803 |
+
|
| 1804 |
+
model_kwargs = _expand_dict_for_generation(model_kwargs)
|
| 1805 |
+
|
| 1806 |
+
if is_encoder_decoder:
|
| 1807 |
+
if model_kwargs.get("encoder_outputs") is None:
|
| 1808 |
+
raise ValueError(
|
| 1809 |
+
"If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined."
|
| 1810 |
+
)
|
| 1811 |
+
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(
|
| 1812 |
+
model_kwargs["encoder_outputs"]
|
| 1813 |
+
)
|
| 1814 |
+
|
| 1815 |
+
return input_ids, model_kwargs
|
| 1816 |
+
|
| 1817 |
+
|
| 1818 |
+
__all__ = [
|
| 1819 |
+
"Qwen3VLVisionModel",
|
| 1820 |
+
"Qwen3VLForConditionalGeneration",
|
| 1821 |
+
"Qwen3VLModel",
|
| 1822 |
+
"Qwen3VLPreTrainedModel",
|
| 1823 |
+
"Qwen3VLTextModel",
|
| 1824 |
+
]
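For reference, a minimal usage sketch of the classes defined above. The checkpoint path, dtype, and prompt are placeholders, and it assumes the matching processor can be loaded through AutoProcessor from the same path; none of this is taken from the repo itself.

import torch
from transformers import AutoProcessor

# "path/to/checkpoint" is a placeholder, not a real model id.
model = Qwen3VLForConditionalGeneration.from_pretrained(
    "path/to/checkpoint", torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained("path/to/checkpoint")

messages = [{"role": "user", "content": [{"type": "text", "text": "Describe the clip."}]}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[text], return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(out, skip_special_tokens=True)[0])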
|
videoauto_r1/qwen_vl_utils/vision_process.py
ADDED
|
@@ -0,0 +1,684 @@
| 1 |
+
import base64
|
| 2 |
+
import copy
|
| 3 |
+
import logging
|
| 4 |
+
import math
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import warnings
|
| 9 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 10 |
+
from functools import lru_cache
|
| 11 |
+
from io import BytesIO
|
| 12 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
|
| 16 |
+
import requests
|
| 17 |
+
import torch
|
| 18 |
+
import torchvision
|
| 19 |
+
from packaging import version
|
| 20 |
+
from PIL import Image
|
| 21 |
+
from torchvision import io, transforms
|
| 22 |
+
from torchvision.transforms import InterpolationMode
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
MAX_RATIO = 200
|
| 26 |
+
SPATIAL_MERGE_SIZE = 2
|
| 27 |
+
IMAGE_MIN_TOKEN_NUM = 4
|
| 28 |
+
IMAGE_MAX_TOKEN_NUM = 16384
|
| 29 |
+
VIDEO_MIN_TOKEN_NUM = 128
|
| 30 |
+
VIDEO_MAX_TOKEN_NUM = 768
|
| 31 |
+
|
| 32 |
+
FPS = 2.0
|
| 33 |
+
FRAME_FACTOR = 2
|
| 34 |
+
FPS_MIN_FRAMES = 4
|
| 35 |
+
FPS_MAX_FRAMES = 768
|
| 36 |
+
MAX_NUM_WORKERS_FETCH_VIDEO = 8
|
| 37 |
+
|
| 38 |
+
MODEL_SEQ_LEN = int(float(os.environ.get("MODEL_SEQ_LEN", 128000)))
|
| 39 |
+
logger = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def round_by_factor(number: int, factor: int) -> int:
|
| 43 |
+
"""Returns the closest integer to 'number' that is divisible by 'factor'."""
|
| 44 |
+
return round(number / factor) * factor
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def ceil_by_factor(number: int, factor: int) -> int:
|
| 48 |
+
"""Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'."""
|
| 49 |
+
return math.ceil(number / factor) * factor
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def floor_by_factor(number: int, factor: int) -> int:
|
| 53 |
+
"""Returns the largest integer less than or equal to 'number' that is divisible by 'factor'."""
|
| 54 |
+
return math.floor(number / factor) * factor
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def smart_resize(
|
| 58 |
+
height: int,
|
| 59 |
+
width: int,
|
| 60 |
+
factor: int,
|
| 61 |
+
min_pixels: Optional[int] = None,
|
| 62 |
+
max_pixels: Optional[int] = None,
|
| 63 |
+
) -> Tuple[int, int]:
|
| 64 |
+
"""
|
| 65 |
+
Rescales the image so that the following conditions are met:
|
| 66 |
+
|
| 67 |
+
1. Both dimensions (height and width) are divisible by 'factor'.
|
| 68 |
+
2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
|
| 69 |
+
3. The aspect ratio of the image is maintained as closely as possible.
|
| 70 |
+
"""
|
| 71 |
+
max_pixels = (
|
| 72 |
+
max_pixels if max_pixels is not None else (IMAGE_MAX_TOKEN_NUM * factor**2)
|
| 73 |
+
)
|
| 74 |
+
min_pixels = (
|
| 75 |
+
min_pixels if min_pixels is not None else (IMAGE_MIN_TOKEN_NUM * factor**2)
|
| 76 |
+
)
|
| 77 |
+
assert (
|
| 78 |
+
max_pixels >= min_pixels
|
| 79 |
+
), "The max_pixels of image must be greater than or equal to min_pixels."
|
| 80 |
+
if max(height, width) / min(height, width) > MAX_RATIO:
|
| 81 |
+
raise ValueError(
|
| 82 |
+
f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}"
|
| 83 |
+
)
|
| 84 |
+
h_bar = max(factor, round_by_factor(height, factor))
|
| 85 |
+
w_bar = max(factor, round_by_factor(width, factor))
|
| 86 |
+
if h_bar * w_bar > max_pixels:
|
| 87 |
+
beta = math.sqrt((height * width) / max_pixels)
|
| 88 |
+
h_bar = floor_by_factor(height / beta, factor)
|
| 89 |
+
w_bar = floor_by_factor(width / beta, factor)
|
| 90 |
+
elif h_bar * w_bar < min_pixels:
|
| 91 |
+
beta = math.sqrt(min_pixels / (height * width))
|
| 92 |
+
h_bar = ceil_by_factor(height * beta, factor)
|
| 93 |
+
w_bar = ceil_by_factor(width * beta, factor)
|
| 94 |
+
return h_bar, w_bar
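As a quick sanity check of the rounding above (standalone example, not part of the module): with the default patch factor of 28 (patch size 14 times spatial merge size 2), a 1080x1920 frame stays inside the pixel budget and is only snapped to multiples of 28 on each side.

# 1080 -> 39 * 28 = 1092 and 1920 -> 69 * 28 = 1932, i.e. 39 x 69 = 2691 visual tokens.
h, w = smart_resize(1080, 1920, factor=28)
assert (h, w) == (1092, 1932) and h % 28 == 0 and w % 28 == 0
print(f"visual tokens: {h * w // 28**2}")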
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def to_rgb(pil_image: Image.Image) -> Image.Image:
|
| 98 |
+
if pil_image.mode == "RGBA":
|
| 99 |
+
white_background = Image.new("RGB", pil_image.size, (255, 255, 255))
|
| 100 |
+
white_background.paste(
|
| 101 |
+
pil_image, mask=pil_image.split()[3]
|
| 102 |
+
) # Use alpha channel as mask
|
| 103 |
+
return white_background
|
| 104 |
+
else:
|
| 105 |
+
return pil_image.convert("RGB")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def fetch_image(
|
| 109 |
+
ele: Dict[str, Union[str, Image.Image]], image_patch_size: int = 14
|
| 110 |
+
) -> Image.Image:
|
| 111 |
+
if "image" in ele:
|
| 112 |
+
image = ele["image"]
|
| 113 |
+
else:
|
| 114 |
+
image = ele["image_url"]
|
| 115 |
+
|
| 116 |
+
image_obj = None
|
| 117 |
+
patch_factor = int(image_patch_size * SPATIAL_MERGE_SIZE)
|
| 118 |
+
if isinstance(image, Image.Image):
|
| 119 |
+
image_obj = image
|
| 120 |
+
elif isinstance(image, dict):
|
| 121 |
+
image_obj = Image.open(BytesIO(image["bytes"]))
|
| 122 |
+
elif image.startswith("http://") or image.startswith("https://"):
|
| 123 |
+
with requests.get(image, stream=True) as response:
|
| 124 |
+
response.raise_for_status()
|
| 125 |
+
with BytesIO(response.content) as bio:
|
| 126 |
+
image_obj = copy.deepcopy(Image.open(bio))
|
| 127 |
+
elif image.startswith("file://"):
|
| 128 |
+
image_obj = Image.open(image[7:])
|
| 129 |
+
elif image.startswith("data:image"):
|
| 130 |
+
if "base64," in image:
|
| 131 |
+
_, base64_data = image.split("base64,", 1)
|
| 132 |
+
data = base64.b64decode(base64_data)
|
| 133 |
+
with BytesIO(data) as bio:
|
| 134 |
+
image_obj = copy.deepcopy(Image.open(bio))
|
| 135 |
+
else:
|
| 136 |
+
image_obj = Image.open(image)
|
| 137 |
+
if image_obj is None:
|
| 138 |
+
raise ValueError(
|
| 139 |
+
f"Unrecognized image input, support local path, http url, base64 and PIL.Image, got {image}"
|
| 140 |
+
)
|
| 141 |
+
image = to_rgb(image_obj)
|
| 142 |
+
|
| 143 |
+
## resize
|
| 144 |
+
if "resized_height" in ele and "resized_width" in ele:
|
| 145 |
+
resized_height, resized_width = smart_resize(
|
| 146 |
+
ele["resized_height"],
|
| 147 |
+
ele["resized_width"],
|
| 148 |
+
factor=patch_factor,
|
| 149 |
+
)
|
| 150 |
+
else:
|
| 151 |
+
width, height = image.size
|
| 152 |
+
min_pixels = ele.get("min_pixels", IMAGE_MIN_TOKEN_NUM * patch_factor**2)
|
| 153 |
+
max_pixels = ele.get("max_pixels", IMAGE_MAX_TOKEN_NUM * patch_factor**2)
|
| 154 |
+
resized_height, resized_width = smart_resize(
|
| 155 |
+
height,
|
| 156 |
+
width,
|
| 157 |
+
factor=patch_factor,
|
| 158 |
+
min_pixels=min_pixels,
|
| 159 |
+
max_pixels=max_pixels,
|
| 160 |
+
)
|
| 161 |
+
image = image.resize((resized_width, resized_height))
|
| 162 |
+
print(
|
| 163 |
+
f"token per image: {int(resized_height * resized_width / patch_factor**2)}",
|
| 164 |
+
file=sys.stderr,
|
| 165 |
+
)
|
| 166 |
+
return image
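A small self-contained example of the dict interface above, using an in-memory PIL image so no real file path is involved:

from PIL import Image

dummy = Image.new("RGB", (640, 480), color=(128, 128, 128))
resized = fetch_image({"image": dummy}, image_patch_size=14)
print(resized.size)  # both sides end up divisible by 14 * SPATIAL_MERGE_SIZE = 28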
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def smart_nframes(
|
| 170 |
+
ele: Dict[str, Any],
|
| 171 |
+
total_frames: int,
|
| 172 |
+
video_fps: Union[int, float],
|
| 173 |
+
) -> int:
|
| 174 |
+
"""calculate the number of frames for video used for model inputs.
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
ele (dict): a dict containing the configuration of the video.
|
| 178 |
+
support either `fps` or `nframes`:
|
| 179 |
+
- nframes: the number of frames to extract for model inputs.
|
| 180 |
+
- fps: the fps to extract frames for model inputs.
|
| 181 |
+
- min_frames: the minimum number of frames of the video, only used when fps is provided.
|
| 182 |
+
- max_frames: the maximum number of frames of the video, only used when fps is provided.
|
| 183 |
+
total_frames (int): the original total number of frames of the video.
|
| 184 |
+
video_fps (int | float): the original fps of the video.
|
| 185 |
+
|
| 186 |
+
Raises:
|
| 187 |
+
ValueError: nframes should be in the interval [FRAME_FACTOR, total_frames].
|
| 188 |
+
|
| 189 |
+
Returns:
|
| 190 |
+
int: the number of frames for video used for model inputs.
|
| 191 |
+
"""
|
| 192 |
+
assert not (
|
| 193 |
+
"fps" in ele and "nframes" in ele
|
| 194 |
+
), "Only accept either `fps` or `nframes`"
|
| 195 |
+
if "nframes" in ele:
|
| 196 |
+
nframes = round_by_factor(ele["nframes"], FRAME_FACTOR)
|
| 197 |
+
else:
|
| 198 |
+
fps = ele.get("fps", FPS)
|
| 199 |
+
min_frames = ceil_by_factor(ele.get("min_frames", FPS_MIN_FRAMES), FRAME_FACTOR)
|
| 200 |
+
max_frames = floor_by_factor(
|
| 201 |
+
ele.get("max_frames", min(FPS_MAX_FRAMES, total_frames)), FRAME_FACTOR
|
| 202 |
+
)
|
| 203 |
+
nframes = total_frames / video_fps * fps
|
| 204 |
+
if nframes > total_frames:
|
| 205 |
+
logger.warning(
|
| 206 |
+
f"smart_nframes: nframes[{nframes}] > total_frames[{total_frames}]"
|
| 207 |
+
)
|
| 208 |
+
nframes = min(min(max(nframes, min_frames), max_frames), total_frames)
|
| 209 |
+
nframes = floor_by_factor(nframes, FRAME_FACTOR)
|
| 210 |
+
if not (FRAME_FACTOR <= nframes and nframes <= total_frames):
|
| 211 |
+
raise ValueError(
|
| 212 |
+
f"nframes should be in the interval [{FRAME_FACTOR}, {total_frames}], but got {nframes}."
|
| 213 |
+
)
|
| 214 |
+
return nframes
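Worked example under the defaults above (FPS=2.0, FRAME_FACTOR=2): a 60-second clip recorded at 30 fps has 1800 frames, so sampling at 2 fps selects 120 frames, which is already even and inside [FPS_MIN_FRAMES, FPS_MAX_FRAMES].

# Standalone check of the default sampling policy.
n = smart_nframes({}, total_frames=1800, video_fps=30)
assert n == 120  # 1800 / 30 * 2.0 = 120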
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _read_video_torchvision(
|
| 218 |
+
ele: Dict[str, Any],
|
| 219 |
+
) -> Tuple[torch.Tensor, Dict[str, Any], float]:
|
| 220 |
+
"""read video using torchvision.io.read_video
|
| 221 |
+
|
| 222 |
+
Args:
|
| 223 |
+
ele (dict): a dict containing the configuration of the video.
|
| 224 |
+
support keys:
|
| 225 |
+
- video: the path of video. support "file://", "http://", "https://" and local path.
|
| 226 |
+
- video_start: the start time of video.
|
| 227 |
+
- video_end: the end time of video.
|
| 228 |
+
Returns:
|
| 229 |
+
tuple: (video tensor with shape (T, C, H, W), video metadata dict, sampled fps).
|
| 230 |
+
"""
|
| 231 |
+
video_path = ele["video"]
|
| 232 |
+
if version.parse(torchvision.__version__) < version.parse("0.19.0"):
|
| 233 |
+
if "http://" in video_path or "https://" in video_path:
|
| 234 |
+
warnings.warn(
|
| 235 |
+
"torchvision < 0.19.0 does not support http/https video path, please upgrade to 0.19.0."
|
| 236 |
+
)
|
| 237 |
+
if "file://" in video_path:
|
| 238 |
+
video_path = video_path[7:]
|
| 239 |
+
st = time.time()
|
| 240 |
+
video, audio, info = io.read_video(
|
| 241 |
+
video_path,
|
| 242 |
+
start_pts=ele.get("video_start", 0.0),
|
| 243 |
+
end_pts=ele.get("video_end", None),
|
| 244 |
+
pts_unit="sec",
|
| 245 |
+
output_format="TCHW",
|
| 246 |
+
)
|
| 247 |
+
total_frames, video_fps = video.size(0), info["video_fps"]
|
| 248 |
+
logger.info(
|
| 249 |
+
f"torchvision: {video_path=}, {total_frames=}, {video_fps=}, load time={time.time() - st:.3f}s"
|
| 250 |
+
)
|
| 251 |
+
nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
|
| 252 |
+
idx = torch.linspace(0, total_frames - 1, nframes).round().long()
|
| 253 |
+
sample_fps = nframes / max(total_frames, 1e-6) * video_fps
|
| 254 |
+
video = video[idx]
|
| 255 |
+
|
| 256 |
+
video_metadata = dict(
|
| 257 |
+
fps=video_fps,
|
| 258 |
+
frames_indices=idx,
|
| 259 |
+
total_num_frames=total_frames,
|
| 260 |
+
video_backend="torchvision",
|
| 261 |
+
)
|
| 262 |
+
return video, video_metadata, sample_fps
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def is_decord_available() -> bool:
|
| 266 |
+
import importlib.util
|
| 267 |
+
|
| 268 |
+
return importlib.util.find_spec("decord") is not None
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def calculate_video_frame_range(
|
| 272 |
+
ele: Dict[str, Any],
|
| 273 |
+
total_frames: int,
|
| 274 |
+
video_fps: float,
|
| 275 |
+
) -> Tuple[int, int, int]:
|
| 276 |
+
"""
|
| 277 |
+
Calculate the start and end frame indices based on the given time range.
|
| 278 |
+
|
| 279 |
+
Args:
|
| 280 |
+
ele (dict): A dictionary containing optional 'video_start' and 'video_end' keys (in seconds).
|
| 281 |
+
total_frames (int): Total number of frames in the video.
|
| 282 |
+
video_fps (float): Frames per second of the video.
|
| 283 |
+
|
| 284 |
+
Returns:
|
| 285 |
+
tuple: A tuple containing (start_frame, end_frame, frame_count).
|
| 286 |
+
|
| 287 |
+
Raises:
|
| 288 |
+
ValueError: If input parameters are invalid or the time range is inconsistent.
|
| 289 |
+
"""
|
| 290 |
+
# Validate essential parameters
|
| 291 |
+
if video_fps <= 0:
|
| 292 |
+
raise ValueError("video_fps must be a positive number")
|
| 293 |
+
if total_frames <= 0:
|
| 294 |
+
raise ValueError("total_frames must be a positive integer")
|
| 295 |
+
|
| 296 |
+
# Get start and end time in seconds
|
| 297 |
+
video_start = ele.get("video_start", None)
|
| 298 |
+
video_end = ele.get("video_end", None)
|
| 299 |
+
if video_start is None and video_end is None:
|
| 300 |
+
return 0, total_frames - 1, total_frames
|
| 301 |
+
|
| 302 |
+
max_duration = total_frames / video_fps
|
| 303 |
+
# Process start frame
|
| 304 |
+
if video_start is not None:
|
| 305 |
+
video_start_clamped = max(0.0, min(video_start, max_duration))
|
| 306 |
+
start_frame = math.ceil(video_start_clamped * video_fps)
|
| 307 |
+
else:
|
| 308 |
+
start_frame = 0
|
| 309 |
+
# Process end frame
|
| 310 |
+
if video_end is not None:
|
| 311 |
+
video_end_clamped = max(0.0, min(video_end, max_duration))
|
| 312 |
+
end_frame = math.floor(video_end_clamped * video_fps)
|
| 313 |
+
end_frame = min(end_frame, total_frames - 1)
|
| 314 |
+
else:
|
| 315 |
+
end_frame = total_frames - 1
|
| 316 |
+
|
| 317 |
+
# Validate frame order
|
| 318 |
+
if start_frame >= end_frame:
|
| 319 |
+
raise ValueError(
|
| 320 |
+
f"Invalid time range: Start frame {start_frame} (at {video_start_clamped if video_start is not None else 0}s) "
|
| 321 |
+
f"exceeds end frame {end_frame} (at {video_end_clamped if video_end is not None else max_duration}s). "
|
| 322 |
+
f"Video duration: {max_duration:.2f}s ({total_frames} frames @ {video_fps}fps)"
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
logger.info(
|
| 326 |
+
f"calculate video frame range: {start_frame=}, {end_frame=}, {total_frames=} from {video_start=}, {video_end=}, {video_fps=:.3f}"
|
| 327 |
+
)
|
| 328 |
+
return start_frame, end_frame, end_frame - start_frame + 1
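For instance (illustrative numbers only): trimming a 30 fps, 1800-frame video to the 10s-20s window selects frames 300 through 600 inclusive.

start, end, count = calculate_video_frame_range(
    {"video_start": 10.0, "video_end": 20.0}, total_frames=1800, video_fps=30.0
)
assert (start, end, count) == (300, 600, 301)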
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def _read_video_decord(
|
| 332 |
+
ele: Dict[str, Any],
|
| 333 |
+
) -> Tuple[torch.Tensor, Dict[str, Any], float]:
|
| 334 |
+
"""read video using decord.VideoReader
|
| 335 |
+
|
| 336 |
+
Args:
|
| 337 |
+
ele (dict): a dict containing the configuration of the video.
|
| 338 |
+
support keys:
|
| 339 |
+
- video: the path of video. support "file://", "http://", "https://" and local path.
|
| 340 |
+
- video_start: the start time of video.
|
| 341 |
+
- video_end: the end time of video.
|
| 342 |
+
Returns:
|
| 343 |
+
tuple: (video tensor with shape (T, C, H, W), video metadata dict, sampled fps).
|
| 344 |
+
"""
|
| 345 |
+
import decord
|
| 346 |
+
|
| 347 |
+
video_path = ele["video"]
|
| 348 |
+
st = time.time()
|
| 349 |
+
vr = decord.VideoReader(video_path)
|
| 350 |
+
total_frames, video_fps = len(vr), vr.get_avg_fps()
|
| 351 |
+
start_frame, end_frame, total_frames = calculate_video_frame_range(
|
| 352 |
+
ele,
|
| 353 |
+
total_frames,
|
| 354 |
+
video_fps,
|
| 355 |
+
)
|
| 356 |
+
nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
|
| 357 |
+
idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
|
| 358 |
+
video = vr.get_batch(idx).asnumpy()
|
| 359 |
+
video = torch.tensor(video).permute(0, 3, 1, 2) # Convert to TCHW format
|
| 360 |
+
logger.info(
|
| 361 |
+
f"decord: {video_path=}, {total_frames=}, {video_fps=}, load time={time.time() - st:.3f}s"
|
| 362 |
+
)
|
| 363 |
+
sample_fps = nframes / max(total_frames, 1e-6) * video_fps
|
| 364 |
+
|
| 365 |
+
video_metadata = dict(
|
| 366 |
+
fps=video_fps,
|
| 367 |
+
frames_indices=idx,
|
| 368 |
+
total_num_frames=total_frames,
|
| 369 |
+
video_backend="decord",
|
| 370 |
+
)
|
| 371 |
+
return video, video_metadata, sample_fps
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def is_torchcodec_available() -> bool:
|
| 375 |
+
import importlib.util
|
| 376 |
+
|
| 377 |
+
return importlib.util.find_spec("torchcodec") is not None
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def _read_video_torchcodec(
|
| 381 |
+
ele: Dict[str, Any],
|
| 382 |
+
) -> Tuple[torch.Tensor, Dict[str, Any], float]:
|
| 383 |
+
"""read video using torchcodec.decoders.VideoDecoder
|
| 384 |
+
|
| 385 |
+
Args:
|
| 386 |
+
ele (dict): a dict containing the configuration of the video.
|
| 387 |
+
support keys:
|
| 388 |
+
- video: the path of video. support "file://", "http://", "https://" and local path.
|
| 389 |
+
- video_start: the start time of video.
|
| 390 |
+
- video_end: the end time of video.
|
| 391 |
+
Returns:
|
| 392 |
+
tuple: (video tensor with shape (T, C, H, W), video metadata dict, sampled fps).
|
| 393 |
+
"""
|
| 394 |
+
from torchcodec.decoders import VideoDecoder
|
| 395 |
+
|
| 396 |
+
TORCHCODEC_NUM_THREADS = int(os.environ.get("TORCHCODEC_NUM_THREADS", 8))
|
| 397 |
+
logger.info(f"set TORCHCODEC_NUM_THREADS: {TORCHCODEC_NUM_THREADS}")
|
| 398 |
+
video_path = ele["video"]
|
| 399 |
+
st = time.time()
|
| 400 |
+
decoder = VideoDecoder(video_path, num_ffmpeg_threads=TORCHCODEC_NUM_THREADS)
|
| 401 |
+
video_fps = decoder.metadata.average_fps
|
| 402 |
+
total_frames = decoder.metadata.num_frames
|
| 403 |
+
start_frame, end_frame, total_frames = calculate_video_frame_range(
|
| 404 |
+
ele,
|
| 405 |
+
total_frames,
|
| 406 |
+
video_fps,
|
| 407 |
+
)
|
| 408 |
+
nframes = smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
|
| 409 |
+
idx = torch.linspace(start_frame, end_frame, nframes).round().long().tolist()
|
| 410 |
+
sample_fps = nframes / max(total_frames, 1e-6) * video_fps
|
| 411 |
+
video = decoder.get_frames_at(indices=idx).data
|
| 412 |
+
logger.info(
|
| 413 |
+
f"torchcodec: {video_path=}, {total_frames=}, {video_fps=}, load time={time.time() - st:.3f}s"
|
| 414 |
+
)
|
| 415 |
+
print(
|
| 416 |
+
f"video duration: {total_frames / video_fps:.2f}s, sampled {nframes} frames",
|
| 417 |
+
file=sys.stderr,
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
video_metadata = dict(
|
| 421 |
+
fps=video_fps,
|
| 422 |
+
frames_indices=idx,
|
| 423 |
+
total_num_frames=total_frames,
|
| 424 |
+
video_backend="torchcodec",
|
| 425 |
+
)
|
| 426 |
+
return video, video_metadata, sample_fps
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
VIDEO_READER_BACKENDS = {
|
| 430 |
+
"decord": _read_video_decord,
|
| 431 |
+
"torchvision": _read_video_torchvision,
|
| 432 |
+
"torchcodec": _read_video_torchcodec,
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
FORCE_QWENVL_VIDEO_READER = os.getenv("FORCE_QWENVL_VIDEO_READER", None)
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
@lru_cache(maxsize=1)
|
| 439 |
+
def get_video_reader_backend() -> str:
|
| 440 |
+
if FORCE_QWENVL_VIDEO_READER is not None:
|
| 441 |
+
video_reader_backend = FORCE_QWENVL_VIDEO_READER
|
| 442 |
+
elif is_torchcodec_available():
|
| 443 |
+
video_reader_backend = "torchcodec"
|
| 444 |
+
elif is_decord_available():
|
| 445 |
+
video_reader_backend = "decord"
|
| 446 |
+
else:
|
| 447 |
+
video_reader_backend = "torchvision"
|
| 448 |
+
print(f"qwen-vl-utils using {video_reader_backend} to read video.", file=sys.stderr)
|
| 449 |
+
return video_reader_backend
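The order of preference above is torchcodec, then decord, then torchvision, unless FORCE_QWENVL_VIDEO_READER overrides it; note that the environment variable is read at module import time and the result is cached by lru_cache.

# Selection order, restated for clarity (not executed by the module itself):
# 1. FORCE_QWENVL_VIDEO_READER, if set before this module is imported
# 2. "torchcodec" if importable, else 3. "decord" if importable, else 4. "torchvision"
print(get_video_reader_backend())  # cached after the first call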
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def fetch_video(
|
| 453 |
+
ele: Dict[str, Any],
|
| 454 |
+
image_patch_size: int = 14,
|
| 455 |
+
return_video_sample_fps: bool = False,
|
| 456 |
+
return_video_metadata: bool = False,
|
| 457 |
+
) -> Union[torch.Tensor, List[Image.Image]]:
|
| 458 |
+
image_factor = image_patch_size * SPATIAL_MERGE_SIZE
|
| 459 |
+
VIDEO_FRAME_MIN_PIXELS = VIDEO_MIN_TOKEN_NUM * image_factor * image_factor
|
| 460 |
+
VIDEO_FRAME_MAX_PIXELS = VIDEO_MAX_TOKEN_NUM * image_factor * image_factor
|
| 461 |
+
if isinstance(ele["video"], str):
|
| 462 |
+
video_reader_backend = get_video_reader_backend()
|
| 463 |
+
try:
|
| 464 |
+
video, video_metadata, sample_fps = VIDEO_READER_BACKENDS[
|
| 465 |
+
video_reader_backend
|
| 466 |
+
](ele)
|
| 467 |
+
except Exception as e:
|
| 468 |
+
logger.warning(
|
| 469 |
+
f"video_reader_backend {video_reader_backend} error, falling back to torchvision, msg: {e}"
|
| 470 |
+
)
|
| 471 |
+
video, video_metadata, sample_fps = VIDEO_READER_BACKENDS["torchvision"](
|
| 472 |
+
ele
|
| 473 |
+
)
|
| 474 |
+
else:
|
| 475 |
+
# The input is a list of frames
|
| 476 |
+
assert isinstance(ele["video"], (list, tuple))
|
| 477 |
+
process_info = ele.copy()
|
| 478 |
+
process_info.pop("type", None)
|
| 479 |
+
process_info.pop("video", None)
|
| 480 |
+
# use ThreadPoolExecutor to parallel process frames
|
| 481 |
+
max_workers = min(MAX_NUM_WORKERS_FETCH_VIDEO, len(ele["video"]))
|
| 482 |
+
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
| 483 |
+
futures = [
|
| 484 |
+
executor.submit(
|
| 485 |
+
fetch_image, {"image": video_element, **process_info}, image_factor
|
| 486 |
+
)
|
| 487 |
+
for video_element in ele["video"]
|
| 488 |
+
]
|
| 489 |
+
image_list = [future.result() for future in futures]
|
| 490 |
+
|
| 491 |
+
        nframes = ceil_by_factor(len(image_list), FRAME_FACTOR)
        if len(image_list) < nframes:
            image_list.extend([image_list[-1]] * (nframes - len(image_list)))

        sample_fps = ele.get("sample_fps", 2.0)
        video = torch.stack(
            [
                torch.from_numpy(np.array(image).transpose(2, 0, 1))
                for image in image_list
            ]
        )

        # Fake video metadata: the frames did not come from a decoded video file.
        raw_fps = process_info.pop("raw_fps", sample_fps)
        video_metadata = dict(
            fps=raw_fps,
            frames_indices=list(range(len(video))),
            total_num_frames=(nframes / sample_fps) * raw_fps,
        )

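    # Per-frame pixel budget: keep the whole video within roughly 90% of
    # MODEL_SEQ_LEN worth of tokens, clamped below VIDEO_FRAME_MAX_PIXELS and
    # above min_pixels.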
    nframes, _, height, width = video.shape
    min_pixels = ele.get("min_pixels", VIDEO_FRAME_MIN_PIXELS)
    total_pixels = ele.get(
        "total_pixels", MODEL_SEQ_LEN * image_factor * image_factor * 0.9
    )
    max_pixels = max(
        min(VIDEO_FRAME_MAX_PIXELS, total_pixels / nframes * FRAME_FACTOR),
        int(min_pixels * 1.05),
    )
    max_pixels_supposed = ele.get("max_pixels", max_pixels)
    if max_pixels_supposed > max_pixels:
        logger.warning(
            f"The given max_pixels[{max_pixels_supposed}] exceeds limit[{max_pixels}]."
        )
    max_pixels = min(max_pixels_supposed, max_pixels)
    if "resized_height" in ele and "resized_width" in ele:
        resized_height, resized_width = smart_resize(
            ele["resized_height"],
            ele["resized_width"],
            factor=image_factor,
        )
    else:
        resized_height, resized_width = smart_resize(
            height,
            width,
            factor=image_factor,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
        )
    video = transforms.functional.resize(
        video,
        [resized_height, resized_width],
        interpolation=InterpolationMode.BICUBIC,
        antialias=True,
    ).float()

    print(
        f"tokens per {FRAME_FACTOR} frames: {int(resized_height * resized_width / image_factor**2)}, "
        f"video total tokens: {int(video.shape[0] * video.shape[-2] * video.shape[-1] / 2 / image_factor**2)}",
        file=sys.stderr,
    )

    final_video = (video, video_metadata) if return_video_metadata else video

    if return_video_sample_fps:
        return final_video, sample_fps
    return final_video


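# extract_vision_info flattens one conversation (or a batch of conversations)
# into the ordered list of image/video content elements it contains.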
def extract_vision_info(
    conversations: Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]],
) -> List[Dict[str, Any]]:
    vision_infos = []
    if isinstance(conversations[0], dict):
        conversations = [conversations]
    for conversation in conversations:
        for message in conversation:
            if isinstance(message["content"], list):
                for ele in message["content"]:
                    if (
                        "image" in ele
                        or "image_url" in ele
                        or "video" in ele
                        or ele.get("type", "text") in ("image", "image_url", "video")
                    ):
                        vision_infos.append(ele)
    return vision_infos


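# process_vision_info fetches every image and video referenced in a
# conversation and returns them ready for the processor. A minimal usage
# sketch (the message schema follows the Qwen-VL chat format; the file path is
# hypothetical):
#
#   messages = [{"role": "user", "content": [
#       {"type": "video", "video": "file:///path/to/clip.mp4"},
#       {"type": "text", "text": "Describe this video."},
#   ]}]
#   images, videos, video_kwargs = process_vision_info(
#       messages, return_video_kwargs=True
#   )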
def process_vision_info(
    conversations: Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]],
    return_video_kwargs: bool = False,
    return_video_metadata: bool = False,
    image_patch_size: int = 14,
) -> Tuple[
    Optional[List[Image.Image]],
    Optional[List[Union[torch.Tensor, List[Image.Image]]]],
    Optional[Dict[str, Any]],
]:
    vision_infos = extract_vision_info(conversations)
    ## Read images or videos
    image_inputs = []
    video_inputs = []
    video_sample_fps_list = []
    for vision_info in vision_infos:
        if "image" in vision_info or "image_url" in vision_info:
            image_inputs.append(
                fetch_image(vision_info, image_patch_size=image_patch_size)
            )
        elif "video" in vision_info:
            video_input, video_sample_fps = fetch_video(
                vision_info,
                return_video_sample_fps=True,
                image_patch_size=image_patch_size,
                return_video_metadata=return_video_metadata,
            )
            video_sample_fps_list.append(video_sample_fps)
            video_inputs.append(video_input)
        else:
            raise ValueError("image, image_url or video should be in content.")
    if len(image_inputs) == 0:
        image_inputs = None
    if len(video_inputs) == 0:
        video_inputs = None

    video_kwargs = {"do_sample_frames": False}
    if not return_video_metadata:  # BC for qwen2.5vl
        video_kwargs.update({"fps": video_sample_fps_list})

    if return_video_kwargs:
        return image_inputs, video_inputs, video_kwargs
    return image_inputs, video_inputs


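# cached_process_vision_info behaves like process_vision_info but deduplicates
# repeated vision elements within a call: an element equal to one already
# processed reuses the fetched result instead of decoding it again.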
def cached_process_vision_info(
    conversations: Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]],
    return_video_kwargs: bool = False,
    return_video_metadata: bool = False,
    image_patch_size: int = 14,
) -> tuple[
    Optional[List[Image.Image]],
    Optional[List[Union[torch.Tensor, List[Image.Image]]]],
    Optional[Dict[str, Any]],
]:
    vision_infos = extract_vision_info(conversations)
    ## Read images or videos
    image_inputs = []
    video_inputs = []
    video_sample_fps_list = []
    processed = []  # list of [vision_info, "image"/"video", idx_in_corresponding_list]

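    # Linear scan over already-processed elements; dicts are compared by value,
    # so an identical {"video": ...} entry is only fetched once.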
    for vi in vision_infos:
        is_image = ("image" in vi) or ("image_url" in vi)
        is_video = "video" in vi
        if not (is_image or is_video):
            raise ValueError("image, image_url or video should be in content.")

        match_idx = next((i for i, rec in enumerate(processed) if rec[0] == vi), -1)

        if match_idx >= 0:
            data_type, ref_idx = processed[match_idx][1], processed[match_idx][2]
            if data_type == "image":
                image_inputs.append(image_inputs[ref_idx])
            else:  # video
                video_inputs.append(video_inputs[ref_idx])
                video_sample_fps_list.append(video_sample_fps_list[ref_idx])
        else:
            if is_image:
                img = fetch_image(vi, image_patch_size=image_patch_size)
                image_inputs.append(img)
                processed.append([vi, "image", len(image_inputs) - 1])
            else:  # is_video
                vid, fps = fetch_video(
                    vi,
                    return_video_sample_fps=True,
                    image_patch_size=image_patch_size,
                    return_video_metadata=return_video_metadata,
                )
                video_inputs.append(vid)
                video_sample_fps_list.append(fps)
                processed.append([vi, "video", len(video_inputs) - 1])

    if len(image_inputs) == 0:
        image_inputs = None
    if len(video_inputs) == 0:
        video_inputs = None

    video_kwargs = {"do_sample_frames": False}
    if not return_video_metadata:  # BC for qwen2.5vl
        video_kwargs.update({"fps": video_sample_fps_list})

    if return_video_kwargs:
        return image_inputs, video_inputs, video_kwargs
    return image_inputs, video_inputs