Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- VLMEvalKit-sudoku/assets/apple.jpg +3 -0
- VLMEvalKit-sudoku/docs/en/.readthedocs.yaml +17 -0
- VLMEvalKit-sudoku/docs/en/Contributors.md +21 -0
- VLMEvalKit-sudoku/docs/en/EvalByLMDeploy.md +27 -0
- VLMEvalKit-sudoku/docs/en/_templates/autosummary/class.rst +13 -0
- VLMEvalKit-sudoku/docs/en/conf.py +234 -0
- VLMEvalKit-sudoku/docs/en/docutils.conf +2 -0
- VLMEvalKit-sudoku/docs/ja/README_ja.md +117 -0
- VLMEvalKit-sudoku/docs/zh-CN/ConfigSystem.md +69 -0
- VLMEvalKit-sudoku/docs/zh-CN/EvalByLMDeploy.md +28 -0
- VLMEvalKit-sudoku/docs/zh-CN/README_zh-CN.md +131 -0
- VLMEvalKit-sudoku/docs/zh-CN/_static/image/logo.svg +24 -0
- VLMEvalKit-sudoku/docs/zh-CN/_templates/autosummary/class.rst +13 -0
- VLMEvalKit-sudoku/docs/zh-CN/conf.py +242 -0
- VLMEvalKit-sudoku/llava/eval/eval_chartqa.py +74 -0
- VLMEvalKit-sudoku/llava/eval/eval_pope.py +84 -0
- VLMEvalKit-sudoku/llava/eval/eval_science_qa_gpt4.py +104 -0
- VLMEvalKit-sudoku/llava/eval/m4c_evaluator.py +345 -0
- VLMEvalKit-sudoku/llava/eval/model_qa.py +64 -0
- VLMEvalKit-sudoku/llava/eval/model_vqa.py +240 -0
- VLMEvalKit-sudoku/llava/eval/model_vqa_mmbench.py +187 -0
- VLMEvalKit-sudoku/llava/mm_utils.py +395 -0
- VLMEvalKit-sudoku/llava/model/apply_delta.py +47 -0
- VLMEvalKit-sudoku/llava/model/consolidate.py +30 -0
- VLMEvalKit-sudoku/llava/model/language_model/llava_llama.py +168 -0
- VLMEvalKit-sudoku/llava/model/llava_arch.py +808 -0
- VLMEvalKit-sudoku/llava/model/make_delta.py +52 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_qwen2_5vl.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/siglip_encoder.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/adapt_clip_vision_model.py +236 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_model.py +240 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B.json +27 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/tokenizer.py +205 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_processors.py +72 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14-336.json +29 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/imagebind.py +73 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/open_clip_encoder.py +163 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/adapt_spatial_resampler.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/llava_mlp.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/mlp.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/percive_sampler.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/adapt_spatial_resampler.py +515 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/llava_mlp.py +113 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/pooler_projector.py +33 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/uhd_v1_resampler.py +218 -0
- VLMEvalKit-sudoku/llava/model/utils.py +20 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
eval_results/GNE_ShapeGrid_sudoku.xlsx filter=lfs diff=lfs merge=lfs -text
|
VLMEvalKit-sudoku/assets/apple.jpg
ADDED
|
Git LFS Details
|
VLMEvalKit-sudoku/docs/en/.readthedocs.yaml
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: 2
|
| 2 |
+
|
| 3 |
+
# Set the version of Python and other tools you might need
|
| 4 |
+
build:
|
| 5 |
+
os: ubuntu-22.04
|
| 6 |
+
tools:
|
| 7 |
+
python: "3.8"
|
| 8 |
+
|
| 9 |
+
formats:
|
| 10 |
+
- epub
|
| 11 |
+
|
| 12 |
+
sphinx:
|
| 13 |
+
configuration: docs/en/conf.py
|
| 14 |
+
|
| 15 |
+
python:
|
| 16 |
+
install:
|
| 17 |
+
- requirements: requirements/docs.txt
|
VLMEvalKit-sudoku/docs/en/Contributors.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributors
|
| 2 |
+
|
| 3 |
+
## Contributors w. 3+ Major Contributions
|
| 4 |
+
|
| 5 |
+
> In this section, we list all the contributors who have made significant contributions (3+) to the development of VLMEvalKit.
|
| 6 |
+
|
| 7 |
+
New Qualified Contributors (2024.09):
|
| 8 |
+
|
| 9 |
+
1. [amitbcp](https://github.com/amitbcp): The contributor helped support MUIRBench, Phi-3.5, Idefics3, VILA, and xGen-MM
|
| 10 |
+
2. [czczup](https://github.com/czczup): The contributor helped support the InternVL Series (V1.5, Mini-InternVL, V2, etc.)
|
| 11 |
+
3. [DseidLi](https://github.com/DseidLi): The contributor helped support LLaVA-OneVision, GQA, and developed the readthedocs site for VLMEvalKit
|
| 12 |
+
4. [mayubo2333](https://github.com/mayubo2333): The contributor helped support MMLongBench, SlideVQA, and DUDE
|
| 13 |
+
5. [sun-hailong](https://github.com/sun-hailong): The contributor helped support A-OKVQA, Parrot, MMMB, and MTL-MMBench
|
| 14 |
+
6. [PhoenixZ810](https://github.com/PhoenixZ810): The contributor helped support Video-ChatGPT, Chat-UniVI, and Llama-VID
|
| 15 |
+
7. [Cuiunbo](https://github.com/Cuiunbo): The contributor helped support OmniLMM-12B, MiniCPM-V Series (V1, V2, V2.5)
|
| 16 |
+
|
| 17 |
+
## Full Contributor List
|
| 18 |
+
|
| 19 |
+
> In this section, we list all the contributors as well as their corresponding contributions to the development of VLMEvalKit.
|
| 20 |
+
|
| 21 |
+
TBD.
|
VLMEvalKit-sudoku/docs/en/EvalByLMDeploy.md
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Using LMDeploy to Accelerate Evaluation and Inference
|
| 2 |
+
|
| 3 |
+
VLMEvalKit supports testing VLM models deployed by LMDeploy. Below, we use InternVL2-8B as an example to show how to test the model.
|
| 4 |
+
|
| 5 |
+
## Step 0: Install LMDeploy
|
| 6 |
+
|
| 7 |
+
```bash
|
| 8 |
+
pip install lmdeploy
|
| 9 |
+
```
|
| 10 |
+
For other installation methods, you can refer to LMDeploy's [documentation](https://github.com/InternLM/lmdeploy).
|
| 11 |
+
|
| 12 |
+
## Step 1: Start the Inference Service
|
| 13 |
+
|
| 14 |
+
```bash
|
| 15 |
+
lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B
|
| 16 |
+
```
|
| 17 |
+
> [!IMPORTANT]
|
| 18 |
+
> Since models in VLMEvalKit may have custom behaviors when building prompts for different datasets, such as InternVL2's handling of HallusionBench, it is necessary to specify `--model-name` when starting the server. This allows the VLMEvalKit to select appropriate prompt construction strategy based on the name when using the LMDeploy API.
|
| 19 |
+
>
|
| 20 |
+
> If `--server-port`, is specified, the corresponding environment variable `LMDEPLOY_API_BASE` needs to be set.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
## Step 2: Evaluation
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
python run.py --data MMStar --model lmdeploy --verbose --api-nproc 64
|
| 27 |
+
```
|
VLMEvalKit-sudoku/docs/en/_templates/autosummary/class.rst
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. role:: hidden
|
| 2 |
+
:class: hidden-section
|
| 3 |
+
.. currentmodule:: {{ module }}
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
{{ name | underline}}
|
| 7 |
+
|
| 8 |
+
.. autoclass:: {{ name }}
|
| 9 |
+
:members:
|
| 10 |
+
|
| 11 |
+
..
|
| 12 |
+
autogenerated from _templates/autosummary/class.rst
|
| 13 |
+
note it does not have :inherited-members:
|
VLMEvalKit-sudoku/docs/en/conf.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
# Configuration file for the Sphinx documentation builder.
|
| 3 |
+
#
|
| 4 |
+
# This file only contains a selection of the most common options. For a full
|
| 5 |
+
# list see the documentation:
|
| 6 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
| 7 |
+
|
| 8 |
+
# -- Path setup --------------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
# If extensions (or modules to document with autodoc) are in another directory,
|
| 11 |
+
# add these directories to sys.path here. If the directory is relative to the
|
| 12 |
+
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
| 13 |
+
#
|
| 14 |
+
import os
|
| 15 |
+
import ast
|
| 16 |
+
import subprocess
|
| 17 |
+
import sys
|
| 18 |
+
|
| 19 |
+
import pytorch_sphinx_theme
|
| 20 |
+
from sphinx.builders.html import StandaloneHTMLBuilder
|
| 21 |
+
|
| 22 |
+
sys.path.insert(0, os.path.abspath('../../'))
|
| 23 |
+
|
| 24 |
+
# -- Project information -----------------------------------------------------
|
| 25 |
+
|
| 26 |
+
project = 'VLMEvalKit'
|
| 27 |
+
copyright = '2023, VLMEvalKit'
|
| 28 |
+
author = 'VLMEvalKit Authors'
|
| 29 |
+
|
| 30 |
+
# The full version, including alpha/beta/rc tags
|
| 31 |
+
version_file = '../../vlmeval/__init__.py'
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_version():
|
| 35 |
+
with open(version_file, 'r') as f:
|
| 36 |
+
file_content = f.read()
|
| 37 |
+
# Parse the file content into an abstract syntax tree (AST)
|
| 38 |
+
tree = ast.parse(file_content, filename=version_file)
|
| 39 |
+
|
| 40 |
+
# Iterate through the body of the AST, looking for an assignment to __version__
|
| 41 |
+
for node in tree.body:
|
| 42 |
+
if isinstance(node, ast.Assign):
|
| 43 |
+
for target in node.targets:
|
| 44 |
+
if isinstance(target, ast.Name) and target.id == '__version__':
|
| 45 |
+
return node.value.s
|
| 46 |
+
raise ValueError('__version__ not found')
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
release = get_version()
|
| 50 |
+
|
| 51 |
+
# -- General configuration ---------------------------------------------------
|
| 52 |
+
|
| 53 |
+
# Add any Sphinx extension module names here, as strings. They can be
|
| 54 |
+
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
| 55 |
+
# ones.
|
| 56 |
+
extensions = [
|
| 57 |
+
'sphinx.ext.autodoc',
|
| 58 |
+
'sphinx.ext.autosummary',
|
| 59 |
+
'sphinx.ext.intersphinx',
|
| 60 |
+
'sphinx.ext.napoleon',
|
| 61 |
+
'sphinx.ext.viewcode',
|
| 62 |
+
'myst_parser',
|
| 63 |
+
'sphinx_copybutton',
|
| 64 |
+
'sphinx_tabs.tabs',
|
| 65 |
+
'notfound.extension',
|
| 66 |
+
'sphinxcontrib.jquery',
|
| 67 |
+
'sphinx_design',
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
# Add any paths that contain templates here, relative to this directory.
|
| 71 |
+
templates_path = ['_templates']
|
| 72 |
+
|
| 73 |
+
# The suffix(es) of source filenames.
|
| 74 |
+
# You can specify multiple suffix as a list of string:
|
| 75 |
+
#
|
| 76 |
+
source_suffix = {
|
| 77 |
+
'.rst': 'restructuredtext',
|
| 78 |
+
'.md': 'markdown',
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
language = 'en'
|
| 82 |
+
|
| 83 |
+
# The master toctree document.
|
| 84 |
+
root_doc = 'index'
|
| 85 |
+
html_context = {
|
| 86 |
+
'github_version': 'latest',
|
| 87 |
+
}
|
| 88 |
+
# List of patterns, relative to source directory, that match files and
|
| 89 |
+
# directories to ignore when looking for source files.
|
| 90 |
+
# This pattern also affects html_static_path and html_extra_path.
|
| 91 |
+
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
| 92 |
+
|
| 93 |
+
# -- Options for HTML output -------------------------------------------------
|
| 94 |
+
|
| 95 |
+
# The theme to use for HTML and HTML Help pages. See the documentation for
|
| 96 |
+
# a list of builtin themes.
|
| 97 |
+
#
|
| 98 |
+
html_theme = 'pytorch_sphinx_theme'
|
| 99 |
+
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
|
| 100 |
+
|
| 101 |
+
# Theme options are theme-specific and customize the look and feel of a theme
|
| 102 |
+
# further. For a list of options available for each theme, see the
|
| 103 |
+
# documentation.
|
| 104 |
+
# yapf: disable
|
| 105 |
+
html_theme_options = {
|
| 106 |
+
'menu': [
|
| 107 |
+
{
|
| 108 |
+
'name': 'GitHub',
|
| 109 |
+
'url': 'https://github.com/open-compass/VLMEvalKit'
|
| 110 |
+
},
|
| 111 |
+
],
|
| 112 |
+
# Specify the language of shared menu
|
| 113 |
+
'menu_lang': 'en',
|
| 114 |
+
# Disable the default edit on GitHub
|
| 115 |
+
'default_edit_on_github': False,
|
| 116 |
+
}
|
| 117 |
+
# yapf: enable
|
| 118 |
+
|
| 119 |
+
# Add any paths that contain custom static files (such as style sheets) here,
|
| 120 |
+
# relative to this directory. They are copied after the builtin static files,
|
| 121 |
+
# so a file named "default.css" will overwrite the builtin "default.css".
|
| 122 |
+
html_static_path = ['_static']
|
| 123 |
+
html_css_files = [
|
| 124 |
+
'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css',
|
| 125 |
+
'css/readthedocs.css'
|
| 126 |
+
]
|
| 127 |
+
html_js_files = [
|
| 128 |
+
'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js',
|
| 129 |
+
'js/custom.js'
|
| 130 |
+
]
|
| 131 |
+
|
| 132 |
+
# -- Options for HTMLHelp output ---------------------------------------------
|
| 133 |
+
|
| 134 |
+
# Output file base name for HTML help builder.
|
| 135 |
+
htmlhelp_basename = 'vlmevalkitdoc'
|
| 136 |
+
|
| 137 |
+
# -- Options for LaTeX output ------------------------------------------------
|
| 138 |
+
|
| 139 |
+
latex_elements = {
|
| 140 |
+
# The paper size ('letterpaper' or 'a4paper').
|
| 141 |
+
#
|
| 142 |
+
# 'papersize': 'letterpaper',
|
| 143 |
+
|
| 144 |
+
# The font size ('10pt', '11pt' or '12pt').
|
| 145 |
+
#
|
| 146 |
+
# 'pointsize': '10pt',
|
| 147 |
+
|
| 148 |
+
# Additional stuff for the LaTeX preamble.
|
| 149 |
+
#
|
| 150 |
+
# 'preamble': '',
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
# Grouping the document tree into LaTeX files. List of tuples
|
| 154 |
+
# (source start file, target name, title,
|
| 155 |
+
# author, documentclass [howto, manual, or own class]).
|
| 156 |
+
latex_documents = [
|
| 157 |
+
(root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author,
|
| 158 |
+
'manual'),
|
| 159 |
+
]
|
| 160 |
+
|
| 161 |
+
# -- Options for manual page output ------------------------------------------
|
| 162 |
+
|
| 163 |
+
# One entry per manual page. List of tuples
|
| 164 |
+
# (source start file, name, description, authors, manual section).
|
| 165 |
+
man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author],
|
| 166 |
+
1)]
|
| 167 |
+
|
| 168 |
+
# -- Options for Texinfo output ----------------------------------------------
|
| 169 |
+
|
| 170 |
+
# Grouping the document tree into Texinfo files. List of tuples
|
| 171 |
+
# (source start file, target name, title, author,
|
| 172 |
+
# dir menu entry, description, category)
|
| 173 |
+
texinfo_documents = [
|
| 174 |
+
(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author,
|
| 175 |
+
'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.',
|
| 176 |
+
'Miscellaneous'),
|
| 177 |
+
]
|
| 178 |
+
|
| 179 |
+
# -- Options for Epub output -------------------------------------------------
|
| 180 |
+
|
| 181 |
+
# Bibliographic Dublin Core info.
|
| 182 |
+
epub_title = project
|
| 183 |
+
|
| 184 |
+
# The unique identifier of the text. This can be a ISBN number
|
| 185 |
+
# or the project homepage.
|
| 186 |
+
#
|
| 187 |
+
# epub_identifier = ''
|
| 188 |
+
|
| 189 |
+
# A unique identification for the text.
|
| 190 |
+
#
|
| 191 |
+
# epub_uid = ''
|
| 192 |
+
|
| 193 |
+
# A list of files that should not be packed into the epub file.
|
| 194 |
+
epub_exclude_files = ['search.html']
|
| 195 |
+
|
| 196 |
+
# set priority when building html
|
| 197 |
+
StandaloneHTMLBuilder.supported_image_types = [
|
| 198 |
+
'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg'
|
| 199 |
+
]
|
| 200 |
+
|
| 201 |
+
# -- Extension configuration -------------------------------------------------
|
| 202 |
+
# Ignore >>> when copying code
|
| 203 |
+
copybutton_prompt_text = r'>>> |\.\.\. '
|
| 204 |
+
copybutton_prompt_is_regexp = True
|
| 205 |
+
|
| 206 |
+
# Auto-generated header anchors
|
| 207 |
+
myst_heading_anchors = 3
|
| 208 |
+
# Enable "colon_fence" extension of myst.
|
| 209 |
+
myst_enable_extensions = ['colon_fence', 'dollarmath']
|
| 210 |
+
|
| 211 |
+
# Configuration for intersphinx
|
| 212 |
+
intersphinx_mapping = {
|
| 213 |
+
'python': ('https://docs.python.org/3', None),
|
| 214 |
+
'numpy': ('https://numpy.org/doc/stable', None),
|
| 215 |
+
'torch': ('https://pytorch.org/docs/stable/', None),
|
| 216 |
+
'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None),
|
| 217 |
+
'transformers':
|
| 218 |
+
('https://huggingface.co/docs/transformers/main/en/', None),
|
| 219 |
+
}
|
| 220 |
+
napoleon_custom_sections = [
|
| 221 |
+
# Custom sections for data elements.
|
| 222 |
+
('Meta fields', 'params_style'),
|
| 223 |
+
('Data fields', 'params_style'),
|
| 224 |
+
]
|
| 225 |
+
|
| 226 |
+
# Disable docstring inheritance
|
| 227 |
+
autodoc_inherit_docstrings = False
|
| 228 |
+
# Mock some imports during generate API docs.
|
| 229 |
+
autodoc_mock_imports = ['rich', 'attr', 'einops']
|
| 230 |
+
# Disable displaying type annotations, these can be very verbose
|
| 231 |
+
autodoc_typehints = 'none'
|
| 232 |
+
|
| 233 |
+
# The not found page
|
| 234 |
+
notfound_template = '404.html'
|
VLMEvalKit-sudoku/docs/en/docutils.conf
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[html writers]
|
| 2 |
+
table_style: colwidths-auto
|
VLMEvalKit-sudoku/docs/ja/README_ja.md
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
|
| 3 |
+

|
| 4 |
+
|
| 5 |
+
<b>VLMEvalKit: 大規模視覚言語モデルの評価ツールキット</b>
|
| 6 |
+
|
| 7 |
+
[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link]
|
| 8 |
+
|
| 9 |
+
[English](/README.md) | [简体中文](/docs/zh-CN/README_zh-CN.md) | 日本語
|
| 10 |
+
|
| 11 |
+
<a href="https://rank.opencompass.org.cn/leaderboard-multimodal">🏆 OpenCompass Learderboard </a> •
|
| 12 |
+
<a href="#-datasets-models-and-evaluation-results">📊Datasets & Models </a> •
|
| 13 |
+
<a href="#%EF%B8%8F-quickstart">🏗️Quickstart </a> •
|
| 14 |
+
<a href="#%EF%B8%8F-development-guide">🛠️Development </a> •
|
| 15 |
+
<a href="#-the-goal-of-vlmevalkit">🎯Goal </a> •
|
| 16 |
+
<a href="#%EF%B8%8F-citation">🖊️Citation </a>
|
| 17 |
+
|
| 18 |
+
<a href="https://huggingface.co/spaces/opencompass/open_vlm_leaderboard">🤗 HF Leaderboard</a> •
|
| 19 |
+
<a href="https://huggingface.co/datasets/VLMEval/OpenVLMRecords">🤗 Evaluation Records</a> •
|
| 20 |
+
<a href="https://discord.gg/evDT4GZmxN">🔊 Discord Channel</a> •
|
| 21 |
+
<a href="https://www.arxiv.org/abs/2407.11691">📝 Technical Report</a>
|
| 22 |
+
</div>
|
| 23 |
+
|
| 24 |
+
**VLMEvalKit**(pythonパッケージ名は**vlmeval**)は、**大規模視覚言語モデル(LVLMs)**の**オープンソース評価ツールキット**です。このツールキットは、複数のリポジトリでのデータ準備という重労働なしに、さまざまなベンチマークでLVLMsの**ワンコマンド評価**を可能にします。VLMEvalKitでは、すべてのLVLMsに対して**生成ベースの評価**を採用し、**正確なマッチング**と**LLMベースの回答抽出**の両方で得られた評価結果を提供します。
|
| 25 |
+
|
| 26 |
+
PS: 日本語の README には最新のアップデートがすべて含まれていない場合があります。英語版をご確認ください。
|
| 27 |
+
|
| 28 |
+
## 📊 データセット、モデル、および評価結果
|
| 29 |
+
|
| 30 |
+
**公式のマルチモーダルリーダーボードでのパフォーマンス数値は、ここからダウンロードできます!**
|
| 31 |
+
|
| 32 |
+
[**OpenVLM Leaderboard**](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard): [すべての詳細な結果をダウンロード](http://opencompass.openxlab.space/assets/OpenVLM.json)。
|
| 33 |
+
|
| 34 |
+
**Supported Benchmarks** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされているベンチマーク(70以上)を表示してください。
|
| 35 |
+
|
| 36 |
+
**Supported LMMs** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされている LMMs(200以上)を表示してください。
|
| 37 |
+
|
| 38 |
+
**Transformersバージョンの推奨事項:**
|
| 39 |
+
|
| 40 |
+
特定のtransformerバージョンで一部のVLMが実行できない可能性があることに注意してください。各VLMを評価するために、以下の設定を推奨します:
|
| 41 |
+
|
| 42 |
+
- **`transformers==4.33.0`を使用してください**: `Qwenシリーズ`, `Monkeyシリーズ`, `InternLM-XComposerシリーズ`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICSシリーズ`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4シリーズ`, `InstructBLIPシリーズ`, `PandaGPT`, `VXVERSE`, `GLM-4v-9B`.
|
| 43 |
+
- **`transformers==4.37.0`を使用してください**: `LLaVAシリーズ`, `ShareGPT4Vシリーズ`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLMシリーズ`, `EMU2シリーズ`, `Yi-VLシリーズ`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VLシリーズ`, `InternVLシリーズ`, `Cambrianシリーズ`, `VILA-VLシリーズ`.
|
| 44 |
+
- **`transformers==4.40.0`を使用してください**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`.
|
| 45 |
+
- **`transformers==4.42.0`を使用してください**: `AKI`.
|
| 46 |
+
- **`transformers==latest`を使用してください**: `LLaVA-Nextシリーズ`, `PaliGemma-3B`, `Chameleon-VLシリーズ`, `Video-LLaVA-7B-HF`, `Ovis1.5シリーズ`, `Mantisシリーズ`, `MiniCPM-V2.6`.
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
# デモ
|
| 50 |
+
from vlmeval.config import supported_VLM
|
| 51 |
+
model = supported_VLM['idefics_9b_instruct']()
|
| 52 |
+
# 単一画像のフォワード
|
| 53 |
+
ret = model.generate(['assets/apple.jpg', 'この画像には何がありますか?'])
|
| 54 |
+
print(ret) # この画像には葉がついた赤いリンゴがあります。
|
| 55 |
+
# 複数画像のフォワード
|
| 56 |
+
ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', '提供された画像にはリンゴが何個ありますか?'])
|
| 57 |
+
print(ret) # 提供された画像にはリンゴが2個あります。
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
## 🏗️ クイックスタート
|
| 61 |
+
|
| 62 |
+
クイックスタートガイドについては、[クイックスタート](/docs/en/Quickstart.md)を参照してください。
|
| 63 |
+
|
| 64 |
+
## 🛠️ 開発ガイド
|
| 65 |
+
|
| 66 |
+
カスタムベンチマーク、VLMsを開発するか���単に**VLMEvalKit**に他のコードを貢献する場合は、[開発ガイド](/docs/en/Development.md)を参照してください。
|
| 67 |
+
|
| 68 |
+
コミュニティからの共有を奨励し、それに応じたクレジットを共有するために、次回のレポート更新では以下のことを実施します:
|
| 69 |
+
|
| 70 |
+
- 全ての貢献に対して感謝の意を示します
|
| 71 |
+
- 新しいモデル、評価セット、または主要な機能への3つ以上の主要な貢献を持つ貢献者は、テクニカルレポートの著者リストに加わることができます。適格な貢献者は、issueを作成するか、または[VLM評価キット ディスコードチャンネル](https://discord.com/invite/evDT4GZmxN)で kennyutc にDMを送ることができます。私たちはそれに応じてフォローアップします。
|
| 72 |
+
|
| 73 |
+
## 🎯 VLMEvalKitの目標
|
| 74 |
+
|
| 75 |
+
**このコードベースは以下を目的として設計されています:**
|
| 76 |
+
|
| 77 |
+
1. 研究者や開発者が既存のLVLMsを評価し、評価結果を**簡単に再現できるようにする**ための**使いやすい**、**オープンソースの評価ツールキット**を提供します。
|
| 78 |
+
2. VLMの開発者が自分のモデルを簡単に評価できるようにします。複数のサポートされているベンチマークでVLMを評価するには、単一の`generate_inner()`関数を**実装するだけで**、他のすべてのワークロード(データのダウンロード、データの前処理、予測の推論、メトリックの計算)はコードベースによって処理されます。
|
| 79 |
+
|
| 80 |
+
**このコードベースは以下を目的として設計されていません:**
|
| 81 |
+
|
| 82 |
+
1. すべての**第三者ベンチマーク**の元の論文で報告された正確な精度数値を再現すること。その理由は2つあります:
|
| 83 |
+
1. VLMEvalKitは、すべてのVLMに対して**生成ベースの評価**を使用します(オプションで**LLMベースの回答抽出**を使用)。一方、一部のベンチマークは異なるアプローチを使用する場合があります(SEEDBenchはPPLベースの評価を使用します)。これらのベンチマークについては、対応する結果で両方のスコアを比較します。開発者には、コードベースで他の評価パラダイムをサポートすることをお勧めします。
|
| 84 |
+
2. デフォルトでは、すべてのVLMに対して同じプロンプトテンプレートを使用してベンチマークを評価します。一方、**一部のVLMには特定のプロンプトテンプレートがある**場合があります(現時点ではコードベースでカバーされていない場合があります)。VLMの開発者には、現在カバーされていない場合でも、VLMEvalKitで独自のプロンプトテンプレートを実装することをお勧めします。これにより、再現性が向上します。
|
| 85 |
+
|
| 86 |
+
## 🖊️ 引用
|
| 87 |
+
|
| 88 |
+
この作業が役立つ場合は、このリポジトリに**スター🌟**を付けてください。サポートありがとうございます!
|
| 89 |
+
|
| 90 |
+
[](https://github.com/open-compass/VLMEvalKit/stargazers)
|
| 91 |
+
|
| 92 |
+
研究でVLMEvalKitを使用する場合、または公開されたオープンソースの評価結果を参照する場合は、以下のBibTeXエントリと、使用した特定のVLM/ベンチマークに対応するBibTexエントリを使用してください。
|
| 93 |
+
|
| 94 |
+
```bib
|
| 95 |
+
@misc{duan2024vlmevalkit,
|
| 96 |
+
title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models},
|
| 97 |
+
author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen},
|
| 98 |
+
year={2024},
|
| 99 |
+
eprint={2407.11691},
|
| 100 |
+
archivePrefix={arXiv},
|
| 101 |
+
primaryClass={cs.CV},
|
| 102 |
+
url={https://arxiv.org/abs/2407.11691},
|
| 103 |
+
}
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
<p align="right"><a href="#top">🔝Top に戻る</a></p>
|
| 107 |
+
|
| 108 |
+
[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors
|
| 109 |
+
[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square
|
| 110 |
+
[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members
|
| 111 |
+
[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square
|
| 112 |
+
[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues
|
| 113 |
+
[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square
|
| 114 |
+
[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE
|
| 115 |
+
[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square
|
| 116 |
+
[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers
|
| 117 |
+
[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square
|
VLMEvalKit-sudoku/docs/zh-CN/ConfigSystem.md
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# 配置系统
|
| 3 |
+
|
| 4 |
+
默认情况下,VLMEvalKit通过在`run.py`脚本中使用`--model`和`--data`参数设置模型名称(在`/vlmeval/config.py`中定义)和数据集名称(在`vlmeval/dataset/__init__.py` 或 `vlmeval/dataset/video_dataset_config.py` 中定义)来启动评估。这种方法在大多数情况下简单且高效,但当用户希望使用不同设置评估多个模型/数据集时,可能不够灵活。
|
| 5 |
+
|
| 6 |
+
为了解决这个问题,VLMEvalKit提供了一个更灵活的配置系统。用户可以在json文件中指定模型和数据集设置,并通过`--config`参数将配置文件的路径传递给`run.py`脚本。以下是一个示例配置json:
|
| 7 |
+
|
| 8 |
+
```json
|
| 9 |
+
{
|
| 10 |
+
"model": {
|
| 11 |
+
"GPT4o_20240806_T00_HIGH": {
|
| 12 |
+
"class": "GPT4V",
|
| 13 |
+
"model": "gpt-4o-2024-08-06",
|
| 14 |
+
"temperature": 0,
|
| 15 |
+
"img_detail": "high"
|
| 16 |
+
},
|
| 17 |
+
"GPT4o_20240806_T10_Low": {
|
| 18 |
+
"class": "GPT4V",
|
| 19 |
+
"model": "gpt-4o-2024-08-06",
|
| 20 |
+
"temperature": 1.0,
|
| 21 |
+
"img_detail": "low"
|
| 22 |
+
},
|
| 23 |
+
"GPT4o_20241120": {}
|
| 24 |
+
},
|
| 25 |
+
"data": {
|
| 26 |
+
"MME-RealWorld-Lite": {
|
| 27 |
+
"class": "MMERealWorld",
|
| 28 |
+
"dataset": "MME-RealWorld-Lite"
|
| 29 |
+
},
|
| 30 |
+
"MMBench_DEV_EN_V11": {
|
| 31 |
+
"class": "ImageMCQDataset",
|
| 32 |
+
"dataset": "MMBench_DEV_EN_V11"
|
| 33 |
+
},
|
| 34 |
+
"MMBench_Video_8frame_nopack":{},
|
| 35 |
+
"Video-MME_16frame_subs": {
|
| 36 |
+
"class": "VideoMME",
|
| 37 |
+
"dataset": "Video-MME",
|
| 38 |
+
"nframe": 16,
|
| 39 |
+
"use_subtitle": true
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
}
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
配置json的解释:
|
| 46 |
+
|
| 47 |
+
1. 现在我们支持两个字段:`model`和`data`,每个字段都是一个字典。字典的键是模型/数据集的名称(由用户设置),值是模型/数据集的设置。
|
| 48 |
+
2. 对于`model`中的项目,值是一个包含以下键的字典:
|
| 49 |
+
- `class`:模型的类名,应该是`vlmeval/vlm/__init__.py`(开源模型)或`vlmeval/api/__init__.py`(API模型)中定义的类名。
|
| 50 |
+
- 其他kwargs:其他kwargs是模型特定的参数,请参考模型类的定义以获取详细用法。例如,`model`、`temperature`、`img_detail`是`GPT4V`类的参数。值得注意的是,大多数模型类都需要`model`参数。
|
| 51 |
+
- Tip:在位于`vlmeval/config.py`的变量`supported_VLM`中的已经被定义的模型可以作为`model`的键,而不需要填对应的值即可启动。例如,`GPT4o_20240806_T00_HIGH: {}`是等价于`GPT4o_20240806_T00_HIGH: {'class': 'GPT4V', 'model': 'gpt-4o-2024-08-06', 'temperature': 0, 'img_size': -1, 'img_detail': 'high', 'retry': 10, 'verbose': False}`。
|
| 52 |
+
3. 对于字典`data`,我们建议用户使用官方数据集名称作为键(或键的一部分),因为我们经常根据数据集名称确定后处理/判断设置。对于`data`中的项目,值是一个包含以下键的字典:
|
| 53 |
+
- `class`:数据集的类名,应该是`vlmeval/dataset/__init__.py`中定义的类名。
|
| 54 |
+
- 其他kwargs:其他kwargs是数据集特定的参数,请参考数据集类的定义以获取详细用法。通常,大多数数据集类都需要`dataset`参数。大多数视频数据集类都需要 `nframe` 或 `fps` 参数。
|
| 55 |
+
- Tip:在位于`vlmeval/dataset/video_dataset_config.py`的变量`supported_video_dataset`中的已经被定义的数据集可以作为`data`的键,而不需要填对应的值即可启动。例如,`MMBench_Video_8frame_nopack: {}`是等价于`MMBench_Video_8frame_nopack: {'class': 'MMBenchVideo', 'dataset': 'MMBench-Video', 'nframe': 8, 'pack': False}`。
|
| 56 |
+
|
| 57 |
+
将示例配置json保存为`config.json`,您可以通过以下命令启动评估:
|
| 58 |
+
|
| 59 |
+
```bash
|
| 60 |
+
python run.py --config config.json
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
这将在工作目录`$WORK_DIR`下生成以下输出文件(格式为`{$WORK_DIR}/{$MODEL_NAME}/{$MODEL_NAME}_{$DATASET_NAME}_*`):
|
| 64 |
+
|
| 65 |
+
- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MME-RealWorld-Lite*`
|
| 66 |
+
- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MME-RealWorld-Lite*`
|
| 67 |
+
- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MMBench_DEV_EN_V11*`
|
| 68 |
+
- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MMBench_DEV_EN_V11*`
|
| 69 |
+
......
|
VLMEvalKit-sudoku/docs/zh-CN/EvalByLMDeploy.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 使用 LMDeploy 加速评测推理
|
| 2 |
+
|
| 3 |
+
VLMEvalKit 支持测试由 LMDeploy 部署的 VLM 模型,下面以 InternVL2-8B 为例,展示如何测试模型
|
| 4 |
+
|
| 5 |
+
## 第0步 安装 LMDeploy
|
| 6 |
+
|
| 7 |
+
```bash
|
| 8 |
+
pip install lmdeploy
|
| 9 |
+
```
|
| 10 |
+
|
| 11 |
+
其他安装方式可以参考 LMDeploy 的[文档](https://github.com/InternLM/lmdeploy)
|
| 12 |
+
|
| 13 |
+
## 第1步 启动推理服务
|
| 14 |
+
|
| 15 |
+
```bash
|
| 16 |
+
lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B
|
| 17 |
+
```
|
| 18 |
+
> [!IMPORTANT]
|
| 19 |
+
> 因为 VLMEvalKit 中的模型对于不同数据集在构建 prompt 时可能有自定义行为,如 InternVL2 对于 HallusionBench 的处理,所以,server 端在启动的时候需要指定 `--model-name`,这样在使用 LMDeploy api 时可以根据名字选择合适的 prompt 构建策略。
|
| 20 |
+
>
|
| 21 |
+
> 如果指定了 `--server-port`,需要设置对应的环境变量 `LMDEPLOY_API_BASE`
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
## 第2步 评测
|
| 25 |
+
|
| 26 |
+
```bash
|
| 27 |
+
python run.py --data MMStar --model InternVL2-8B --verbose --api-nproc 64
|
| 28 |
+
```
|
VLMEvalKit-sudoku/docs/zh-CN/README_zh-CN.md
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
|
| 3 |
+

|
| 4 |
+
|
| 5 |
+
<b>VLMEvalKit: 一种多模态大模型评测工具 </b>
|
| 6 |
+
|
| 7 |
+
[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link]
|
| 8 |
+
|
| 9 |
+
[English](/README.md) | 简体中文 | [日本語](/docs/ja/README_ja.md)
|
| 10 |
+
|
| 11 |
+
<a href="https://rank.opencompass.org.cn/leaderboard-multimodal">🏆 OpenCompass 排行榜 </a> •
|
| 12 |
+
<a href="#%EF%B8%8F-quickstart">🏗️ 快速开始 </a> •
|
| 13 |
+
<a href="#-datasets-models-and-evaluation-results">📊 数据集和模型 </a> •
|
| 14 |
+
<a href="#%EF%B8%8F-development-guide">🛠️ 开发指南 </a> •
|
| 15 |
+
<a href="#-the-goal-of-vlmevalkit">🎯 我们的目标 </a> •
|
| 16 |
+
<a href="#%EF%B8%8F-citation">🖊️ 引用 </a>
|
| 17 |
+
|
| 18 |
+
<a href="https://huggingface.co/spaces/opencompass/open_vlm_leaderboard">🤗 HuggingFace 排行榜 (存档全部性能) </a> •
|
| 19 |
+
<a href="https://huggingface.co/datasets/VLMEval/OpenVLMRecords">🤗 原始评测记录</a> •
|
| 20 |
+
<a href="https://discord.gg/evDT4GZmxN">🔊 Discord</a> •
|
| 21 |
+
<a href="https://www.arxiv.org/abs/2407.11691">📝 技术报告 </a>
|
| 22 |
+
</div>
|
| 23 |
+
|
| 24 |
+
**VLMEvalKit** (python 包名为 **vlmeval**) 是一款专为大型视觉语言模型 (Large Vision-Language Models, LVLMs) 评测而设计的开源工具包。该工具支持在各种基准测试上对大型视觉语言模型进行**一键评估**,无需进行繁重的数据准备工作,让评估过程更加简便。在 VLMEvalKit 中,我们对所有大型视觉语言模型生成的结果进行评测,并提供基于**精确匹配**与基于 **LLM 的答案提取**两种评测结果。
|
| 25 |
+
|
| 26 |
+
## 🆕 更新
|
| 27 |
+
|
| 28 |
+
- **[2025-04-29]** 优化 `torchrun` 启动逻辑:目前 `torchrun` 启动时,若进程数为 M,机器 GPU 卡数为 N,将会自动调整每个进程分配的 GPU 数量为 `N // M`。目前此分配方式适用于 `transformers`, `lmdeploy` 推理后端,`vllm` 推理后端仅支持使用 python 启动 🔥🔥🔥
|
| 29 |
+
- **[2025-02-20]** 支持新模型:**InternVL2.5 series, QwenVL2.5 series, QVQ-72B, Doubao-VL, Janus-Pro-7B, MiniCPM-o-2.6, InternVL2-MPO, LLaVA-CoT, Hunyuan-Standard-Vision, Ovis2, Valley, SAIL-VL, Ross, Long-VITA, EMU3, SmolVLM**。支持新基准:**MMMU-Pro, WeMath, 3DSRBench, LogicVista, VL-RewardBench, CC-OCR, CG-Bench, CMMMU, WorldSense**。请参考[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)以获取更多信息。感谢社区的各位贡献者 🔥🔥🔥
|
| 30 |
+
- **[2024-11-21]** 集成了一个新的配置系统,以实现更灵活的评估设置。查看[文档](/docs/zh-CN/ConfigSystem.md)或运行`python run.py --help`了解更多详情 🔥🔥🔥
|
| 31 |
+
- **[2024-11-21]** 支持 **[QSpatial](https://andrewliao11.github.io/spatial_prompt/)**,一个用于定量空间推理的多模态基准(例如,确定大小/距离),感谢 **[andrewliao11](https://github.com/andrewliao11)** 提供官方支持 🔥🔥🔥
|
| 32 |
+
- **[2024-11-21]** 支持 **[MM-Math](https://github.com/kge-sun/mm-math)**,一个包含约6K初中多模态推理数学问题的新多模态数学基准。GPT-4o-20240806在该基准上达到了22.5%的准确率 🔥🔥🔥
|
| 33 |
+
- **[2024-11-16]** 支持 **[OlympiadBench](https://github.com/OpenBMB/OlympiadBench)**,一个多模态基准,包含奥林匹克级别的数学和物理问题 🔥🔥🔥
|
| 34 |
+
- **[2024-11-16]** 支持 **[WildVision](https://huggingface.co/datasets/WildVision/wildvision-bench)**,一个基于多模态竞技场数据的主观多模态基准 🔥🔥🔥
|
| 35 |
+
- **[2024-11-13]** 支持 **[MIA-Bench](https://arxiv.org/abs/2407.01509)**,一个多模态指令跟随基准 🔥🔥🔥
|
| 36 |
+
- **[2024-11-08]** 支持 **[Aria](https://arxiv.org/abs/2410.05993)**,一个多模态原生 MoE 模型,感谢 **[teowu](https://github.com/teowu)** 🔥🔥🔥
|
| 37 |
+
- **[2024-11-04]** 支持 **[WorldMedQA-V](https://www.arxiv.org/abs/2410.12722)**,该基准包含 1000 多个医学 VQA 问题,涵盖巴西、以色列、日本、西班牙等四个国家的语言,以及它们的英文翻译 🔥🔥🔥
|
| 38 |
+
|
| 39 |
+
## 🏗️ 快速开始 <a id="quickstart"></a>
|
| 40 |
+
|
| 41 |
+
请参阅[**快速开始**](/docs/zh-CN/Quickstart.md)获取入门指南。
|
| 42 |
+
|
| 43 |
+
## 📊 评测结果,支持的数据集和模型 <a id="data-model-results"></a>
|
| 44 |
+
|
| 45 |
+
### 评测结果
|
| 46 |
+
|
| 47 |
+
**[OpenVLM Leaderboard](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard)**: **[下载全部细粒度测试结果](http://opencompass.openxlab.space/assets/OpenVLM.json)**.
|
| 48 |
+
|
| 49 |
+
请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported Benchmarks** 标签,以查看所有支持的图像和视频基准(70+)。
|
| 50 |
+
|
| 51 |
+
请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported LMMs** 标签,以查看所有支持的 LMMs,包括商业 API、开源模型等(200+)。
|
| 52 |
+
|
| 53 |
+
### 其他
|
| 54 |
+
|
| 55 |
+
**Transformers 的版本推荐:**
|
| 56 |
+
|
| 57 |
+
**请注意**,某些 VLM 可能无法在某些特定的 transformers 版本下运行,我们建议使用以下设置来评估对应的VLM:
|
| 58 |
+
|
| 59 |
+
- **请用** `transformers==4.33.0` **来运行**: `Qwen series`, `Monkey series`, `InternLM-XComposer Series`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICS series`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4 series`, `InstructBLIP series`, `PandaGPT`, `VXVERSE`.
|
| 60 |
+
- **请用** `transformers==4.37.0 ` **来运行**: `LLaVA series`, `ShareGPT4V series`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLM Series`, `EMU2 Series`, `Yi-VL Series`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VL series`, `InternVL series`, `Cambrian Series`, `VILA Series`, `Llama-3-MixSenseV1_1`, `Parrot-7B`, `PLLaVA Series`.
|
| 61 |
+
- **请用** `transformers==4.40.0 ` **来运行**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`.
|
| 62 |
+
- **请用** `transformers==4.42.0 ` **来运行**: `AKI`.
|
| 63 |
+
- **请用** `transformers==latest` **来运行**: `LLaVA-Next series`, `PaliGemma-3B`, `Chameleon series`, `Video-LLaVA-7B-HF`, `Ovis series`, `Mantis series`, `MiniCPM-V2.6`, `OmChat-v2.0-13B-sinlge-beta`, `Idefics-3`, `GLM-4v-9B`, `VideoChat2-HD`.
|
| 64 |
+
|
| 65 |
+
**如何测试一个 VLM 是否可以正常运行:**
|
| 66 |
+
|
| 67 |
+
```python
|
| 68 |
+
from vlmeval.config import supported_VLM
|
| 69 |
+
model = supported_VLM['idefics_9b_instruct']()
|
| 70 |
+
# 前向单张图片
|
| 71 |
+
ret = model.generate(['assets/apple.jpg', 'What is in this image?'])
|
| 72 |
+
print(ret) # 这张图片上有一个带叶子的红苹果
|
| 73 |
+
# 前向多张图片
|
| 74 |
+
ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', 'How many apples are there in the provided images? '])
|
| 75 |
+
print(ret) # 提供的图片中有两个苹果
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
## 🛠️ 开发指南 <a id="development"></a>
|
| 79 |
+
|
| 80 |
+
要开发自定义评测数据集,支持其他 VLMs,或为 VLMEvalKit 贡献代码,请参阅[**开发指南**](/docs/zh-CN/Development_zh-CN.md)。
|
| 81 |
+
|
| 82 |
+
为激励来自社区的共享并分享相应的 credit,在下一次 report 更新中,我们将:
|
| 83 |
+
|
| 84 |
+
- 致谢所有的 contribution
|
| 85 |
+
- 具备三个或以上主要贡献 (支持新模型、评测集、或是主要特性) 的贡献者将可以加入技术报告的作者列表 。合条件的贡献者可以创建 issue 或是在 [VLMEvalKit Discord Channel](https://discord.com/invite/evDT4GZmxN) 私信 kennyutc,我们将进行跟进
|
| 86 |
+
|
| 87 |
+
## 🎯 VLMEvalKit 的目标 <a id="goal-of-vlmevalkit"></a>
|
| 88 |
+
|
| 89 |
+
**该代码库的设计目标是:**
|
| 90 |
+
|
| 91 |
+
1. 提供一个**易于使用**的**开源评估工具包**,方便研究人员和开发人员评测现有的多模态大模型,并使评测结果**易于复现**。
|
| 92 |
+
2. 使 VLM 开发人员能够轻松地评测自己的模型。在多个支持的基准测试上评估 VLM,只需实现一个 `generate_inner()` 函数,所有其他工作负载(数据下载、数据预处理、预测推理、度量计算)都由代码库处理。
|
| 93 |
+
|
| 94 |
+
**该代码库的设计目标不是:**
|
| 95 |
+
|
| 96 |
+
复现所有**第三方基准测试**原始论文中报告的准确数字。有两个相关的原因:
|
| 97 |
+
1. VLMEvalKit 对所有 VLMs 使用基于生成的评估(可选使用基于 LLM 的答案提取)。同时,一些基准测试可能官方使用不同的方法(*例如,SEEDBench 使用基于 PPL 的评估*)。对于这些基准测试,我们在相应的结果中比较两个得分。我们鼓励开发人员在代码库中支持其他评估范式。
|
| 98 |
+
2. 默认情况下,我们对所有多模态模型使用相同的提示模板来评估基准测试。同时,**一些多模态模型可能有他们特定的提示模板**(目前可能未在代码库中涵盖)。我们鼓励 VLM 的开发人员在 VLMEvalKit 中实现自己的提示模板,如果目前未覆盖。这将有助于提高可复现性。
|
| 99 |
+
|
| 100 |
+
## 🖊️ 引用 <a id="citation"></a>
|
| 101 |
+
|
| 102 |
+
如果我们的工作对您有所帮助,请考虑 **star🌟** VLMEvalKit。感谢支持!
|
| 103 |
+
|
| 104 |
+
[](https://github.com/open-compass/VLMEvalKit/stargazers)
|
| 105 |
+
|
| 106 |
+
如果您在研究中使用了 VLMEvalKit,或希望参考已发布的开源评估结果,请使用以下 BibTeX 条目以及与您使用的特定 VLM / 基准测试相对应的 BibTex 条目。
|
| 107 |
+
|
| 108 |
+
```bib
|
| 109 |
+
@misc{duan2024vlmevalkit,
|
| 110 |
+
title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models},
|
| 111 |
+
author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen},
|
| 112 |
+
year={2024},
|
| 113 |
+
eprint={2407.11691},
|
| 114 |
+
archivePrefix={arXiv},
|
| 115 |
+
primaryClass={cs.CV},
|
| 116 |
+
url={https://arxiv.org/abs/2407.11691},
|
| 117 |
+
}
|
| 118 |
+
```
|
| 119 |
+
|
| 120 |
+
<p align="right"><a href="#top">🔝回到顶部</a></p>
|
| 121 |
+
|
| 122 |
+
[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors
|
| 123 |
+
[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square
|
| 124 |
+
[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members
|
| 125 |
+
[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square
|
| 126 |
+
[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues
|
| 127 |
+
[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square
|
| 128 |
+
[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE
|
| 129 |
+
[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square
|
| 130 |
+
[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers
|
| 131 |
+
[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square
|
VLMEvalKit-sudoku/docs/zh-CN/_static/image/logo.svg
ADDED
|
|
VLMEvalKit-sudoku/docs/zh-CN/_templates/autosummary/class.rst
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. role:: hidden
|
| 2 |
+
:class: hidden-section
|
| 3 |
+
.. currentmodule:: {{ module }}
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
{{ name | underline}}
|
| 7 |
+
|
| 8 |
+
.. autoclass:: {{ name }}
|
| 9 |
+
:members:
|
| 10 |
+
|
| 11 |
+
..
|
| 12 |
+
autogenerated from _templates/autosummary/class.rst
|
| 13 |
+
note it does not have :inherited-members:
|
VLMEvalKit-sudoku/docs/zh-CN/conf.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
# Configuration file for the Sphinx documentation builder.
|
| 3 |
+
#
|
| 4 |
+
# This file only contains a selection of the most common options. For a full
|
| 5 |
+
# list see the documentation:
|
| 6 |
+
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
| 7 |
+
|
| 8 |
+
# -- Path setup --------------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
# If extensions (or modules to document with autodoc) are in another directory,
|
| 11 |
+
# add these directories to sys.path here. If the directory is relative to the
|
| 12 |
+
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
| 13 |
+
#
|
| 14 |
+
import os
|
| 15 |
+
import ast
|
| 16 |
+
import subprocess
|
| 17 |
+
import sys
|
| 18 |
+
|
| 19 |
+
import pytorch_sphinx_theme
|
| 20 |
+
from sphinx.builders.html import StandaloneHTMLBuilder
|
| 21 |
+
|
| 22 |
+
sys.path.insert(0, os.path.abspath('../../'))
|
| 23 |
+
|
| 24 |
+
# -- Project information -----------------------------------------------------
|
| 25 |
+
|
| 26 |
+
project = 'VLMEvalKit'
|
| 27 |
+
copyright = '2023, VLMEvalKit'
|
| 28 |
+
author = 'VLMEvalKit Authors'
|
| 29 |
+
|
| 30 |
+
# The full version, including alpha/beta/rc tags
|
| 31 |
+
version_file = '../../vlmeval/__init__.py'


def get_version(path=version_file):
    """Extract ``__version__`` from *path* without importing the package.

    Importing ``vlmeval`` inside the docs build would drag in heavy runtime
    dependencies, so the version string is read statically via the AST.

    Args:
        path: Python source file containing a top-level
            ``__version__ = '...'`` assignment. Defaults to the package
            ``__init__.py`` relative to the docs directory.

    Returns:
        The version string.

    Raises:
        ValueError: If no top-level ``__version__`` assignment is found.
    """
    with open(path, 'r') as f:
        tree = ast.parse(f.read(), filename=path)

    # Iterate through the module body, looking for `__version__ = ...`.
    for node in tree.body:
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name) and target.id == '__version__':
                    # `.s` is a deprecated alias on constant nodes (removed in
                    # newer Python); read ast.Constant.value directly.
                    return node.value.value
    raise ValueError('__version__ not found')
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
release = get_version()
|
| 50 |
+
|
| 51 |
+
# -- General configuration ---------------------------------------------------
|
| 52 |
+
|
| 53 |
+
# Add any Sphinx extension module names here, as strings. They can be
|
| 54 |
+
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
| 55 |
+
# ones.
|
| 56 |
+
extensions = [
|
| 57 |
+
'sphinx.ext.autodoc',
|
| 58 |
+
'sphinx.ext.autosummary',
|
| 59 |
+
'sphinx.ext.intersphinx',
|
| 60 |
+
'sphinx.ext.napoleon',
|
| 61 |
+
'sphinx.ext.viewcode',
|
| 62 |
+
'myst_parser',
|
| 63 |
+
'sphinx_copybutton',
|
| 64 |
+
'sphinx_tabs.tabs',
|
| 65 |
+
'notfound.extension',
|
| 66 |
+
'sphinxcontrib.jquery',
|
| 67 |
+
'sphinx_design',
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
# Add any paths that contain templates here, relative to this directory.
|
| 71 |
+
templates_path = ['_templates']
|
| 72 |
+
|
| 73 |
+
# The suffix(es) of source filenames.
|
| 74 |
+
# You can specify multiple suffix as a list of string:
|
| 75 |
+
#
|
| 76 |
+
source_suffix = {
|
| 77 |
+
'.rst': 'restructuredtext',
|
| 78 |
+
'.md': 'markdown',
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
language = 'cn'
|
| 82 |
+
|
| 83 |
+
# The master toctree document.
|
| 84 |
+
root_doc = 'index'
|
| 85 |
+
html_context = {
|
| 86 |
+
'github_version': 'latest',
|
| 87 |
+
}
|
| 88 |
+
# List of patterns, relative to source directory, that match files and
|
| 89 |
+
# directories to ignore when looking for source files.
|
| 90 |
+
# This pattern also affects html_static_path and html_extra_path.
|
| 91 |
+
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
|
| 92 |
+
|
| 93 |
+
# -- Options for HTML output -------------------------------------------------
|
| 94 |
+
|
| 95 |
+
# The theme to use for HTML and HTML Help pages. See the documentation for
|
| 96 |
+
# a list of builtin themes.
|
| 97 |
+
#
|
| 98 |
+
html_theme = 'pytorch_sphinx_theme'
|
| 99 |
+
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
|
| 100 |
+
|
| 101 |
+
# Theme options are theme-specific and customize the look and feel of a theme
|
| 102 |
+
# further. For a list of options available for each theme, see the
|
| 103 |
+
# documentation.
|
| 104 |
+
# yapf: disable
|
| 105 |
+
html_theme_options = {
|
| 106 |
+
'menu': [
|
| 107 |
+
{
|
| 108 |
+
'name': 'GitHub',
|
| 109 |
+
'url': 'https://github.com/open-compass/VLMEvalKit'
|
| 110 |
+
},
|
| 111 |
+
],
|
| 112 |
+
# Specify the language of shared menu
|
| 113 |
+
'menu_lang': 'cn',
|
| 114 |
+
# Disable the default edit on GitHub
|
| 115 |
+
'default_edit_on_github': False,
|
| 116 |
+
}
|
| 117 |
+
# yapf: enable
|
| 118 |
+
|
| 119 |
+
# Add any paths that contain custom static files (such as style sheets) here,
|
| 120 |
+
# relative to this directory. They are copied after the builtin static files,
|
| 121 |
+
# so a file named "default.css" will overwrite the builtin "default.css".
|
| 122 |
+
html_static_path = ['_static']
|
| 123 |
+
html_css_files = [
|
| 124 |
+
'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css',
|
| 125 |
+
'css/readthedocs.css'
|
| 126 |
+
]
|
| 127 |
+
html_js_files = [
|
| 128 |
+
'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js',
|
| 129 |
+
'js/custom.js'
|
| 130 |
+
]
|
| 131 |
+
|
| 132 |
+
# -- Options for HTMLHelp output ---------------------------------------------
|
| 133 |
+
|
| 134 |
+
# Output file base name for HTML help builder.
|
| 135 |
+
htmlhelp_basename = 'vlmevalkitdoc'
|
| 136 |
+
|
| 137 |
+
# -- Options for LaTeX output ------------------------------------------------
|
| 138 |
+
|
| 139 |
+
latex_elements = {
|
| 140 |
+
# The paper size ('letterpaper' or 'a4paper').
|
| 141 |
+
#
|
| 142 |
+
# 'papersize': 'letterpaper',
|
| 143 |
+
|
| 144 |
+
# The font size ('10pt', '11pt' or '12pt').
|
| 145 |
+
#
|
| 146 |
+
# 'pointsize': '10pt',
|
| 147 |
+
|
| 148 |
+
# Additional stuff for the LaTeX preamble.
|
| 149 |
+
#
|
| 150 |
+
# 'preamble': '',
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
# Grouping the document tree into LaTeX files. List of tuples
|
| 154 |
+
# (source start file, target name, title,
|
| 155 |
+
# author, documentclass [howto, manual, or own class]).
|
| 156 |
+
latex_documents = [
|
| 157 |
+
(root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author,
|
| 158 |
+
'manual'),
|
| 159 |
+
]
|
| 160 |
+
|
| 161 |
+
# -- Options for manual page output ------------------------------------------
|
| 162 |
+
|
| 163 |
+
# One entry per manual page. List of tuples
|
| 164 |
+
# (source start file, name, description, authors, manual section).
|
| 165 |
+
man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author],
|
| 166 |
+
1)]
|
| 167 |
+
|
| 168 |
+
# -- Options for Texinfo output ----------------------------------------------
|
| 169 |
+
|
| 170 |
+
# Grouping the document tree into Texinfo files. List of tuples
|
| 171 |
+
# (source start file, target name, title, author,
|
| 172 |
+
# dir menu entry, description, category)
|
| 173 |
+
texinfo_documents = [
|
| 174 |
+
(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author,
|
| 175 |
+
'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.',
|
| 176 |
+
'Miscellaneous'),
|
| 177 |
+
]
|
| 178 |
+
|
| 179 |
+
# -- Options for Epub output -------------------------------------------------
|
| 180 |
+
|
| 181 |
+
# Bibliographic Dublin Core info.
|
| 182 |
+
epub_title = project
|
| 183 |
+
|
| 184 |
+
# The unique identifier of the text. This can be a ISBN number
|
| 185 |
+
# or the project homepage.
|
| 186 |
+
#
|
| 187 |
+
# epub_identifier = ''
|
| 188 |
+
|
| 189 |
+
# A unique identification for the text.
|
| 190 |
+
#
|
| 191 |
+
# epub_uid = ''
|
| 192 |
+
|
| 193 |
+
# A list of files that should not be packed into the epub file.
|
| 194 |
+
epub_exclude_files = ['search.html']
|
| 195 |
+
|
| 196 |
+
# set priority when building html
|
| 197 |
+
StandaloneHTMLBuilder.supported_image_types = [
|
| 198 |
+
'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg'
|
| 199 |
+
]
|
| 200 |
+
|
| 201 |
+
# -- Extension configuration -------------------------------------------------
|
| 202 |
+
# Ignore >>> when copying code
|
| 203 |
+
copybutton_prompt_text = r'>>> |\.\.\. '
|
| 204 |
+
copybutton_prompt_is_regexp = True
|
| 205 |
+
|
| 206 |
+
# Auto-generated header anchors
|
| 207 |
+
myst_heading_anchors = 3
|
| 208 |
+
# Enable "colon_fence" extension of myst.
|
| 209 |
+
myst_enable_extensions = ['colon_fence', 'dollarmath']
|
| 210 |
+
|
| 211 |
+
# Configuration for intersphinx
|
| 212 |
+
intersphinx_mapping = {
|
| 213 |
+
'python': ('https://docs.python.org/3', None),
|
| 214 |
+
'numpy': ('https://numpy.org/doc/stable', None),
|
| 215 |
+
'torch': ('https://pytorch.org/docs/stable/', None),
|
| 216 |
+
'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None),
|
| 217 |
+
'transformers':
|
| 218 |
+
('https://huggingface.co/docs/transformers/main/en/', None),
|
| 219 |
+
}
|
| 220 |
+
napoleon_custom_sections = [
|
| 221 |
+
# Custom sections for data elements.
|
| 222 |
+
('Meta fields', 'params_style'),
|
| 223 |
+
('Data fields', 'params_style'),
|
| 224 |
+
]
|
| 225 |
+
|
| 226 |
+
# Disable docstring inheritance
|
| 227 |
+
autodoc_inherit_docstrings = False
|
| 228 |
+
# Mock some imports during generate API docs.
|
| 229 |
+
autodoc_mock_imports = ['rich', 'attr', 'einops']
|
| 230 |
+
# Disable displaying type annotations, these can be very verbose
|
| 231 |
+
autodoc_typehints = 'none'
|
| 232 |
+
|
| 233 |
+
# The not found page
|
| 234 |
+
notfound_template = '404.html'
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def builder_inited_handler(app):
    # Copy shared original docs into the build tree once the builder starts.
    # NOTE(review): assumes the build runs from the docs directory where
    # ./cp_origin_docs.sh exists — confirm against the CI invocation.
    subprocess.run(['./cp_origin_docs.sh'])


def setup(app):
    # Sphinx extension entry point: register the doc-copy step to run
    # when the builder is initialized.
    app.connect('builder-inited', builder_inited_handler)
|
VLMEvalKit-sudoku/llava/eval/eval_chartqa.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
print(sys.path)
|
| 8 |
+
|
| 9 |
+
# from mova.eval.m4c_evaluator import ChartVQAEvaluator
|
| 10 |
+
from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator, STVQAAccuracyEvaluator
|
| 11 |
+
|
| 12 |
+
def get_args():
    """Build and parse the command-line arguments for ChartQA evaluation."""
    parser = argparse.ArgumentParser()
    # All flags are plain string options; register them in one pass.
    for flag in ('--annotation-file', '--result-file', '--result-dir',
                 '--mid_result', '--output_result'):
        parser.add_argument(flag, type=str)
    return parser.parse_args()
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def eval_single(annotation_file, result_file):
    """Score one .jsonl result file against a .jsonl annotation file.

    Returns a tuple ``(num_samples, accuracy_percent, per_sample_records)``.
    """
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    # Annotations are stored one JSON object per line.
    gt_entries = [
        json.loads(row) for row in open(os.path.expanduser(annotation_file), "r")
    ]
    # Key on (question_id, lowercased question) so predictions match even
    # when prompt casing differs.
    gt_lookup = {
        (entry['question_id'], entry['question'].lower()): entry
        for entry in gt_entries
    }
    predictions = [json.loads(row) for row in open(result_file)]

    pred_list = []
    mid_list = []
    for pred in predictions:
        gt = gt_lookup[(pred['question_id'], pred['prompt'].lower())]
        pred_list.append({
            "pred_answer": pred['text'].lower(),
            "gt_answers": [gt['answer'].lower()],
        })
        # Keep the raw prediction record, annotated with the ground truth.
        pred["annotation"] = gt['answer']
        mid_list.append(pred)

    # NOTE(review): despite the file name, scoring uses the ST-VQA evaluator
    # rather than ChartQA relaxed accuracy — confirm this is intentional.
    evaluator = STVQAAccuracyEvaluator()
    acc = 100. * evaluator.eval_pred_list(pred_list)
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), acc))
    return len(pred_list), acc, mid_list
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
if __name__ == "__main__":
    args = get_args()

    # Single-file mode: score one .jsonl result file.
    if args.result_file is not None:
        samples, acc, mid_result = eval_single(args.annotation_file, args.result_file)

    # Directory mode: score every .jsonl file found in the directory.
    if args.result_dir is not None:
        for result_file in sorted(os.listdir(args.result_dir)):
            if not result_file.endswith('.jsonl'):
                print(f'Skipping {result_file}')
                continue
            samples, acc, mid_result = eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))

    # with open(args.mid_result, 'w') as f:
    #     json.dump(mid_result, f, indent=2)
    # output_folder = os.path.dirname(args.output_result)
    # print(output_folder)
    # os.makedirs(os.path.dirname(output_folder), exist_ok=True)
    # with open(args.output_result, 'w') as f:
    #     json.dump({'samples': samples, 'acc': acc}, f, indent=2)
|
VLMEvalKit-sudoku/llava/eval/eval_pope.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import argparse
|
| 4 |
+
|
| 5 |
+
def eval_pope(answers, label_file):
    """Evaluate POPE yes/no predictions against a .jsonl label file.

    Normalises each answer's 'text' in place to 'yes'/'no', then prints
    the confusion matrix, accuracy, precision, recall, F1 and yes-ratio.
    """
    label_list = [json.loads(line)['label'] for line in open(label_file, 'r')]

    # Normalise free-form model output into a binary yes/no answer.
    for answer in answers:
        text = answer['text']
        # Only keep the first sentence.
        if '.' in text:
            text = text.split('.')[0]
        words = text.replace(',', '').split(' ')
        negative = 'No' in words or 'not' in words or 'no' in words
        answer['text'] = 'no' if negative else 'yes'

    # Encode labels and predictions: 'no' -> 0, anything else -> 1.
    label_list = [0 if label == 'no' else 1 for label in label_list]
    pred_list = [0 if answer['text'] == 'no' else 1 for answer in answers]

    pos, neg = 1, 0
    yes_ratio = pred_list.count(1) / len(pred_list)

    TP = FP = TN = FN = 0
    for pred, label in zip(pred_list, label_list):
        if pred == pos and label == pos:
            TP += 1
        elif pred == pos and label == neg:
            FP += 1
        elif pred == neg and label == neg:
            TN += 1
        else:
            # pred == neg and label == pos
            FN += 1

    print('TP\tFP\tTN\tFN\t')
    print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))

    precision = float(TP) / float(TP + FP)
    recall = float(TP) / float(TP + FN)
    f1 = 2 * precision * recall / (precision + recall)
    acc = (TP + TN) / (TP + TN + FP + FN)
    print('Accuracy: {}'.format(acc))
    print('Precision: {}'.format(precision))
    print('Recall: {}'.format(recall))
    print('F1 score: {}'.format(f1))
    print('Yes ratio: {}'.format(yes_ratio))
    print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio))
|
| 63 |
+
|
| 64 |
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-dir", type=str)
    parser.add_argument("--question-file", type=str)
    parser.add_argument("--result-file", type=str)
    args = parser.parse_args()

    # Index questions by id so each answer can be mapped to its category.
    questions = [json.loads(line) for line in open(args.question_file)]
    questions = {question['question_id']: question for question in questions}
    answers = [json.loads(q) for q in open(args.result_file)]
    # One annotation file per POPE category, e.g. coco_pope_adversarial.json.
    for file in os.listdir(args.annotation_dir):
        print(file)
        print(answers[0]['question_id'])
        assert file.startswith('coco_pope_')
        assert file.endswith('.json')
        # Strip the 'coco_pope_' prefix (10 chars) and '.json' suffix (5 chars).
        category = file[10:-5]
        print(category)
        cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
        print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
        eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
        print("====================================")
|
VLMEvalKit-sudoku/llava/eval/eval_science_qa_gpt4.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import re
|
| 5 |
+
import random
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def get_args():
    """Parse CLI arguments for comparing our ScienceQA results with GPT-4's."""
    arg_parser = argparse.ArgumentParser()
    # Plain string path options.
    for flag in ('--base-dir', '--gpt4-result', '--our-result'):
        arg_parser.add_argument(flag, type=str)
    arg_parser.add_argument('--split', type=str, default='test')
    # NOTE(review): type=list splits a passed string into characters; in
    # practice only the default list of option letters appears to be used.
    arg_parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return arg_parser.parse_args()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def convert_caps(results):
    """Repackage QA-style results as COCO-caption records.

    Each input record's 'question_id' becomes an integer 'image_id' and its
    'text' becomes the 'caption'.
    """
    return [
        {"image_id": int(entry["question_id"]), "caption": entry["text"]}
        for entry in results
    ]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C').

    When the prediction is not one of the option letters actually available
    for this question, fall back to a uniformly random valid index.
    """
    valid_letters = options[:len(choices)]
    if prediction not in valid_letters:
        return random.choice(range(len(choices)))
    return options.index(prediction)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
if __name__ == "__main__":
    # Compare our model's ScienceQA answers against GPT-4's, reporting
    # accuracy, an either-model upper bound, and GPT-4 parse failures.
    args = get_args()

    base_dir = args.base_dir
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    our_predictions = [json.loads(line) for line in open(args.our_result)]
    our_predictions = {pred['question_id']: pred for pred in our_predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    gpt4_predictions = json.load(open(args.gpt4_result))['outputs']

    results = defaultdict(lambda: 0)

    # Hoisted out of the loop: the pattern is constant, so compile it once
    # instead of once per problem as the original did.
    pattern = re.compile(r'The answer is ([A-Z]).')

    for prob_id, prob in split_problems.items():
        # Score only problems that both systems answered.
        if prob_id not in our_predictions:
            continue
        if prob_id not in gpt4_predictions:
            continue
        our_pred = our_predictions[prob_id]['text']
        gpt4_pred = gpt4_predictions[prob_id]

        # "FAILED" marks answers with no single parsable "The answer is X."
        our_res = pattern.findall(our_pred)
        our_answer = our_res[0] if len(our_res) == 1 else "FAILED"
        gpt4_res = pattern.findall(gpt4_pred)
        gpt4_answer = gpt4_res[0] if len(gpt4_res) == 1 else "FAILED"

        our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
        gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)

        if gpt4_answer == 'FAILED':
            results['gpt4_failed'] += 1
            # When GPT-4 produced no parsable answer, substitute our model's
            # index so the comparison totals stay aligned (original behavior).
            gpt4_pred_idx = our_pred_idx

        if gpt4_pred_idx == prob['answer']:
            results['correct'] += 1
        else:
            results['incorrect'] += 1

        # Upper bound: count a hit if either system got the answer right.
        if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
            results['correct_upperbound'] += 1

    correct = results['correct']
    total = results['correct'] + results['incorrect']
    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
    print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
    print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
|
| 104 |
+
|
VLMEvalKit-sudoku/llava/eval/m4c_evaluator.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class EvalAIAnswerProcessor:
    """
    Processes an answer similar to Eval AI
    copied from
    https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
    """

    CONTRACTIONS = {
        "aint": "ain't",
        "arent": "aren't",
        "cant": "can't",
        "couldve": "could've",
        "couldnt": "couldn't",
        "couldn'tve": "couldn't've",
        "couldnt've": "couldn't've",
        "didnt": "didn't",
        "doesnt": "doesn't",
        "dont": "don't",
        "hadnt": "hadn't",
        "hadnt've": "hadn't've",
        "hadn'tve": "hadn't've",
        "hasnt": "hasn't",
        "havent": "haven't",
        "hed": "he'd",
        "hed've": "he'd've",
        "he'dve": "he'd've",
        "hes": "he's",
        "howd": "how'd",
        "howll": "how'll",
        "hows": "how's",
        "Id've": "I'd've",
        "I'dve": "I'd've",
        "Im": "I'm",
        "Ive": "I've",
        "isnt": "isn't",
        "itd": "it'd",
        "itd've": "it'd've",
        "it'dve": "it'd've",
        "itll": "it'll",
        "let's": "let's",
        "maam": "ma'am",
        "mightnt": "mightn't",
        "mightnt've": "mightn't've",
        "mightn'tve": "mightn't've",
        "mightve": "might've",
        "mustnt": "mustn't",
        "mustve": "must've",
        "neednt": "needn't",
        "notve": "not've",
        "oclock": "o'clock",
        "oughtnt": "oughtn't",
        "ow's'at": "'ow's'at",
        "'ows'at": "'ow's'at",
        "'ow'sat": "'ow's'at",
        "shant": "shan't",
        "shed've": "she'd've",
        "she'dve": "she'd've",
        "she's": "she's",
        "shouldve": "should've",
        "shouldnt": "shouldn't",
        "shouldnt've": "shouldn't've",
        "shouldn'tve": "shouldn't've",
        "somebody'd": "somebodyd",
        "somebodyd've": "somebody'd've",
        "somebody'dve": "somebody'd've",
        "somebodyll": "somebody'll",
        "somebodys": "somebody's",
        "someoned": "someone'd",
        "someoned've": "someone'd've",
        "someone'dve": "someone'd've",
        "someonell": "someone'll",
        "someones": "someone's",
        "somethingd": "something'd",
        "somethingd've": "something'd've",
        "something'dve": "something'd've",
        "somethingll": "something'll",
        "thats": "that's",
        "thered": "there'd",
        "thered've": "there'd've",
        "there'dve": "there'd've",
        "therere": "there're",
        "theres": "there's",
        "theyd": "they'd",
        "theyd've": "they'd've",
        "they'dve": "they'd've",
        "theyll": "they'll",
        "theyre": "they're",
        "theyve": "they've",
        "twas": "'twas",
        "wasnt": "wasn't",
        "wed've": "we'd've",
        "we'dve": "we'd've",
        "weve": "we've",
        "werent": "weren't",
        "whatll": "what'll",
        "whatre": "what're",
        "whats": "what's",
        "whatve": "what've",
        "whens": "when's",
        "whered": "where'd",
        "wheres": "where's",
        "whereve": "where've",
        "whod": "who'd",
        "whod've": "who'd've",
        "who'dve": "who'd've",
        "wholl": "who'll",
        "whos": "who's",
        "whove": "who've",
        "whyll": "why'll",
        "whyre": "why're",
        "whys": "why's",
        "wont": "won't",
        "wouldve": "would've",
        "wouldnt": "wouldn't",
        "wouldnt've": "wouldn't've",
        "wouldn'tve": "wouldn't've",
        "yall": "y'all",
        "yall'll": "y'all'll",
        "y'allll": "y'all'll",
        "yall'd've": "y'all'd've",
        "y'alld've": "y'all'd've",
        "y'all'dve": "y'all'd've",
        "youd": "you'd",
        "youd've": "you'd've",
        "you'dve": "you'd've",
        "youll": "you'll",
        "youre": "you're",
        "youve": "you've",
    }

    NUMBER_MAP = {
        "none": "0",
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
    }
    ARTICLES = ["a", "an", "the"]
    # NOTE(review): `(?!<=\d)` is almost certainly a typo for the negative
    # lookbehind `(?<!\d)` (it is a lookahead for the literal text "<=digit",
    # which matches nearly everywhere).  Kept byte-identical because this bug
    # is inherited from the upstream VQA eval code and fixing it would change
    # established score parity.
    PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
    COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
    PUNCTUATIONS = [
        ";",
        r"/",
        "[",
        "]",
        '"',
        "{",
        "}",
        "(",
        ")",
        "=",
        "+",
        "\\",
        "_",
        "-",
        ">",
        "<",
        "@",
        "`",
        ",",
        "?",
        "!",
    ]

    def __init__(self, *args, **kwargs):
        # Stateless processor; accepts (and ignores) extra args for
        # compatibility with the upstream mmf processor interface.
        pass

    def word_tokenize(self, word):
        """Lowercase, drop commas/question marks, split possessive 's."""
        word = word.lower()
        word = word.replace(",", "").replace("?", "").replace("'s", " 's")
        return word.strip()

    def process_punctuation(self, in_text):
        """Strip or space-replace punctuation per the VQA eval convention."""
        out_text = in_text
        for p in self.PUNCTUATIONS:
            # Delete the mark when it touches a space (or the text has a
            # digit-comma like "1,000"); otherwise replace it with a space.
            if (p + " " in in_text or " " + p in in_text) or (
                re.search(self.COMMA_STRIP, in_text) is not None
            ):
                out_text = out_text.replace(p, "")
            else:
                out_text = out_text.replace(p, " ")
        # BUG FIX: the original passed `re.UNICODE` as the third positional
        # argument of Pattern.sub, which is `count` (=32) — silently capping
        # the number of period removals at 32.
        out_text = self.PERIOD_STRIP.sub("", out_text)
        return out_text

    def process_digit_article(self, in_text):
        """Map number words to digits, drop articles, expand contractions."""
        out_text = []
        temp_text = in_text.lower().split()
        for word in temp_text:
            # BUG FIX: the original used NUMBER_MAP.setdefault(word, word),
            # which MUTATES the shared class-level dict — every word ever
            # processed was permanently inserted into NUMBER_MAP. `.get` is
            # the non-mutating equivalent with identical lookup behavior.
            word = self.NUMBER_MAP.get(word, word)
            if word not in self.ARTICLES:
                out_text.append(word)
        for word_id, word in enumerate(out_text):
            if word in self.CONTRACTIONS:
                out_text[word_id] = self.CONTRACTIONS[word]
        out_text = " ".join(out_text)
        return out_text

    def __call__(self, item):
        """Normalize a raw answer string the way the EvalAI VQA server does."""
        item = self.word_tokenize(item)
        item = item.replace("\n", " ").replace("\t", " ").strip()
        item = self.process_punctuation(item)
        item = self.process_digit_article(item)
        return item
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class TextVQAAccuracyEvaluator:
    """VQA-style soft-accuracy evaluator: an answer scores
    min(#matching human annotators / 3, 1), averaged leave-one-out."""

    def __init__(self):
        self.answer_processor = EvalAIAnswerProcessor()

    def _compute_answer_scores(self, raw_answers):
        """
        compute the accuracy (soft score) of human answers
        """
        answers = [self.answer_processor(a) for a in raw_answers]
        # The VQA annotation protocol provides exactly 10 human answers.
        assert len(answers) == 10
        gt_answers = list(enumerate(answers))
        unique_answers = set(answers)
        unique_answer_scores = {}

        for unique_answer in unique_answers:
            accs = []
            for gt_answer in gt_answers:
                # Leave-one-out: score each candidate against the other 9
                # annotators, then average over which annotator is held out.
                other_answers = [item for item in gt_answers if item != gt_answer]
                matching_answers = [
                    item for item in other_answers if item[1] == unique_answer
                ]
                acc = min(1, float(len(matching_answers)) / 3)
                accs.append(acc)
            unique_answer_scores[unique_answer] = sum(accs) / len(accs)

        return unique_answer_scores

    def eval_pred_list(self, pred_list):
        """Return mean soft accuracy over entries shaped
        {'pred_answer': str, 'gt_answers': [str] * 10}."""
        if not pred_list:
            # Guard: the original raised ZeroDivisionError on an empty list.
            return 0.0
        pred_scores = []
        for entry in tqdm(pred_list):
            unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
            pred_answer = self.answer_processor(entry["pred_answer"])
            # Unseen answers score 0 rather than raising KeyError.
            score = unique_answer_scores.get(pred_answer, 0.0)
            pred_scores.append(score)

        accuracy = sum(pred_scores) / len(pred_scores)
        return accuracy
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class STVQAAccuracyEvaluator:
    """Exact-match accuracy after EvalAI answer normalization (ST-VQA)."""

    def __init__(self):
        self.answer_processor = EvalAIAnswerProcessor()

    def eval_pred_list(self, pred_list):
        """Return mean exact-match accuracy; appends one debug row per
        sample to ./output.csv (pred, normalized gts, score)."""
        import csv
        if not pred_list:
            # Guard: the original raised ZeroDivisionError on an empty list.
            return 0.0
        pred_scores = []
        # Open the debug log once instead of re-opening it for every entry
        # as the original did; same rows are appended in the same order.
        with open('./output.csv', mode='a', newline='') as file:
            writer = csv.writer(file)
            for entry in pred_list:
                pred_answer = self.answer_processor(entry["pred_answer"])
                gts = [self.answer_processor(a) for a in entry["gt_answers"]]
                score = 1.0 if pred_answer in gts else 0.0
                # Write the row to the CSV file
                writer.writerow([pred_answer, gts, score])
                pred_scores.append(score)

        accuracy = sum(pred_scores) / len(pred_scores)
        return accuracy
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class STVQAANLSEvaluator:
    """ANLS (Average Normalized Levenshtein Similarity) evaluator."""

    def __init__(self):
        import editdistance  # install with `pip install editdistance`

        self.get_edit_distance = editdistance.eval

    def get_anls(self, s1, s2):
        """Return normalized Levenshtein similarity between s1 and s2,
        thresholded to 0 below 0.5 (the standard ANLS rule)."""
        s1 = s1.lower().strip()
        s2 = s2.lower().strip()
        if not s1 and not s2:
            # Guard: two empty strings are identical; the original divided
            # by max(len, len) == 0 here and raised ZeroDivisionError.
            return 1.0
        iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
        anls = iou if iou >= 0.5 else 0.0
        return anls

    def eval_pred_list(self, pred_list):
        """Return mean best-ANLS over entries shaped
        {'pred_answer': str, 'gt_answers': [str, ...]}; logs one debug row
        per sample to ./output.csv."""
        import csv
        if not pred_list:
            # Guard: avoid ZeroDivisionError on an empty prediction list.
            return 0.0
        pred_scores = []
        with open('./output.csv', mode='a', newline='') as file:
            writer = csv.writer(file)
            for entry in pred_list:
                anls = max(
                    self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
                )
                pred_scores.append(anls)
                # BUG FIX: the original wrote this row AFTER the loop, so only
                # the final entry was ever logged (its sibling evaluator
                # STVQAAccuracyEvaluator logs per-entry). Every sample is
                # logged here instead.
                writer.writerow([entry["pred_answer"], entry["gt_answers"], anls])

        accuracy = sum(pred_scores) / len(pred_scores)
        return accuracy
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
class TextCapsBleu4Evaluator:
    """Corpus-level BLEU-4 evaluator for TextCaps, backed by pycocoevalcap."""

    def __init__(self):
        # The following script requires Java 1.8.0 and pycocotools installed.
        # The pycocoevalcap can be installed with pip as
        # pip install git+https://github.com/ronghanghu/coco-caption.git@python23
        # Original pycocoevalcap code is at https://github.com/tylin/coco-caption
        # but has no python3 support yet.
        try:
            from pycocoevalcap.bleu.bleu import Bleu
            from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        except ModuleNotFoundError:
            print(
                "Please install pycocoevalcap module using "
                "pip install git+https://github.com/ronghanghu/coco-caption.git@python23"  # noqa
            )
            raise

        self.tokenizer = PTBTokenizer()
        self.scorer = Bleu(4)

    def eval_pred_list(self, pred_list):
        """Return the BLEU-4 score of predictions against reference captions."""
        # Create reference and hypotheses captions.
        references, hypotheses = {}, {}
        for sample_idx, sample in enumerate(pred_list):
            references[sample_idx] = [{"caption": gt} for gt in sample["gt_answers"]]
            hypotheses[sample_idx] = [{"caption": sample["pred_answer"]}]

        tokenized_refs = self.tokenizer.tokenize(references)
        tokenized_hyps = self.tokenizer.tokenize(hypotheses)
        score, _ = self.scorer.compute_score(tokenized_refs, tokenized_hyps)

        # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
        return score[3]
|
VLMEvalKit-sudoku/llava/eval/model_qa.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
|
| 3 |
+
import torch
|
| 4 |
+
import os
|
| 5 |
+
import json
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import shortuuid
|
| 8 |
+
|
| 9 |
+
from llava.conversation import default_conversation
|
| 10 |
+
from llava.utils import disable_torch_init
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@torch.inference_mode()
def eval_model(model_name, questions_file, answers_file):
    """Generate answers for a JSONL question file and stream them to JSONL.

    Each input line must carry 'question_id', 'text' and 'category'; each
    output line holds the generated answer plus bookkeeping metadata.
    """
    # Model
    disable_torch_init()
    model_name = os.path.expanduser(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
    model = AutoModelForCausalLM.from_pretrained(model_name,
        torch_dtype=torch.float16).cuda()

    # `with` guarantees both files are closed even if generation raises;
    # the original never closed the question-file handle at all.
    with open(os.path.expanduser(questions_file), "r") as ques_file, \
            open(os.path.expanduser(answers_file), "w") as ans_file:
        for i, line in enumerate(tqdm(ques_file)):
            # Parse each line once (the original called json.loads three
            # times per line).
            record = json.loads(line)
            idx = record["question_id"]
            qs = record["text"]
            cat = record["category"]
            conv = default_conversation.copy()
            conv.append_message(conv.roles[0], qs)
            prompt = conv.get_prompt()
            inputs = tokenizer([prompt])
            input_ids = torch.as_tensor(inputs.input_ids).cuda()
            output_ids = model.generate(
                input_ids,
                do_sample=True,
                use_cache=True,
                temperature=0.7,
                max_new_tokens=1024,)
            outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
            try:
                index = outputs.index(conv.sep, len(prompt))
            except ValueError:
                # No separator after the prompt: append one so the slice
                # below still terminates cleanly.
                outputs += conv.sep
                index = outputs.index(conv.sep, len(prompt))

            # Strip the echoed prompt and the "<role>: " prefix.
            outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
            ans_id = shortuuid.uuid()
            ans_file.write(json.dumps({"question_id": idx,
                                       "text": outputs,
                                       "answer_id": ans_id,
                                       "model_id": model_name,
                                       "metadata": {}}) + "\n")
            ans_file.flush()
|
| 56 |
+
|
| 57 |
+
if __name__ == "__main__":
    # CLI entry point: parse paths/model name and hand off to eval_model.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    arg_parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    arg_parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    cli_args = arg_parser.parse_args()

    eval_model(cli_args.model_name, cli_args.question_file, cli_args.answers_file)
|
VLMEvalKit-sudoku/llava/eval/model_vqa.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import torch
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
import shortuuid
|
| 7 |
+
|
| 8 |
+
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
| 9 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
| 10 |
+
from llava.model.builder import load_pretrained_model
|
| 11 |
+
from llava.utils import disable_torch_init
|
| 12 |
+
from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
| 13 |
+
|
| 14 |
+
from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX
|
| 15 |
+
from typing import Dict, Optional, Sequence, List
|
| 16 |
+
import transformers
|
| 17 |
+
import re
|
| 18 |
+
|
| 19 |
+
from PIL import Image
|
| 20 |
+
import math
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks."""
    # Ceiling division: each chunk except possibly the last has this size,
    # so at most n chunks are produced (fewer when len(lst) < n). The
    # original comment claimed "integer division", which was wrong.
    # max(1, ...) fixes a ValueError (range step 0) on an empty list while
    # leaving behavior for non-empty lists unchanged.
    chunk_size = max(1, math.ceil(len(lst) / n))
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the k-th of n chunks of lst (used for sharded evaluation)."""
    chunks = split_list(lst, n)
    return chunks[k]
|
| 32 |
+
|
| 33 |
+
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Tokenize a single conversation into Qwen chat-ml format input ids.

    Builds <|im_start|>role\n ... <|im_end|>\n segments for the system
    message and each turn, substituting IMAGE_TOKEN_INDEX for every
    "<image>" placeholder. A parallel `targets` tensor (labels with user
    turns masked to IGNORE_INDEX) is built but NOT returned — only
    `input_ids` is, shaped (1, seq_len).

    NOTE(review): `max_len` is accepted but never used — no truncation
    happens here; confirm callers do not rely on it.
    """
    roles = {"human": "<|im_start|>user", "gpt": "<|im_start|>assistant"}

    # Assumes the tokenizer's additional special tokens are exactly
    # (<|im_start|>, <|im_end|>) in that order — TODO confirm for the
    # tokenizer in use.
    im_start, im_end = tokenizer.additional_special_tokens_ids
    nl_tokens = tokenizer("\n").input_ids
    _system = tokenizer("system").input_ids + nl_tokens
    _user = tokenizer("user").input_ids + nl_tokens
    _assistant = tokenizer("assistant").input_ids + nl_tokens

    # Apply prompt templates
    input_ids, targets = [], []

    source = sources
    # Drop a leading non-human turn so conversations always start with the user.
    if roles[source[0]["from"]] != roles["human"]:
        source = source[1:]

    input_id, target = [], []
    # System preamble: <|im_start|>system\n{message}<|im_end|>\n
    system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
    input_id += system
    # Mask everything but <|im_start|>/<|im_end|>/newline in the labels
    # (len(system) - 3 covers the tokens between those three markers).
    target += [im_start] + [IGNORE_INDEX] * (len(system) - 3) + [im_end] + nl_tokens
    assert len(input_id) == len(target)
    for j, sentence in enumerate(source):
        role = roles[sentence["from"]]
        if has_image and sentence["value"] is not None and "<image>" in sentence["value"]:
            # Interleave text pieces with IMAGE_TOKEN_INDEX placeholders,
            # one per "<image>" occurrence.
            num_image = len(re.findall(DEFAULT_IMAGE_TOKEN, sentence["value"]))
            texts = sentence["value"].split('<image>')
            _input_id = tokenizer(role).input_ids + nl_tokens
            for i,text in enumerate(texts):
                _input_id += tokenizer(text).input_ids
                if i<len(texts)-1:
                    _input_id += [IMAGE_TOKEN_INDEX] + nl_tokens
            _input_id += [im_end] + nl_tokens
            # Sanity check: every placeholder produced exactly one image token.
            assert sum([i==IMAGE_TOKEN_INDEX for i in _input_id])==num_image
        else:
            if sentence["value"] is None:
                # Open-ended assistant turn (generation prompt): role header only.
                _input_id = tokenizer(role).input_ids + nl_tokens
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens + tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
        input_id += _input_id
        if role == "<|im_start|>user":
            # User turns are fully masked in the labels.
            _target = [im_start] + [IGNORE_INDEX] * (len(_input_id) - 3) + [im_end] + nl_tokens
        elif role == "<|im_start|>assistant":
            # Assistant turns: mask only the role header; supervise the reply.
            _target = [im_start] + [IGNORE_INDEX] * len(tokenizer(role).input_ids) + _input_id[len(tokenizer(role).input_ids) + 1 : -2] + [im_end] + nl_tokens
        else:
            raise NotImplementedError
        target += _target

    input_ids.append(input_id)
    targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    # `targets` is computed for parity with the training-side preprocessor
    # but is unused below — only input ids are returned for inference.
    targets = torch.tensor(targets, dtype=torch.long)
    return input_ids
|
| 85 |
+
|
| 86 |
+
def eval_model(args):
    """Run multi-image, multi-turn VQA evaluation for a Qwen-based LLaVA model.

    Reads a JSON question file (each entry: sample_id, metadata, image list,
    and a conversations list alternating human/gpt turns), generates a reply
    per human turn, and streams JSONL answer records to args.answers_file.
    """

    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # Data
    with open(os.path.expanduser(args.question_file)) as f:
        questions = json.load(f)
    # Shard the question list for parallel workers.
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    for line in tqdm(questions):
        idx = line["sample_id"]
        question_type = line["metadata"]["question_type"]
        dataset_name = line["metadata"]["dataset"]
        # Ground-truth reply of the first turn (conversations[1]).
        gt = line["conversations"][1]["value"]

        image_files = line["image"]
        qs = line["conversations"][0]["value"]
        cur_prompt = args.extra_prompt + qs

        # Hard-coded: this script always uses the qwen_1_5 template,
        # overriding whatever --conv-mode was passed.
        args.conv_mode = "qwen_1_5"

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        # Tokenize the first user turn plus an open assistant turn
        # (value=None yields just the assistant header as generation prompt).
        input_ids = preprocess_qwen([line["conversations"][0],{'from': 'gpt','value': None}], tokenizer, has_image=True).cuda()
        img_num = list(input_ids.squeeze()).count(IMAGE_TOKEN_INDEX)

        image_tensors = []
        for image_file in image_files:
            image = Image.open(os.path.join(args.image_folder, image_file))
            image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values']
            image_tensors.append(image_tensor.half().cuda())
        # image_tensors = torch.cat(image_tensors, dim=0)

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        # NOTE(review): constructed but never passed to model.generate —
        # stopping relies on the model's own EOS handling.
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensors,
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                # no_repeat_ngram_size=3,
                max_new_tokens=1024,
                use_cache=True)


        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        outputs = outputs.strip()
        # Trim a trailing separator if the model emitted one.
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({
            "dataset": dataset_name,
            "sample_id": idx,
            "prompt": cur_prompt,
            "pred_response": outputs,
            "gt_response": gt,
            "shortuuid": ans_id,
            "model_id": model_name,
            "question_type": question_type,
        }) + "\n")
        ans_file.flush()

        # Multi-turn follow-ups: feed the running dialogue (previous ids +
        # generated reply + next user turn) back into the model.
        if len(line["conversations"]) > 2:

            for i in range(2, len(line["conversations"]), 2):
                # Append the model's previous reply tokens to the context.
                input_ids = torch.cat((input_ids, output_ids), dim=1)

                gt = line["conversations"][i + 1]["value"]
                qs = line["conversations"][i]["value"]
                cur_prompt = args.extra_prompt + qs

                args.conv_mode = "qwen_1_5"

                conv = conv_templates[args.conv_mode].copy()
                conv.append_message(conv.roles[0], qs)
                conv.append_message(conv.roles[1], None)
                prompt = conv.get_prompt()

                input_ids_new = preprocess_qwen([line["conversations"][i],{'from': 'gpt','value': None}], tokenizer, has_image=True).cuda()
                input_ids = torch.cat((input_ids, input_ids_new), dim=1)
                img_num = list(input_ids_new.squeeze()).count(IMAGE_TOKEN_INDEX)

                stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
                keywords = [stop_str]
                stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

                with torch.inference_mode():
                    output_ids = model.generate(
                        input_ids,
                        images=image_tensors,
                        do_sample=True if args.temperature > 0 else False,
                        temperature=args.temperature,
                        top_p=args.top_p,
                        num_beams=args.num_beams,
                        # no_repeat_ngram_size=3,
                        max_new_tokens=1024,
                        use_cache=True)

                outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
                outputs = outputs.strip()
                if outputs.endswith(stop_str):
                    outputs = outputs[:-len(stop_str)]
                outputs = outputs.strip()

                ans_id = shortuuid.uuid()
                ans_file.write(json.dumps({
                    "dataset": dataset_name,
                    "sample_id": idx,
                    "prompt": cur_prompt,
                    "pred_response": outputs,
                    "gt_response": gt,
                    "shortuuid": ans_id,
                    "model_id": model_name,
                    "question_type": question_type,
                }) + "\n")
                ans_file.flush()


    ans_file.close()
|
| 222 |
+
|
| 223 |
+
if __name__ == "__main__":
    # CLI entry point: collect generation/sharding options and run the eval.
    cli = argparse.ArgumentParser()
    cli.add_argument("--model-path", type=str, default="facebook/opt-350m")
    cli.add_argument("--model-base", type=str, default=None)
    cli.add_argument("--image-folder", type=str, default="")
    cli.add_argument("--extra-prompt", type=str, default="")
    cli.add_argument("--question-file", type=str, default="tables/question.jsonl")
    cli.add_argument("--answers-file", type=str, default="answer.jsonl")
    cli.add_argument("--conv-mode", type=str, default="llava_v1")
    cli.add_argument("--num-chunks", type=int, default=1)
    cli.add_argument("--chunk-idx", type=int, default=0)
    cli.add_argument("--temperature", type=float, default=0.2)
    cli.add_argument("--top_p", type=float, default=None)
    cli.add_argument("--num_beams", type=int, default=1)
    cli.add_argument("--test_size", type=int, default=10000000)
    args = cli.parse_args()

    eval_model(args)
|
VLMEvalKit-sudoku/llava/eval/model_vqa_mmbench.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import torch
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
import pandas as pd
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import shortuuid
|
| 8 |
+
|
| 9 |
+
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
| 10 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
| 11 |
+
from llava.model.builder import load_pretrained_model
|
| 12 |
+
from llava.utils import disable_torch_init
|
| 13 |
+
from llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
|
| 14 |
+
|
| 15 |
+
from PIL import Image
|
| 16 |
+
import math
|
| 17 |
+
from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
all_options = ['A', 'B', 'C', 'D']
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks"""
    # Round the per-chunk length up so no element is dropped.
    size = math.ceil(len(lst) / n)
    chunks = []
    for start in range(0, len(lst), size):
        chunks.append(lst[start:start + size])
    return chunks
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def get_chunk(lst, n, k):
    """Return the k-th (0-based) of n roughly equal chunks of *lst*."""
    return split_list(lst, n)[k]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def is_none(value):
    """Return True when *value* represents a missing cell.

    Missing means: None, a float NaN, or the strings 'nan'/'none'
    (case-insensitive). Anything else — including 0 and "" — is present.
    """
    if value is None:
        return True
    # `type(...) is` (not isinstance) deliberately excludes float subclasses,
    # matching the original behavior.
    if type(value) is float and math.isnan(value):
        return True
    if type(value) is str and value.lower() in ('nan', 'none'):
        return True
    return False
|
| 44 |
+
|
| 45 |
+
def get_options(row, options):
    """Collect option values from *row*, stopping at the first missing one.

    MMBench rows always fill options from 'A' onward, so the first missing
    column marks the end of the valid options.
    """
    parsed = []
    for key in options:
        candidate = row[key]
        if is_none(candidate):
            break
        parsed.append(candidate)
    return parsed
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def eval_model(args):
    """Run MMBench-style multiple-choice evaluation and write JSONL answers.

    Reads a TSV question file (with base64-encoded images), builds a prompt per
    question (optionally rotating the options over several rounds), generates
    with the LLaVA model using MiniCPM-V style image slicing, and appends one
    JSON record per prediction to args.answers_file.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, _args=args)

    # MMBench ships questions as a TSV; each row carries the image inline as base64.
    questions = pd.read_table(os.path.expanduser(args.question_file))
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    # Plain (projector-only) checkpoints expect the mmtag prompt variant.
    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')

    for index, row in tqdm(questions.iterrows(), total=len(questions)):
        options = get_options(row, all_options)
        cur_option_char = all_options[:len(options)]

        # --all-rounds asks the same question once per circular shift of the
        # options, to measure answer consistency.
        if args.all_rounds:
            num_rounds = len(options)
        else:
            num_rounds = 1

        for round_idx in range(num_rounds):
            idx = row['index']
            question = row['question']
            hint = row['hint']
            image = load_image_from_base64(row['image'])
            if not is_none(hint):
                question = hint + '\n' + question
            # Append the (possibly rotated) options as "A. ...", "B. ..." lines.
            for option_char, option in zip(all_options[:len(options)], options):
                question = question + '\n' + option_char + '. ' + option
            # cur_prompt is logged without the image tokens added below.
            qs = cur_prompt = question
            if model.config.mm_use_im_start_end:
                qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
            else:
                qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

            if args.single_pred_prompt:
                if args.lang == 'cn':
                    qs = qs + '\n' + "请直接回答选项字母。"
                else:
                    qs = qs + '\n' + "Answer with the option's letter from the given choices directly."

            conv = conv_templates[args.conv_mode].copy()
            conv.append_message(conv.roles[0], qs)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()

            input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

            # image_tensor = process_images([image], image_processor, model.config)[0]

            # image = resize_image_keep_ratio(image, max_size=1024)
            # minicpm-v
            # Slice the image MiniCPM-V style: one resized source image plus
            # optional high-resolution patches (best_grid is None when the
            # image was small enough to skip slicing).
            source_image, patches, best_grid, ind_tokens = slice_image_minicpm(
                image, max_slice_nums=7, scale_resolution=336, patch_size=14, never_split=False)
            image_sizes = [source_image.size]
            processor = image_processor
            if best_grid is None:  # no slices were produced
                source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                      do_rescale=True, do_normalize=True,
                                                      return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
                crop_size = processor.crop_size
                # Placeholder patch tensor so downstream code sees a uniform shape.
                patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width'])
            else:
                source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                      do_rescale=True, do_normalize=True,
                                                      return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
                patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False,
                                                     do_rescale=True, do_normalize=True,
                                                     return_tensors='pt')['pixel_values']  # num_slice, 3, s_h, s_w
            images = [source_tensors[0].half().cuda()]  # 3, h, w
            patch_images = [patch_tensors.half().cuda()]  # bs, 3, h, w
            ind_tokens = [ind_tokens]

            with torch.inference_mode():
                output_ids = model.generate(
                    input_ids,
                    images=images,
                    image_sizes=image_sizes,
                    patch_images=patch_images,
                    ind_tokens=ind_tokens,
                    do_sample=True if args.temperature > 0 else False,
                    temperature=args.temperature,
                    top_p=args.top_p,
                    num_beams=args.num_beams,
                    # no_repeat_ngram_size=3,
                    max_new_tokens=1024,
                    use_cache=True)

            outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

            ans_id = shortuuid.uuid()
            # Record the option permutation used this round so the grader can
            # map the predicted letter back to the original option.
            ans_file.write(json.dumps({"question_id": idx,
                                       "round_id": round_idx,
                                       "prompt": cur_prompt,
                                       "text": outputs,
                                       "options": options,
                                       "option_char": cur_option_char,
                                       "answer_id": ans_id,
                                       "model_id": model_name,
                                       "metadata": {}}) + "\n")
            ans_file.flush()

            # rotate options
            options = options[1:] + options[:1]
            cur_option_char = cur_option_char[1:] + cur_option_char[:1]
    ans_file.close()
|
| 167 |
+
|
| 168 |
+
# CLI entry point: parse MMBench evaluation options and run eval_model.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    # Sharding: split the question file into num-chunks pieces and process chunk-idx.
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    # Sampling parameters; temperature <= 0 switches generation to greedy decoding.
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    # Ask each question once per circular rotation of its options.
    parser.add_argument("--all-rounds", action="store_true")
    # Append a "answer with the letter only" instruction to every prompt.
    parser.add_argument("--single-pred-prompt", action="store_true")
    parser.add_argument("--lang", type=str, default="en")
    # BUG FIX: `type=bool` makes argparse call bool() on the raw string, so any
    # non-empty value — including "False" — parsed as True. Interpret common
    # false spellings explicitly; the default stays True, so existing
    # invocations that never pass the flag are unaffected.
    parser.add_argument("--fted_encoder",
                        type=lambda s: s.strip().lower() not in ("false", "0", "no"),
                        default=True)
    args = parser.parse_args()

    eval_model(args)
|
VLMEvalKit-sudoku/llava/mm_utils.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
from io import BytesIO
|
| 3 |
+
import base64
|
| 4 |
+
import math
|
| 5 |
+
import ast
|
| 6 |
+
import re
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import StoppingCriteria
|
| 9 |
+
from llava.constants import IMAGE_TOKEN_INDEX
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def resize_and_center_crop(image, shortest_edge_length):
    """Resize *image* so its shortest edge equals *shortest_edge_length*,
    then center-crop a square of that size.

    Args:
        image (PIL.Image.Image): input image.
        shortest_edge_length (int): target side length of the square crop.

    Returns:
        PIL.Image.Image: the cropped square image.
    """
    # Calculate new dimensions and resize
    aspect_ratio = float(image.width) / float(image.height)
    if aspect_ratio > 1:
        new_width = int(shortest_edge_length * aspect_ratio)
        new_height = shortest_edge_length
    else:
        new_width = shortest_edge_length
        new_height = int(shortest_edge_length / aspect_ratio)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter and works on both old and new Pillow versions.
    resized_image = image.resize((new_width, new_height), Image.LANCZOS)

    # Calculate the position and perform the center crop
    left = (new_width - shortest_edge_length) / 2
    top = (new_height - shortest_edge_length) / 2
    right = (new_width + shortest_edge_length) / 2
    bottom = (new_height + shortest_edge_length) / 2
    cropped_image = resized_image.crop((left, top, right, bottom))

    return cropped_image
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def auto_pad_images(image, grid_params):
    """Resize *image* (keeping aspect ratio) to the best-matching candidate
    resolution built from *grid_params*, then pad it with black borders to
    exactly that resolution.

    Args:
        image (PIL.Image.Image): input image.
        grid_params (list[int]): candidate side lengths; every (w, h) pair
            drawn from this list is considered a target resolution.

    Returns:
        PIL.Image.Image: the resized-and-padded RGB image.
    """
    assert isinstance(image, Image.Image), "Input should be a Pillow Image"
    assert len(grid_params) > 0, "Grid parameters should not be empty"

    # Step 1: Calculate and find the closest aspect ratio
    input_width, input_height = image.size
    input_aspect_ratio = input_width / input_height
    candidate_resolutions = [(w / h, w, h) for w in grid_params for h in grid_params]
    closest_aspect_ratio = min(candidate_resolutions, key=lambda x: abs(input_aspect_ratio - x[0]))

    # Keep only candidates sharing (within tolerance) that closest aspect ratio.
    candidate_resolutions = [(x[1], x[2]) for x in candidate_resolutions if abs(x[0] - closest_aspect_ratio[0]) < 1e-3]

    # Among those, pick the one whose longer side best matches the input's.
    target_resolution = min(candidate_resolutions, key=lambda res: abs(max(input_width, input_height) / max(res) - 1))

    resize_width, resize_height = target_resolution
    if input_width > input_height:
        resize_height = int(resize_width / input_aspect_ratio)
    else:
        resize_width = int(resize_height * input_aspect_ratio)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # same resampling filter and works on both old and new Pillow versions.
    resized_image = image.resize((resize_width, resize_height), Image.LANCZOS)

    # Step 5: Pad the resized image if necessary to match the target resolution
    pad_width = target_resolution[0] - resize_width
    pad_height = target_resolution[1] - resize_height
    padded_image = Image.new("RGB", target_resolution, color=(0, 0, 0))
    padded_image.paste(resized_image, (pad_width // 2, pad_height // 2))

    return padded_image
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def extract_patches(image, patch_size, overlap_ratio):
    """Cut *image* into square patches of *patch_size*, overlapping by
    *overlap_ratio*, with the patch grid centered inside the image.

    Returns the patches in row-major order (top-to-bottom, left-to-right).
    """
    assert isinstance(image, Image.Image), "Input should be a Pillow Image"
    assert patch_size > 0, "Patch size should be greater than 0"
    assert 0 <= overlap_ratio < 1, "Overlap ratio should be between 0 and 1"

    W, H = image.size
    stride = int(patch_size * (1 - overlap_ratio))

    num_patches_y = (H - patch_size) // stride + 1
    num_patches_x = (W - patch_size) // stride + 1

    # Offsets that center the whole grid within the image.
    y_start = (H - (num_patches_y - 1) * stride - patch_size) // 2
    x_start = (W - (num_patches_x - 1) * stride - patch_size) // 2

    return [
        image.crop((x, y, x + patch_size, y + patch_size))
        for y in range(y_start, y_start + num_patches_y * stride, stride)
        for x in range(x_start, x_start + num_patches_x * stride, stride)
    ]
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def process_highres_image_crop_split(image, data_args, processor=None):
    """Center-crop *image* to `data_args.image_crop_resolution`, split the crop
    into non-overlapping `data_args.image_split_resolution` patches, and
    preprocess each patch with *processor*.

    Returns:
        torch.Tensor: stacked patches, shape (num_patches, C, H, W).
    """
    crop_resolution = data_args.image_crop_resolution
    split_resolution = data_args.image_split_resolution
    # Fall back to the processor attached to data_args when none is given.
    if processor is None:
        processor = data_args.image_processor
    image_crop = resize_and_center_crop(image, crop_resolution)
    image_patches = extract_patches(image_crop, patch_size=split_resolution, overlap_ratio=0)
    image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
    return torch.stack(image_patches, dim=0)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def process_highres_image(image, processor, grid_pinpoints):
    """Pad *image* to square, resize to a grid resolution, split into patches,
    and prepend a low-resolution global view.

    Args:
        image (PIL.Image.Image): input image.
        processor: HF image processor (uses .size, .crop_size, .image_mean).
        grid_pinpoints (str): comma-separated candidate side lengths, e.g. "336,672".

    Returns:
        torch.Tensor: (1 + num_patches, C, H, W) — global view first.
    """
    grid_params = [int(x) for x in grid_pinpoints.split(",")]
    width_height = max(image.size)
    # Smallest grid size that still contains the image; largest if none fits.
    fit_grid_params = [x for x in grid_params if x >= width_height]
    if len(fit_grid_params) == 0:
        select_size = max(grid_params)
    else:
        select_size = min(fit_grid_params)
    # FIXME: always select the 448
    # NOTE(review): this line deliberately overrides the selection above, so
    # the largest grid size is always used; the preceding logic is dead code.
    select_size = max(grid_params)
    image_padded = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))

    # FIXME: this seems to be a bug that it always resizes instead of padding
    image_original_resize = image.resize((processor.size["shortest_edge"], processor.size["shortest_edge"]))
    image_padded = image_padded.resize((select_size, select_size))
    image_patches = extract_patches(image_padded, patch_size=processor.size["shortest_edge"], overlap_ratio=0)
    # Global low-res view goes first, followed by the high-res patches.
    image_patches = [image_original_resize] + image_patches
    image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
    return torch.stack(image_patches, dim=0)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def select_best_resolution(original_size, possible_resolutions):
    """
    Selects the best resolution from a list of possible resolutions based on the original size.

    Maximizes the effective (non-upscaled) resolution after an aspect-preserving
    fit; ties are broken by minimizing wasted canvas area.

    Args:
        original_size (tuple): The original size of the image in the format (width, height).
        possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].

    Returns:
        tuple: The best fit resolution in the format (width, height).
    """
    orig_w, orig_h = original_size
    best_fit = None
    best_effective = 0
    best_wasted = float("inf")

    for cand_w, cand_h in possible_resolutions:
        # Aspect-preserving downscale of the original into this candidate.
        scale = min(cand_w / orig_w, cand_h / orig_h)
        fit_w, fit_h = int(orig_w * scale), int(orig_h * scale)

        # Effective pixels are capped at the original's pixel count (no credit
        # for upscaling); the rest of the candidate canvas is wasted.
        effective = min(fit_w * fit_h, orig_w * orig_h)
        wasted = cand_w * cand_h - effective

        if effective > best_effective or (effective == best_effective and wasted < best_wasted):
            best_effective = effective
            best_wasted = wasted
            best_fit = (cand_w, cand_h)

    return best_fit
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def resize_and_pad_image(image, target_resolution):
    """
    Resize and pad an image to a target resolution while maintaining aspect ratio.

    Args:
        image (PIL.Image.Image): The input image.
        target_resolution (tuple): The target resolution (width, height) of the image.

    Returns:
        PIL.Image.Image: The resized and padded image.
    """
    orig_w, orig_h = image.size
    target_w, target_h = target_resolution

    # Whichever scale factor is smaller determines the dimension that fills
    # the target completely; the other dimension is padded.
    scale_w = target_w / orig_w
    scale_h = target_h / orig_h
    if scale_w < scale_h:
        new_w = target_w
        new_h = min(math.ceil(orig_h * scale_w), target_h)
    else:
        new_h = target_h
        new_w = min(math.ceil(orig_w * scale_h), target_w)

    resized = image.resize((new_w, new_h))

    # Paste the resized image centered on a black canvas of the target size.
    canvas = Image.new("RGB", (target_w, target_h), (0, 0, 0))
    canvas.paste(resized, ((target_w - new_w) // 2, (target_h - new_h) // 2))

    return canvas
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def divide_to_patches(image, patch_size):
    """
    Divides an image into patches of a specified size.

    Patches are produced in row-major order; edge patches may extend past the
    image bounds (PIL pads the crop).

    Args:
        image (PIL.Image.Image): The input image.
        patch_size (int): The size of each patch.

    Returns:
        list: A list of PIL.Image.Image objects representing the patches.
    """
    width, height = image.size
    boxes = [
        (left, top, left + patch_size, top + patch_size)
        for top in range(0, height, patch_size)
        for left in range(0, width, patch_size)
    ]
    return [image.crop(box) for box in boxes]
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (tuple): The size of the input image in the format (width, height).
        grid_pinpoints (str): A string representation of a list of possible resolutions.
        patch_size (int): The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format (width, height).
    """
    # "(AxB)...(CxD)" form: expand the inclusive grid range into pixel
    # resolutions. NOTE(review): only the first and last regex matches are
    # read, so the string is assumed to list the range endpoints.
    if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
        assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
        # Use regex to extract the range from the input string
        matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
        range_start = tuple(map(int, matches[0]))
        range_end = tuple(map(int, matches[-1]))
        # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
        grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
        # Multiply all elements by patch_size
        grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]
    if type(grid_pinpoints) is list:
        possible_resolutions = grid_pinpoints
    else:
        # A literal like "[(336, 672), (672, 336)]"; literal_eval keeps this safe.
        possible_resolutions = ast.literal_eval(grid_pinpoints)
    width, height = select_best_resolution(image_size, possible_resolutions)
    return width // patch_size, height // patch_size
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def process_anyres_image(image, processor, grid_pinpoints):
    """
    Process an image with variable resolutions.

    Args:
        image (PIL.Image.Image): The input image to be processed.
        processor: The image processor object.
        grid_pinpoints (str): A string representation of a list of possible resolutions.

    Returns:
        torch.Tensor: A tensor containing the processed image patches.
    """
    # Convert grid_pinpoints from string to list
    # "(AxB)...(CxD)" form: expand the inclusive grid range into pixel
    # resolutions (same parsing as get_anyres_image_grid_shape).
    if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
        # processor.size may be a sequence or a dict depending on processor type.
        try:
            patch_size = processor.size[0]
        except Exception as e:
            patch_size = processor.size["shortest_edge"]
        assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
        # Use regex to extract the range from the input string
        matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
        range_start = tuple(map(int, matches[0]))
        range_end = tuple(map(int, matches[-1]))
        # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
        grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
        # Multiply all elements by patch_size
        grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]

    if type(grid_pinpoints) is list:
        possible_resolutions = grid_pinpoints
    else:
        possible_resolutions = ast.literal_eval(grid_pinpoints)
    best_resolution = select_best_resolution(image.size, possible_resolutions)
    image_padded = resize_and_pad_image(image, best_resolution)

    patches = divide_to_patches(image_padded, processor.crop_size["height"])

    # FIXME: this seems to be a bug that it resizes instead of pad.
    # but to keep it consistent with previous, i will keep it as it is
    # TODO: uncomment below to ablate with the padding
    if isinstance(processor.size, dict):
        shortest_edge = processor.size["shortest_edge"]
    else:
        shortest_edge = min(processor.size)
    image_original_resize = image.resize((shortest_edge, shortest_edge))
    # image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
    # image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))

    # Global low-res view first, followed by the high-res patches.
    image_patches = [image_original_resize] + patches
    image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
    return torch.stack(image_patches, dim=0)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def load_image_from_base64(image):
    """Decode a base64-encoded image string into a PIL Image."""
    raw_bytes = base64.b64decode(image)
    return Image.open(BytesIO(raw_bytes))
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def expand2square(pil_img, background_color):
    """Pad *pil_img* to a square canvas of its longer side, centered, filling
    the borders with *background_color*. Returns the input unchanged when it
    is already square."""
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    if width > height:
        canvas.paste(pil_img, (0, (side - height) // 2))
    else:
        canvas.paste(pil_img, ((side - width) // 2, 0))
    return canvas
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def process_images(images, image_processor, model_cfg):
    """Preprocess a list of PIL images according to the model's
    `image_aspect_ratio` strategy.

    Args:
        images: list of PIL images.
        image_processor: HF image processor.
        model_cfg: model config; `image_aspect_ratio` selects the strategy
            ("highres", "anyres"/"anyres_max...", "crop_split", "pad", or
            anything else for the processor's default).

    Returns:
        A stacked tensor when all processed images share a shape, otherwise a
        list of tensors (or the processor's default batched output).
    """
    image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
    # BUG FIX: when the config does not define image_aspect_ratio,
    # `"anyres_max" in None` below raised TypeError. Treat a missing value as
    # the default preprocessing path instead.
    if image_aspect_ratio is None:
        image_aspect_ratio = ""
    new_images = []
    if image_aspect_ratio == "highres":
        for image in images:
            image = process_highres_image(image, image_processor, model_cfg.image_grid_pinpoints)
            new_images.append(image)
    elif image_aspect_ratio == "anyres" or "anyres_max" in image_aspect_ratio:
        for image in images:
            image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
            new_images.append(image)
    elif image_aspect_ratio == "crop_split":
        for image in images:
            image = process_highres_image_crop_split(image, model_cfg, image_processor)
            new_images.append(image)
    elif image_aspect_ratio == "pad":
        for image in images:
            # Pad to square with the processor's mean color before preprocessing.
            image = expand2square(image, tuple(int(x * 255) for x in image_processor.image_mean))
            image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
            new_images.append(image)
    else:
        return image_processor.preprocess(images, return_tensors="pt")["pixel_values"]
    # Stack only when shapes agree; anyres outputs can differ per image.
    if all(x.shape == new_images[0].shape for x in new_images):
        new_images = torch.stack(new_images, dim=0)
    return new_images
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    """Tokenize *prompt*, replacing each "<image>" placeholder with a single
    *image_token_index* id.

    Keeps at most one leading BOS token; the duplicate BOS the tokenizer
    prepends to every text chunk is stripped.
    """
    chunks = [tokenizer(part).input_ids for part in prompt.split("<image>")]

    input_ids = []
    offset = 0
    if chunks and chunks[0] and chunks[0][0] == tokenizer.bos_token_id:
        # Keep the single BOS, then skip index 0 of every chunk below.
        offset = 1
        input_ids.append(chunks[0][0])

    # After slicing with [offset:], exactly one image token separates chunks.
    separator = [image_token_index] * (offset + 1)
    for position, chunk in enumerate(chunks):
        if position > 0:
            input_ids.extend(separator[offset:])
        input_ids.extend(chunk[offset:])

    if return_tensors is not None:
        if return_tensors == "pt":
            return torch.tensor(input_ids, dtype=torch.long)
        raise ValueError(f"Unsupported tensor type: {return_tensors}")
    return input_ids
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def get_model_name_from_path(model_path):
    """Derive a short model name from a filesystem path or repo id.

    Checkpoint directories keep their parent's name as a prefix so different
    runs' checkpoints stay distinguishable.
    """
    parts = model_path.strip("/").split("/")
    if parts[-1].startswith("checkpoint-"):
        return "{}_{}".format(parts[-2], parts[-1])
    return parts[-1]
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
class KeywordsStoppingCriteria(StoppingCriteria):
    """Stop generation once any keyword appears in the newly generated tokens.

    Matches first on token ids (suffix comparison), then falls back to
    substring search over the decoded tail of the output.
    """

    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.keyword_ids = []
        for keyword in keywords:
            cur_keyword_ids = tokenizer(keyword).input_ids
            # Drop a leading BOS so the ids align with raw generated output.
            if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
                cur_keyword_ids = cur_keyword_ids[1:]
            self.keyword_ids.append(torch.tensor(cur_keyword_ids))
        self.tokenizer = tokenizer
        self.start_len = input_ids.shape[1]  # prompt length; only look past it

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)"  # TODO
        offset = min(output_ids.shape[1] - self.start_len, 3)
        self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
        for keyword_id in self.keyword_ids:
            # BUG FIX: the elementwise `==` yields a bool tensor; without
            # `.all()` any multi-token keyword raised "Boolean value of Tensor
            # with more than one element is ambiguous".
            if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
                return True
        # Fallback: decode only the last few tokens and search for the keyword text.
        outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
        for keyword in self.keywords:
            if keyword in outputs:
                return True
        return False
|
VLMEvalKit-sudoku/llava/model/apply_delta.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Usage:
|
| 3 |
+
python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 11 |
+
from llava import LlavaLlamaForCausalLM
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def apply_delta(base_model_path, target_model_path, delta_path):
    """Add base-model weights onto a delta checkpoint and save the merged model.

    Args:
        base_model_path: Path/ID of the original base LM.
        target_model_path: Output directory for the merged model + tokenizer.
        delta_path: Path/ID of the delta checkpoint (LLaVA weights minus base).

    Performance fix vs. the original: ``base.state_dict()`` rebuilds its dict
    on every call and was invoked up to three times per parameter inside the
    loop; it is now fetched once up front.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Loading delta")
    delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)

    print("Applying delta")
    base_state = base.state_dict()  # snapshot once; see docstring
    for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
        if name not in base_state:
            # Projector weights exist only in the delta (LLaVA-specific heads).
            assert name in ["model.mm_projector.weight", "model.mm_projector.bias"], f"{name} not in base model"
            continue
        bparam = base_state[name]
        if param.data.shape == bparam.shape:
            param.data += bparam
        else:
            # Vocab may have been extended in the delta; only the overlapping
            # rows/cols of the embedding / LM head are summed.
            assert name in ["model.embed_tokens.weight", "lm_head.weight"], f"{name} dimension mismatch: {param.data.shape} vs {bparam.shape}"
            param.data[: bparam.shape[0], : bparam.shape[1]] += bparam

    print("Saving target model")
    delta.save_pretrained(target_model_path)
    delta_tokenizer.save_pretrained(target_model_path)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
if __name__ == "__main__":
    # CLI entry point: merge a delta checkpoint onto its base model.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--base-model-path", type=str, required=True)
    arg_parser.add_argument("--target-model-path", type=str, required=True)
    arg_parser.add_argument("--delta-path", type=str, required=True)
    cli = arg_parser.parse_args()
    apply_delta(cli.base_model_path, cli.target_model_path, cli.delta_path)
|
VLMEvalKit-sudoku/llava/model/consolidate.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Usage:
|
| 3 |
+
python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 10 |
+
from llava.model import *
|
| 11 |
+
from llava.model.utils import auto_upgrade
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def consolidate_ckpt(src_path, dst_path):
    """Re-save a checkpoint (model + tokenizer) in consolidated form.

    Loads the model at ``src_path`` (upgrading legacy configs first via
    ``auto_upgrade``) and writes model and tokenizer to ``dst_path``.
    """
    print("Loading model")
    auto_upgrade(src_path)  # migrate legacy config fields in place
    model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
    model.save_pretrained(dst_path)
    tokenizer.save_pretrained(dst_path)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if __name__ == "__main__":
    # CLI entry point: consolidate a checkpoint directory.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--src", type=str, required=True)
    arg_parser.add_argument("--dst", type=str, required=True)
    cli = arg_parser.parse_args()
    consolidate_ckpt(cli.src, cli.dst)
|
VLMEvalKit-sudoku/llava/model/language_model/llava_llama.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 Haotian Liu
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
|
| 21 |
+
from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig
|
| 22 |
+
|
| 23 |
+
from torch.nn import CrossEntropyLoss
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# , LlamaModel, LlamaForCausalLM, GenerationConfig
|
| 27 |
+
# from .modeling_llama import LlamaModel, LlamaForCausalLM
|
| 28 |
+
from transformers import LlamaModel, LlamaForCausalLM
|
| 29 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 30 |
+
from transformers.generation.utils import GenerateOutput
|
| 31 |
+
|
| 32 |
+
from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class LlavaConfig(LlamaConfig):
    # Config for LLaVA models built on a Llama backbone.  The class
    # attributes below override the HF generation defaults for this model
    # family (greedy decoding by default).
    model_type = "llava_llama"
    temperature: float = 0.0  # reset to 0.0, previously 0.9 for Vicuna
    max_new_tokens: int = 1024
    do_sample: bool = False  # greedy decoding unless the caller opts in
    top_p: Optional[float] = None
    # rope_scaling: Optional[dict] = {}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    """Llama backbone extended with LLaVA's multimodal components.

    ``LlavaMetaModel`` contributes the vision tower / resampler / projector
    wiring; ``LlamaModel`` provides the transformer itself.
    """

    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        # Zero-argument super() resolves the same MRO as the explicit form.
        super().__init__(config)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    """Causal LM head over :class:`LlavaLlamaModel`, adding multimodal
    (image / patch-image) input handling on top of the stock Llama forward
    and generate paths."""

    config_class = LlavaConfig

    def __init__(self, config):
        # Call LlamaForCausalLM.__init__ directly (not super()) so the MRO
        # does not re-enter LlavaMetaForCausalLM during construction.
        LlamaForCausalLM.__init__(self, config)

        # configure default generation settings
        config.model_type = "llava_llama"
        # config.rope_scaling = None

        self.model = LlavaLlamaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        # Required by LlavaMetaForCausalLM: expose the inner backbone.
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        # NOTE(review): mutable default list; never mutated in this body, but
        # replacing with None + in-body default would be safer long term.
        modalities: Optional[List[str]] = ["image"],
        dpo_forward: Optional[bool] = None,
        # cache_position is accepted but unused here — presumably for HF
        # generate() compatibility; confirm against the installed transformers.
        cache_position=None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward.

        When ``inputs_embeds`` is not given, image features are spliced into
        the token embeddings via ``prepare_inputs_labels_for_multimodal``.
        With ``dpo_forward`` set, returns raw ``(logits, labels)`` instead of
        the usual ``CausalLMOutputWithPast`` (used by DPO-style training).
        """
        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes,patch_images=patch_images,ind_tokens=ind_tokens)

        if dpo_forward:
            # Bypass the LM-head loss machinery: run the backbone and project
            # hidden states to logits manually.
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels

        else:
            # Standard path: defer to LlamaForCausalLM.forward with the
            # (possibly multimodal-expanded) embeddings.
            return super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = ["image"],
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Generation entry point: pre-computes multimodal embeddings, then
        delegates to HF ``generate`` with ``inputs_embeds`` only (token ids
        are consumed during embedding preparation)."""
        # Allow modalities to arrive via kwargs without clobbering an
        # explicitly passed argument.
        modalities = kwargs.pop("modalities", None) if "modalities" in kwargs and modalities is None else modalities
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            # Splice image features into the prompt embeddings.
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes,
                                                                                                                    patch_images=patch_images,
                                                                                                                    ind_tokens=ind_tokens)
        else:
            # Text-only prompt: plain token embeddings.
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Forward the multimodal kwargs (images, sizes, patches, indicator
        tokens) through HF's per-step input preparation so they reach
        ``forward`` during generation."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        patch_images = kwargs.pop("patch_images", None)
        ind_tokens = kwargs.pop("ind_tokens", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        if patch_images is not None:
            inputs['patch_images'] = patch_images
        if ind_tokens is not None:
            inputs['ind_tokens'] = ind_tokens
        return inputs
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
# Register with the HF Auto classes so AutoModelForCausalLM.from_pretrained
# can resolve checkpoints whose config declares model_type "llava_llama".
AutoConfig.register("llava_llama", LlavaConfig)
AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
|
VLMEvalKit-sudoku/llava/model/llava_arch.py
ADDED
|
@@ -0,0 +1,808 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 Haotian Liu
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from abc import ABC, abstractmethod
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
import re
|
| 20 |
+
import time
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn as nn
|
| 23 |
+
from .multimodal_encoder.builder import build_vision_tower
|
| 24 |
+
from .multimodal_resampler.builder import build_vision_resampler
|
| 25 |
+
from .multimodal_projector.builder import build_vision_projector
|
| 26 |
+
|
| 27 |
+
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
|
| 28 |
+
|
| 29 |
+
from llava.mm_utils import get_anyres_image_grid_shape
|
| 30 |
+
from llava.utils import rank0_print, rank_print
|
| 31 |
+
import random
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class LlavaMetaModel:
    """Mixin that equips a language-model backbone with LLaVA's vision
    stack: vision tower, resampler, and mm projector."""

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        # Only build the vision stack when the config was saved with one
        # (i.e. this is already a multimodal checkpoint).
        if hasattr(config, "mm_vision_tower"):
            delay_load = getattr(config, "delay_load", False)
            self.vision_tower = build_vision_tower(config, delay_load=delay_load)
            self.vision_resampler = build_vision_resampler(config, vision_tower=self.vision_tower)
            self.mm_projector = build_vision_projector(config, vision_cfg=self.vision_tower.config)

            if "unpad" in getattr(config, "mm_patch_merge_type", ""):
                # Learned separator embedding appended after each image row
                # in the "unpad" patch-merge scheme.
                self.image_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))

    def get_vision_tower(self):
        # Under FSDP the tower is stored wrapped in a single-element list;
        # unwrap it for callers.
        vision_tower = getattr(self, "vision_tower", None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        """Build or reload the vision tower/resampler/projector from training
        args and mirror the relevant settings onto ``self.config``.

        Args:
            model_args: Namespace with vision_tower, mm_* options, and the
                optional ``pretrain_mm_mlp_adapter`` checkpoint path.
            fsdp: Non-empty sequence when running under FSDP; modules are then
                stored in single-element lists (see ``get_vision_tower``).
        """
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_vision_tower() is None:
            # First-time setup: build tower + resampler from scratch.
            vision_tower = build_vision_tower(model_args)
            vision_resampler = build_vision_resampler(model_args, vision_tower=vision_tower)
            # Propagate resampler-derived settings into the model config.
            for k, v in vision_resampler.config.items():
                setattr(self.config, k, v)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
                self.vision_resampler = [vision_resampler]
            else:
                self.vision_tower = vision_tower
                self.vision_resampler = vision_resampler
        else:
            # Tower already exists (e.g. resuming): unwrap and reload weights.
            if fsdp is not None and len(fsdp) > 0:
                vision_resampler = self.vision_resampler[0]
                vision_tower = self.vision_tower[0]
            else:
                vision_resampler = self.vision_resampler
                vision_tower = self.vision_tower
            vision_tower.load_model()

            # In case it is frozen by LoRA
            for p in self.vision_resampler.parameters():
                p.requires_grad = True

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, "mm_projector_type", "linear")
        self.config.mm_hidden_size = getattr(vision_resampler, "hidden_size", vision_tower.hidden_size)
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type


        if not hasattr(self.config, 'add_faster_video'):
            if model_args.add_faster_video:
                # Learned token separating "faster" (more aggressively pooled)
                # video features; scaled like a random embedding.
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.faster_token = nn.Parameter(
                    torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
                )

        if getattr(self, "mm_projector", None) is None:
            self.mm_projector = build_vision_projector(self.config, vision_cfg=vision_tower.config)

            if "unpad" in mm_patch_merge_type:
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.image_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
        else:
            # In case it is frozen by LoRA
            for p in self.mm_projector.parameters():
                p.requires_grad = True

        if pretrain_mm_mlp_adapter is not None:
            # Warm-start projector (and optionally resampler) from a
            # pretraining checkpoint.
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location="cpu")

            def get_w(weights, keyword):
                # Strip the "<keyword>." prefix from matching state-dict keys.
                return {k.split(keyword + ".")[1]: v for k, v in weights.items() if keyword in k}

            incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"))
            rank0_print(f"Loaded mm projector weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")
            incompatible_keys = self.vision_resampler.load_state_dict(get_w(mm_projector_weights, "vision_resampler"), strict=False)
            rank0_print(f"Loaded vision resampler weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")
|
| 125 |
+
|
| 126 |
+
def unpad_image(tensor, original_size):
    """
    Remove letterbox padding from a resized image tensor.

    Args:
        tensor (torch.Tensor): Image features in CxHxW layout.
        original_size (tuple): Original (width, height) of the source image.

    Returns:
        torch.Tensor: Slice of ``tensor`` with the padded rows/cols removed.
    """
    orig_w, orig_h = original_size
    cur_h, cur_w = tensor.shape[1:]

    if orig_w / orig_h > cur_w / cur_h:
        # Source is wider than the canvas: padding was added top and bottom.
        kept_h = int(orig_h * (cur_w / orig_w))
        pad = (cur_h - kept_h) // 2
        return tensor[:, pad : cur_h - pad, :]

    # Otherwise padding was added left and right (or there was none).
    kept_w = int(orig_w * (cur_h / orig_h))
    pad = (cur_w - kept_w) // 2
    return tensor[:, :, pad : cur_w - pad]
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
class LlavaMetaForCausalLM(ABC):
|
| 162 |
+
|
| 163 |
+
    @abstractmethod
    def get_model(self):
        # Subclasses must return the underlying multimodal backbone model
        # (the object owning vision_tower / mm_projector).
        pass
|
| 166 |
+
|
| 167 |
+
    def get_vision_tower(self):
        # Convenience accessor: the backbone owns the vision tower.
        return self.get_model().get_vision_tower()
|
| 169 |
+
|
| 170 |
+
    def get_2dPool(self, image_feature, stride=2):
        """Spatially downsample per-frame vision features by ``stride``.

        image_feature: (num_frames, num_tokens, dim) where num_tokens is a
        square patch grid; returns (num_frames, num_tokens/stride^2-ish, dim).
        Pooling mode comes from ``config.mm_spatial_pool_mode``
        ("average" | "max" | "bilinear").
        """
        height = width = self.get_vision_tower().num_patches_per_side
        num_frames, num_tokens, num_dim = image_feature.shape
        # Reshape token sequence back to its 2-D patch grid, channels-first.
        image_feature = image_feature.view(num_frames, height, width, -1)
        image_feature = image_feature.permute(0, 3, 1, 2).contiguous()
        # image_feature = nn.functional.max_pool2d(image_feature, self.config.mm_spatial_pool_stride)
        if self.config.mm_spatial_pool_mode == "average":
            image_feature = nn.functional.avg_pool2d(image_feature, stride)
        elif self.config.mm_spatial_pool_mode == "max":
            image_feature = nn.functional.max_pool2d(image_feature, stride)
        elif self.config.mm_spatial_pool_mode == "bilinear":
            # ceil so no spatial information is dropped at the border.
            height, width = image_feature.shape[2:]
            scaled_shape = [math.ceil(height / stride), math.ceil(width / stride)]
            image_feature = nn.functional.interpolate(image_feature, size=scaled_shape, mode='bilinear')

        else:
            raise ValueError(f"Unexpected mm_spatial_pool_mode: {self.config.mm_spatial_pool_mode}")
        # Back to (frames, tokens, dim).
        image_feature = image_feature.permute(0, 2, 3, 1)
        image_feature = image_feature.view(num_frames, -1, num_dim)
        return image_feature
|
| 190 |
+
|
| 191 |
+
def concat_src_patch_images(self, images, patch_images, ind_tokens, per_patch_size = 14):
|
| 192 |
+
all_images = []
|
| 193 |
+
patch_sizes = []
|
| 194 |
+
for src_image, patches, ind_token in zip(images, patch_images, ind_tokens):
|
| 195 |
+
if len(ind_token) == 0:
|
| 196 |
+
all_images += [src_image]
|
| 197 |
+
img_h, img_w = src_image.shape[-2:]
|
| 198 |
+
patch_sizes.append((img_h // per_patch_size, img_w // per_patch_size))
|
| 199 |
+
else:
|
| 200 |
+
patches = [patch for patch in patches]
|
| 201 |
+
slice_img_h, slice_img_w = patches[0].shape[-2:]
|
| 202 |
+
patch_sizes += [(slice_img_h // per_patch_size, slice_img_w // per_patch_size)] * len(patches)
|
| 203 |
+
|
| 204 |
+
patches += [src_image]
|
| 205 |
+
abs_img_h, abs_img_w = src_image.shape[-2:]
|
| 206 |
+
patch_sizes.append((abs_img_h // per_patch_size, abs_img_w // per_patch_size))
|
| 207 |
+
|
| 208 |
+
all_images += patches
|
| 209 |
+
|
| 210 |
+
return all_images, patch_sizes
|
| 211 |
+
|
| 212 |
+
    def encode_images(self, images): #torch.Size([4, 3, 336, 336])
        """Run a uniform batch of images through tower + projector.

        All images share one spatial size; the per-image target grid is the
        pixel size divided by a patch size of 14.
        """
        patch_sizes = []
        for _ in range(images.shape[0]):
            patch_sizes.append((images.shape[2] // 14, images.shape[3] // 14))
        tgt_sizes = torch.tensor(patch_sizes, dtype=torch.long, device=images[0].device)

        # Tower returns per-image feature chunks; concatenate before projecting.
        image_features = self.get_model().get_vision_tower()(images, tgt_sizes)
        image_features = torch.cat(image_features, dim=0)
        # image_features = self.get_model().vision_resampler(image_features, images=images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features
|
| 223 |
+
|
| 224 |
+
def partition_list(self, input_list, lengths):
|
| 225 |
+
"""
|
| 226 |
+
按照指定的长度划分列表。
|
| 227 |
+
|
| 228 |
+
参数:
|
| 229 |
+
input_list (list): 要划分的原始列表。
|
| 230 |
+
lengths (list): 一个包含划分长度的整数列表。
|
| 231 |
+
|
| 232 |
+
返回:
|
| 233 |
+
list: 一个包含子列表的列表,每个子列表的长度由 lengths 指定。
|
| 234 |
+
"""
|
| 235 |
+
result = []
|
| 236 |
+
current_index = 0
|
| 237 |
+
for length in lengths:
|
| 238 |
+
if current_index + length > len(input_list):
|
| 239 |
+
raise ValueError("划分长度超过了列表的总长度")
|
| 240 |
+
sublist = input_list[current_index:current_index + length]
|
| 241 |
+
result.append(sublist)
|
| 242 |
+
current_index += length
|
| 243 |
+
if current_index != len(input_list):
|
| 244 |
+
raise ValueError("划分长度和列表总长度不一致")
|
| 245 |
+
return result
|
| 246 |
+
|
| 247 |
+
    def encode_images_uhd_v1(self, images, patch_images, ind_tokens):
        """Encode variable-resolution images (source + slice patches).

        Each sample contributes 1 source image plus ``len(ind_token)``
        patches; features are projected per image and re-grouped per sample.
        ``per_patch_size`` / ``down_sample_ratio`` are probed from the vision
        tower's config depending on the tower family.
        """
        # Images per sample: all slice patches plus the source image.
        num_images = [len(ind_token) + 1 for ind_token in ind_tokens]
        # concat images
        per_patch_size = 14
        down_sample_ratio = 1

        if 'siglip2' in self.get_vision_tower().vision_tower_name:
            model_config = self.get_model().get_vision_tower().vision_tower.config
            per_patch_size = getattr(model_config, "patch_size", 16)
            # per_patch_size = 14
            # merger_layer_index may live on the nested vision_config (dict
            # access) or directly on the config (attribute access); its length
            # squared gives the spatial merge factor.
            if hasattr(model_config, "vision_config"):
                vision_model_config = model_config.vision_config
                if vision_model_config.get('merger_layer_index', False):
                    merger_layer_index = vision_model_config['merger_layer_index']
                    down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
            else:
                if hasattr(model_config, 'merger_layer_index'):
                    merger_layer_index = model_config.merger_layer_index
                    down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2

        elif 'moonvit' in self.get_vision_tower().vision_tower_name:
            # Same probing as siglip2 but with a 14-pixel default patch.
            model_config = self.get_model().get_vision_tower().vision_tower.config
            per_patch_size = getattr(model_config, "patch_size", 14)
            if hasattr(model_config, "vision_config"):
                vision_model_config = model_config.vision_config
                if vision_model_config.get('merger_layer_index', False):
                    merger_layer_index = vision_model_config['merger_layer_index']
                    down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
            else:
                if hasattr(model_config, 'merger_layer_index'):
                    merger_layer_index = model_config.merger_layer_index
                    down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2

        elif 'qwen2_5vl' in self.get_vision_tower().vision_tower_name:
            model_config = self.get_model().get_vision_tower().vision_tower.config
            per_patch_size = getattr(model_config, "patch_size", 14)

        # Flatten per-sample (patches + source) into one list with grid sizes.
        images, patch_sizes = self.concat_src_patch_images(images, patch_images, ind_tokens, per_patch_size)
        image_features = self.get_model().get_vision_tower()(images, patch_sizes)
        max_patch_sizes = max([patch_size[0] * patch_size[1] for patch_size in patch_sizes])
        projected_image_features = []
        # breakpoint()
        for image_feature, patch_size in zip(image_features, patch_sizes):
            # import pdb; pdb.set_trace()
            # breakpoint()
            image_feature = tuple(feat.to(torch.bfloat16) for feat in image_feature)
            # image_feature = image_feature.to(torch.bfloat16)
            # Account for the tower's internal spatial merging.
            patch_size = (patch_size[0] // down_sample_ratio, patch_size[1] // down_sample_ratio)

            if self.config.mm_projector_type == "resampler" and 'siglip2' in self.get_vision_tower().vision_tower_name:
                projected_image_feature = self.get_model().mm_projector(image_feature, tgt_size=patch_size, max_patch_sizes=max_patch_sizes)
            else:
                projected_image_feature = self.get_model().mm_projector(image_feature, tgt_size=patch_size) # 1, n, c
            projected_image_feature = projected_image_feature[0]
            projected_image_features.append(projected_image_feature)

        # chunk features
        projected_image_features = self.partition_list(projected_image_features, num_images)
        # import pdb; pdb.set_trace()
        return projected_image_features
|
| 307 |
+
|
| 308 |
+
# def encode_images_uhd_v2(self, images, patch_images, ind_tokens):
|
| 309 |
+
# # start = time.time()
|
| 310 |
+
# num_images = [len(ind_token) + 1 for ind_token in ind_tokens]
|
| 311 |
+
# # concat images
|
| 312 |
+
# images, patch_sizes = self.concat_src_patch_images(images, patch_images, ind_tokens)
|
| 313 |
+
|
| 314 |
+
# tgt_sizes = torch.tensor(patch_sizes, dtype=torch.long, device=images[0].device)
|
| 315 |
+
|
| 316 |
+
# features_1x = self.get_model().get_vision_tower().forward_uhd_v2(images, tgt_sizes) #list torch.Size([1, 550, 1024])
|
| 317 |
+
|
| 318 |
+
# return self.get_model().mm_projector.forward_with_featup(features_1x, patch_sizes, images, num_images)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def encode_multimodals(self, videos_or_images, video_idx_in_batch, split_sizes=None):
    """Encode a mixed batch of video frames and images in one vision-tower pass.

    Args:
        videos_or_images: batched pixel inputs for the vision tower (all samples
            concatenated along dim 0).
        video_idx_in_batch: indices (into the split batch) that are videos and
            should receive 2D spatial pooling.
        split_sizes: per-sample frame counts used to split the tower output
            back into per-sample chunks.

    Returns:
        (features, faster_features): two parallel lists, one entry per sample.
        `features[i]` is the projected (and, for videos, spatially pooled)
        feature tensor; `faster_features[i]` is the extra downsampled feature
        when `config.add_faster_video` is set, otherwise the int 0.
    """
    # One forward pass over the whole concatenated batch, then split per sample.
    videos_or_images_features = self.get_model().get_vision_tower()(videos_or_images)
    per_videos_or_images_features = torch.split(videos_or_images_features, split_sizes, dim=0)  # tuple, (dim_1, 576, 4096)
    all_videos_or_images_features = []
    all_faster_video_features = []
    cur_mm_spatial_pool_stride = self.config.mm_spatial_pool_stride

    for idx, feat in enumerate(per_videos_or_images_features):

        feat = self.get_model().mm_projector(feat)
        # 0 is the "not produced" sentinel kept for backward compatibility with
        # callers that index these lists.
        faster_video_feature = 0
        slower_img_feat = 0
        if idx in video_idx_in_batch and cur_mm_spatial_pool_stride > 1:
            slower_img_feat = self.get_2dPool(feat, cur_mm_spatial_pool_stride)
            if self.config.add_faster_video:
                # NOTE: the stride doubling persists across loop iterations,
                # matching the original behavior.
                cur_mm_spatial_pool_stride = cur_mm_spatial_pool_stride * 2
                faster_video_feature = self.get_2dPool(feat, cur_mm_spatial_pool_stride)
        # BUG FIX: the original `if slower_img_feat != 0:` performs an
        # elementwise comparison once `slower_img_feat` is a Tensor, and
        # `if` on a multi-element boolean Tensor raises a RuntimeError
        # ("Boolean value of Tensor ... is ambiguous"). Test the type instead.
        if isinstance(slower_img_feat, torch.Tensor):
            all_videos_or_images_features.append(slower_img_feat)
        else:
            all_videos_or_images_features.append(feat)
        all_faster_video_features.append(faster_video_feature)
    return all_videos_or_images_features, all_faster_video_features
|
| 344 |
+
|
| 345 |
+
def add_token_per_grid(self, image_feature):
    """Insert the learned image-newline embedding after each row of every
    frame's square feature grid.

    Args:
        image_feature: tensor of shape (num_frames, side*side, dim); the
            per-frame token grid is assumed square.

    Returns:
        When `config.add_faster_video` is set: (num_frames, side*(side+1), dim),
        frames kept as a leading axis. Otherwise a flat
        (num_frames*side*(side+1), dim) token sequence.
    """
    side = int(math.sqrt(image_feature.shape[1]))
    n_frames = image_feature.shape[0]
    dim = image_feature.shape[-1]

    # (F, side*side, C) -> (C, F*side, side): channels first, rows of all
    # frames stacked along one axis.
    grid = image_feature.view(n_frames, 1, side, side, -1)
    grid = grid.permute(4, 0, 2, 1, 3).contiguous()
    grid = grid.flatten(1, 2).flatten(2, 3)

    # Append the newline embedding as one extra column per row.
    newline = self.model.image_newline[:, None, None].expand(*grid.shape[:-1], 1).to(grid.device)
    grid = torch.cat((grid, newline), dim=-1)

    if getattr(self.config, "add_faster_video", False):
        # Re-split the row axis back into (frames, rows) and move channels last,
        # yielding one flat token sequence per frame.
        per_frame = grid.view(dim, n_frames, side, -1)
        per_frame = per_frame.permute(1, 2, 3, 0).contiguous()
        return per_frame.flatten(1, 2)

    # Flatten all frames into a single (tokens, dim) sequence.
    return grid.flatten(1, 2).transpose(0, 1)
|
| 367 |
+
|
| 368 |
+
def add_token_per_frame(self, image_feature):
    """Append the learned image-newline embedding as one extra token at the
    end of each frame.

    Args:
        image_feature: tensor of shape (num_frames, tokens, dim).

    Returns:
        Tensor of shape (num_frames, tokens + 1, dim).
    """
    # Move channels first so the newline can be appended along the token axis.
    chan_first = image_feature.permute(2, 0, 1).contiguous()
    newline_col = self.model.image_newline[:, None, None].expand(*chan_first.shape[:-1], 1).to(chan_first.device)
    with_newline = torch.cat((chan_first, newline_col), dim=-1)
    # Restore the (frames, tokens, channels) layout.
    return with_newline.permute(1, 2, 0).contiguous()
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def prepare_inputs_labels_for_multimodal(self, input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities=["image"], image_sizes=None, patch_images=None, ind_tokens=None):
    """Splice projected vision features into the token stream and build the
    padded embedding/label/mask tensors consumed by the language model.

    Each IMAGE_TOKEN_INDEX placeholder in `input_ids` is replaced by the
    corresponding entry of the encoded `image_features`; text segments are
    embedded with the LM's token embedding.

    Returns:
        6-tuple (input_ids, position_ids, attention_mask, past_key_values,
        inputs_embeds, labels). `input_ids` is None when embeddings were
        built, so the LM runs on `inputs_embeds` instead.

    NOTE(review): `modalities=["image"]` is a mutable default argument; it is
    not mutated here, but a None sentinel would be safer.
    """
    vision_tower = self.get_vision_tower()
    # Fast path: nothing to splice (no tower, no images, or a single-token
    # decode step during generation) -> pass everything through unchanged.
    if vision_tower is None or images is None or input_ids.shape[1] == 1:
        return input_ids, position_ids, attention_mask, past_key_values, None, labels

    if isinstance(modalities, str):
        modalities = [modalities]

    model_mode = getattr(self.config, 'model_mode', 'llava')
    # ---- 1) Encode images/videos into per-sample feature lists -------------
    if model_mode == 'llava' and (type(images) is list or images.ndim == 5):
        if type(images) is list:
            images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images]

        # Indices of batch entries that are videos (pooled differently below).
        video_idx_in_batch = []
        for _ in range(len(modalities)):
            if modalities[_] == "video":
                video_idx_in_batch.append(_)

        images_list = []
        for image in images:
            if image.ndim == 4:
                images_list.append(image)
            else:
                images_list.append(image.unsqueeze(0))

        # Single vision-tower pass over the concatenated batch, then split
        # back into per-sample chunks by frame count.
        concat_images = torch.cat([image for image in images_list], dim=0)
        split_sizes = [image.shape[0] for image in images_list]
        encoded_image_features = self.encode_images(concat_images)

        encoded_image_features = torch.split(encoded_image_features, split_sizes)
        image_features = []
        for idx, image_feat in enumerate(encoded_image_features):
            if idx in video_idx_in_batch:
                # Videos are spatially pooled to reduce token count per frame.
                image_features.append(self.get_2dPool(image_feat))
            else:
                image_features.append(image_feat)
        mm_patch_merge_type = getattr(self.config, "mm_patch_merge_type", "flat")
        image_aspect_ratio = getattr(self.config, "image_aspect_ratio", "square")
        mm_newline_position = getattr(self.config, "mm_newline_position", "one_token")

        # NOTE(review): the config value above is unconditionally overridden
        # here — presumably a deliberate pin to the 'grid' layout; confirm.
        mm_newline_position = 'grid'
        if mm_patch_merge_type == "flat":
            image_features = [x.flatten(0, 1) for x in image_features]

        elif mm_patch_merge_type.startswith("spatial"):
            new_image_features = []
            for image_idx, image_feature in enumerate(image_features):
                # FIXME: now assume the image is square, and split to 2x2 patches
                # num_patches = h * w, where h = w = sqrt(num_patches)
                # currently image_feature is a tensor of shape (4, num_patches, hidden_size)
                # we want to first unflatten it to (2, 2, h, w, hidden_size)
                if image_idx in video_idx_in_batch:  # video operations
                    if mm_newline_position == "grid":
                        # Grid-wise: newline token after each grid row.
                        image_feature = self.add_token_per_grid(image_feature)
                        if getattr(self.config, "add_faster_video", False):
                            # NOTE(review): `all_faster_video_features` is not
                            # defined in this function (its producer call is
                            # commented out above); this branch would raise
                            # NameError if add_faster_video were enabled.
                            faster_video_feature = self.add_token_per_grid(all_faster_video_features[image_idx])
                            # Interleave slow/fast frames: every
                            # `faster_token_stride`-th frame uses the slow
                            # (full-resolution) features.
                            concat_slow_fater_token = []
                            for _ in range(image_feature.shape[0]):
                                if _ % self.config.faster_token_stride == 0:
                                    concat_slow_fater_token.append(torch.cat((image_feature[_], self.model.faster_token[None].to(image_feature.device)), dim=0))
                                else:
                                    concat_slow_fater_token.append(torch.cat((faster_video_feature[_], self.model.faster_token[None].to(image_feature.device)), dim=0))
                            image_feature = torch.cat(concat_slow_fater_token)

                        new_image_features.append(image_feature)
                    elif mm_newline_position == "frame":
                        # Frame-wise: one newline token per frame.
                        image_feature = self.add_token_per_frame(image_feature)

                        new_image_features.append(image_feature.flatten(0, 1))

                    elif mm_newline_position == "one_token":
                        # One newline token for the whole video.
                        image_feature = image_feature.flatten(0, 1)
                        if 'unpad' in mm_patch_merge_type:
                            image_feature = torch.cat((
                                image_feature,
                                self.model.image_newline[None].to(image_feature.device)
                            ), dim=0)
                        new_image_features.append(image_feature)
                    elif mm_newline_position == "no_token":
                        new_image_features.append(image_feature.flatten(0, 1))
                    else:
                        raise ValueError(f"Unexpected mm_newline_position: {mm_newline_position}")
                elif image_feature.shape[0] > 1:  # multi patches and multi images operations
                    # First entry is the base (global) view; the rest are
                    # high-resolution crops laid out on a grid.
                    base_image_feature = image_feature[0]
                    image_feature = image_feature[1:]
                    height = width = self.get_vision_tower().num_patches_per_side
                    assert height * width == base_image_feature.shape[0]

                    if "anyres_max" in image_aspect_ratio:
                        matched_anyres_max_num_patches = re.match(r"anyres_max_(\d+)", image_aspect_ratio)
                        if matched_anyres_max_num_patches:
                            max_num_patches = int(matched_anyres_max_num_patches.group(1))

                    if image_aspect_ratio == "anyres" or "anyres_max" in image_aspect_ratio:
                        if hasattr(self.get_vision_tower(), "image_size"):
                            vision_tower_image_size = self.get_vision_tower().image_size
                        else:
                            raise ValueError("vision_tower_image_size is not found in the vision tower.")
                        try:
                            num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, vision_tower_image_size)
                        except Exception as e:
                            # Fall back to a 2x2 grid if the grid shape can't
                            # be resolved for this image size.
                            rank0_print(f"Error: {e}")
                            num_patch_width, num_patch_height = 2, 2
                        image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
                    else:
                        image_feature = image_feature.view(2, 2, height, width, -1)

                    if "maxpool2x2" in mm_patch_merge_type:
                        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
                        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
                        image_feature = nn.functional.max_pool2d(image_feature, 2)
                        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
                    elif "unpad" in mm_patch_merge_type and "anyres_max" in image_aspect_ratio and matched_anyres_max_num_patches:
                        unit = image_feature.shape[2]
                        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
                        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
                        # Remove padding introduced by aspect-ratio resizing.
                        image_feature = unpad_image(image_feature, image_sizes[image_idx])
                        c, h, w = image_feature.shape
                        # Downscale if the unpadded grid exceeds the
                        # anyres_max token budget (10% tolerance).
                        times = math.sqrt(h * w / (max_num_patches * unit**2))
                        if times > 1.1:
                            image_feature = image_feature[None]
                            image_feature = nn.functional.interpolate(image_feature, [int(h // times), int(w // times)], mode="bilinear")[0]
                        image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)
                        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
                    elif "unpad" in mm_patch_merge_type:
                        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
                        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
                        image_feature = unpad_image(image_feature, image_sizes[image_idx])
                        image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)
                        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
                    else:
                        image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
                        image_feature = image_feature.flatten(0, 3)
                    if "nobase" in mm_patch_merge_type:
                        pass
                    else:
                        # Prepend the base (global) view before the crops.
                        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
                    new_image_features.append(image_feature)
                else:  # single image operations
                    image_feature = image_feature[0]
                    if "unpad" in mm_patch_merge_type:
                        image_feature = torch.cat((image_feature, self.model.image_newline[None]), dim=0)

                    new_image_features.append(image_feature)
            image_features = new_image_features
        else:
            raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
    elif model_mode == 'uhd_v1':
        # UHD mode: slice-based encoding; returns a nested list, e.g.
        # [2 x [3 x torch.Size([144, 3584])]].
        image_features = self.encode_images_uhd_v1(images, patch_images, ind_tokens)
    else:
        image_features = self.encode_images(images)

    # TODO: image start / end is not implemented here to support pretraining.
    if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(self.config, "mm_use_im_start_end", False):
        raise NotImplementedError

    # ---- 2) Normalize optional inputs --------------------------------------
    # Let's just add dummy tensors if they do not exist,
    # it is a headache to deal with None all the time.
    # But it is not ideal, and if you have a better idea,
    # please open an issue / submit a PR, thanks.
    _labels = labels
    _position_ids = position_ids
    _attention_mask = attention_mask
    if attention_mask is None:
        attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
    else:
        attention_mask = attention_mask.bool()
    if position_ids is None:
        position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
    if labels is None:
        labels = torch.full_like(input_ids, IGNORE_INDEX)

    # remove the padding using attention_mask -- FIXME
    _input_ids = input_ids
    input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
    labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

    # ---- 3) Splice image features in place of IMAGE_TOKEN_INDEX ------------
    new_input_embeds = []
    new_labels = []
    cur_image_idx = 0
    for batch_idx, cur_input_ids in enumerate(input_ids):
        num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
        if num_images == 0:
            # Text-only sample: still concatenate a zero-length slice of the
            # image features so the projector participates in the graph.
            cur_image_features = image_features[cur_image_idx]
            cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
            if type(cur_image_features) is list:
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0][0:0]], dim=0)
            else:
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
            new_input_embeds.append(cur_input_embeds)
            new_labels.append(labels[batch_idx])
            cur_image_idx += 1
            continue

        # Sentinel boundaries (-1 and seq length) so every text segment
        # between image tokens is captured, including leading/trailing ones.
        image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
        cur_input_ids_noim = []
        cur_labels = labels[batch_idx]
        cur_labels_noim = []
        for i in range(len(image_token_indices) - 1):
            cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1 : image_token_indices[i + 1]])
            cur_labels_noim.append(cur_labels[image_token_indices[i] + 1 : image_token_indices[i + 1]])
        split_sizes = [x.shape[0] for x in cur_labels_noim]
        # Embed all text segments in one call, then split back.
        cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
        cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
        cur_new_input_embeds = []
        cur_new_labels = []

        for i in range(num_images + 1):
            cur_new_input_embeds.append(cur_input_embeds_no_im[i])
            cur_new_labels.append(cur_labels_noim[i])
            if i < num_images:
                try:
                    cur_image_features = image_features[cur_image_idx]
                except IndexError:
                    # Defensive reuse of the last feature when counts mismatch.
                    cur_image_features = image_features[cur_image_idx - 1]

                if model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
                    # slice features need 'for'
                    cur_ind_tokens = ind_tokens[cur_image_idx]
                    cur_image_idx += 1
                    # Embeddings of the separator tokens between slices
                    # (presumably "\n" -> 13 and "," -> 29892/1919).
                    cur_ind_tokens_embeds = self.get_model().embed_tokens(
                        torch.as_tensor(cur_ind_tokens,  # \n , -> 13, 1919
                                        dtype=torch.long,
                                        device=cur_image_features[0].device))
                else:
                    cur_image_idx += 1
                    cur_ind_tokens_embeds = []

                if len(cur_ind_tokens_embeds) == 0:  # no slice patches
                    if model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
                        # Only the global (last) feature is used.
                        cur_image_features = cur_image_features[-1]
                else:
                    # whether not use the permute strategy
                    UsePermute = False
                    if not UsePermute:
                        # Linear layout: each slice followed by its separator
                        # embedding, then the global feature at the end.
                        abs_image_features = cur_image_features[-1]
                        slice_image_features = cur_image_features[:-1]
                        _cur_image_features = []
                        for image_feature_, ind_token_embeds_ in zip(slice_image_features, cur_ind_tokens_embeds):
                            _cur_image_features.append(torch.cat([image_feature_, ind_token_embeds_[None]], dim=0))
                        _cur_image_features.append(abs_image_features)
                        cur_image_features = torch.cat(_cur_image_features, dim=0)
                    elif model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
                        # NOTE(review): dead branch — `UsePermute` is
                        # hard-coded False above, so this 2D re-layout is
                        # never executed. Also the inner `for i in
                        # range(slice_number)` shadows the outer loop
                        # variable `i`, which would corrupt the outer loop
                        # if this branch were ever enabled.
                        abs_image_features = cur_image_features[-1]
                        slice_image_features = cur_image_features[:-1]  # list

                        slice_image_features_with_batch = [slice_feat.unsqueeze(0) for slice_feat in slice_image_features]

                        slice_image_features_with_batch = torch.cat(slice_image_features_with_batch, dim=0)
                        slice_number, grid , channels = slice_image_features_with_batch.shape
                        edge = int(grid ** 0.5)

                        assert slice_number == len(cur_ind_tokens), "slice_number != len(cur_ind_tokens)"

                        # Count slices per row: 29892 (",") separates slices in
                        # a row, 13 ("\n") terminates the first row.
                        slice_in_row = 0
                        for i in range(slice_number):
                            if cur_ind_tokens[i] == 29892:
                                slice_in_row += 1
                            elif cur_ind_tokens[i] == 13:
                                slice_in_row += 1
                                break
                            else:
                                raise ValueError(f"Unexpected ind_token: {cur_ind_tokens[i]}")
                        assert slice_in_row >= 1, "no slices at all!"
                        slice_in_column = slice_number // slice_in_row
                        h_w_ratio = (slice_in_column*1.0) / slice_in_row
                        if h_w_ratio > 1:
                            ori_patch_size = (edge, int(edge/h_w_ratio))
                        else:
                            ori_patch_size = (int(edge*h_w_ratio), edge)
                        # 144, 4096
                        abs_image_features= abs_image_features.reshape(edge, edge, channels).permute(2, 0, 1).unsqueeze(0)
                        # abs_image_features = F.interpolate(abs_image_features, size=ori_patch_size, mode='bilinear', align_corners=False)
                        abs_image_features = abs_image_features.squeeze(0).permute(1, 2, 0).reshape(-1, channels)

                        # slice_in_row: how many slices in a row
                        # slice_in_column: how many slices in a column
                        # slice_number: how many slices in total
                        comma_notation = cur_ind_tokens_embeds[0]  # what does a comma say in embed
                        enter_notation = cur_ind_tokens_embeds[slice_in_row-1]  # what does a enter say in embed

                        # Assemble the slices into one large 2D grid, append a
                        # newline column per pixel row, then flatten.
                        slice_stack = slice_image_features_with_batch.reshape(slice_in_column, slice_in_row, edge, edge, channels)
                        slice_stack = slice_stack.permute(0, 2, 1, 3, 4).reshape(slice_in_column * edge, slice_in_row * edge, channels)
                        enter_notation = enter_notation.unsqueeze(0).unsqueeze(0).expand(slice_in_column * edge, -1, -1)
                        slice_stack = torch.cat([slice_stack, enter_notation], dim=1)
                        slice_stack = slice_stack.reshape(-1, channels)


                        cur_image_features = torch.cat([slice_stack, comma_notation[None], abs_image_features], dim=0)

                # Image positions never contribute to the loss.
                cur_new_input_embeds.append(cur_image_features)
                cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

        cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]

        cur_new_input_embeds = torch.cat(cur_new_input_embeds)
        cur_new_labels = torch.cat(cur_new_labels)

        new_input_embeds.append(cur_new_input_embeds)
        new_labels.append(cur_new_labels)

    # ---- 4) Truncate and right/left-pad the batch --------------------------
    # Truncate sequences to max length as image embeddings can make the sequence longer
    tokenizer_model_max_length = getattr(self.config, "tokenizer_model_max_length", None)

    # Slicing with None (when no max length is set) is a no-op.
    new_input_embeds = [x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
    new_labels = [x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]
    # TODO: Hard code for control loss spike
    # if tokenizer_model_max_length is not None:
    #     new_input_embeds = [x[:4096] if modality != "video" else x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
    #     new_labels = [x[:4096] if modality != "video" else x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]

    # Combine them
    max_len = max(x.shape[0] for x in new_input_embeds)
    batch_size = len(new_input_embeds)

    new_input_embeds_padded = []
    new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
    attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
    position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)

    for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
        cur_len = cur_new_embed.shape[0]
        if getattr(self.config, "tokenizer_padding_side", "right") == "left":
            # Left padding: content is aligned to the right edge.
            new_input_embeds_padded.append(torch.cat((torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed), dim=0))
            if cur_len > 0:
                new_labels_padded[i, -cur_len:] = cur_new_labels
                attention_mask[i, -cur_len:] = True
                position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
        else:
            # Right padding (default): content is aligned to the left edge.
            new_input_embeds_padded.append(torch.cat((cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0))
            if cur_len > 0:
                new_labels_padded[i, :cur_len] = cur_new_labels
                attention_mask[i, :cur_len] = True
                position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)

    new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)

    # Restore None for inputs the caller did not provide.
    if _labels is None:
        new_labels = None
    else:
        new_labels = new_labels_padded

    if _attention_mask is None:
        attention_mask = None
    else:
        attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

    if _position_ids is None:
        position_ids = None
    if getattr(self.config, "use_pos_skipping", False) and self.training:
        # Position-skipping augmentation: shift position ids by random
        # offsets on either side of a random split point (training only).
        position_ids = torch.arange(new_input_embeds.size(1), device=new_input_embeds.device).unsqueeze(0).to(new_input_embeds.device)
        split_position = random.randint(0, new_input_embeds.size(1))
        left_add = random.randint(0, self.config.pos_skipping_range)
        right_add = random.randint(left_add, self.config.pos_skipping_range)
        position_ids[:, :split_position] += left_add
        position_ids[:, split_position:] += right_add
    return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
|
| 767 |
+
|
| 768 |
+
def initialize_vision_tokenizer(self, model_args, tokenizer):
    """Register the multimodal special tokens with `tokenizer` and initialize
    their embeddings.

    Newly added start/end token embeddings are initialized to the mean of the
    pre-existing embeddings; optionally they are overwritten from a
    pretrained mm-projector checkpoint. Also toggles embedding trainability
    when only the mm-MLP adapter is tuned.
    """
    if model_args.mm_use_im_patch_token:
        tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        self.resize_token_embeddings(len(tokenizer))

    if model_args.mm_use_im_start_end:
        num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        self.resize_token_embeddings(len(tokenizer))

        if num_new_tokens > 0:
            input_embeddings = self.get_input_embeddings().weight.data
            output_embeddings = self.get_output_embeddings().weight.data

            # Initialize new token embeddings to the mean of existing ones.
            input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
            output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)

            input_embeddings[-num_new_tokens:] = input_embeddings_avg
            output_embeddings[-num_new_tokens:] = output_embeddings_avg

        if model_args.tune_mm_mlp_adapter:
            # Adapter tuning: train input embeddings (new tokens), freeze output.
            for p in self.get_input_embeddings().parameters():
                p.requires_grad = True
            for p in self.get_output_embeddings().parameters():
                p.requires_grad = False

        if model_args.pretrain_mm_mlp_adapter:
            # Overwrite the new-token input embeddings from a pretrained
            # adapter checkpoint.
            # NOTE(review): `input_embeddings` is only bound when
            # num_new_tokens > 0 above; if the tokens already existed this
            # branch would raise NameError (the assert below also expects
            # exactly 2 new tokens).
            mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location="cpu")
            embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
            assert num_new_tokens == 2
            if input_embeddings.shape == embed_tokens_weight.shape:
                input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
            elif embed_tokens_weight.shape[0] == num_new_tokens:
                input_embeddings[-num_new_tokens:] = embed_tokens_weight
            else:
                raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.")
    elif model_args.mm_use_im_patch_token:
        if model_args.tune_mm_mlp_adapter:
            # Patch-token-only mode with adapter tuning: freeze both
            # embedding matrices.
            for p in self.get_input_embeddings().parameters():
                p.requires_grad = False
            for p in self.get_output_embeddings().parameters():
                p.requires_grad = False
|
VLMEvalKit-sudoku/llava/model/make_delta.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Usage:
|
| 3 |
+
python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 11 |
+
from llava.model.utils import auto_upgrade
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
    """Compute the weight delta (target - base) and save it to `delta_path`.

    Args:
        base_model_path: path/id of the base (e.g. LLaMA) checkpoint.
        target_model_path: path/id of the fine-tuned (LLaVA) checkpoint;
            its weights are modified in place to become the delta.
        delta_path: output directory for the delta checkpoint + tokenizer.
        hub_repo_id: optional Hugging Face Hub repo id; when given, the
            delta is also pushed to the Hub.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Loading target model")
    auto_upgrade(target_model_path)
    target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Calculating delta")
    # PERF: hoist state_dict() out of the loop — each call rebuilds the whole
    # name->tensor mapping, which the original did once per parameter.
    base_state = base.state_dict()
    for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
        if name not in base_state:
            # Only the multimodal projector is allowed to be target-only.
            assert name in ["model.mm_projector.weight", "model.mm_projector.bias"], f"{name} not in base model"
            continue
        if param.data.shape == base_state[name].shape:
            param.data -= base_state[name]
        else:
            # Vocab-extended matrices: subtract the base weights from the
            # overlapping top-left block; the new rows keep their raw values.
            assert name in ["model.embed_tokens.weight", "lm_head.weight"], f"{name} dimension mismatch: {param.data.shape} vs {base_state[name].shape}"
            bparam = base_state[name]
            param.data[: bparam.shape[0], : bparam.shape[1]] -= bparam

    print("Saving delta")
    if hub_repo_id:
        kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
    else:
        kwargs = {}
    target.save_pretrained(delta_path, **kwargs)
    target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
    target_tokenizer.save_pretrained(delta_path, **kwargs)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
if __name__ == "__main__":
    # CLI entry point: compute and save the weight delta between a base
    # checkpoint and a fine-tuned target (see the module docstring for usage).
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-model-path", type=str, required=True)
    parser.add_argument("--target-model-path", type=str, required=True)
    parser.add_argument("--delta-path", type=str, required=True)
    # Optional: when provided, the delta is also pushed to this Hub repo.
    parser.add_argument("--hub-repo-id", type=str, default=None)
    args = parser.parse_args()

    make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id)
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (1.67 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_qwen2_5vl.cpython-310.pyc
ADDED
|
Binary file (7.23 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2.cpython-310.pyc
ADDED
|
Binary file (42.1 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/siglip_encoder.cpython-310.pyc
ADDED
|
Binary file (22.8 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/adapt_clip_vision_model.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Any, Optional, Tuple, Union
|
| 5 |
+
|
| 6 |
+
from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
|
| 7 |
+
from transformers.models.clip.modeling_clip import CLIPVisionTransformer, CLIPEncoder, CLIPVisionEmbeddings, CLIPConfig, BaseModelOutput
|
| 8 |
+
from transformers.activations import ACT2FN
|
| 9 |
+
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
|
| 10 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
| 11 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 12 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 13 |
+
from transformers.utils import (
|
| 14 |
+
ModelOutput,
|
| 15 |
+
add_start_docstrings,
|
| 16 |
+
add_start_docstrings_to_model_forward,
|
| 17 |
+
is_flash_attn_2_available,
|
| 18 |
+
logging,
|
| 19 |
+
replace_return_docstrings,
|
| 20 |
+
)
|
| 21 |
+
from transformers.utils import logging
|
| 22 |
+
|
| 23 |
+
logger = logging.get_logger(__name__)
|
| 24 |
+
|
| 25 |
+
CLIP_VISION_INPUTS_DOCSTRING = r"""
|
| 26 |
+
Args:
|
| 27 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
| 28 |
+
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
|
| 29 |
+
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
|
| 30 |
+
output_attentions (`bool`, *optional*):
|
| 31 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 32 |
+
tensors for more detail.
|
| 33 |
+
output_hidden_states (`bool`, *optional*):
|
| 34 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 35 |
+
more detail.
|
| 36 |
+
return_dict (`bool`, *optional*):
|
| 37 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
class AdaptCLIPVisionEmbeddings(CLIPVisionEmbeddings):
    """CLIP vision embeddings adapted to variable-sized patch grids.

    Behaves like ``CLIPVisionEmbeddings`` for the canonical square grid of
    ``image_size // patch_size`` patches per side; when the patchifier yields
    a different grid, the learned position embedding is bicubically resized
    on the fly before being added.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        # NOTE(review): the attributes below are presumably also created by
        # CLIPVisionEmbeddings.__init__; re-creating them here replaces the
        # parent's parameters with freshly initialized ones of the same shape
        # — confirm this shadowing is intended.
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Learned class-token embedding prepended to the patch sequence.
        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        # Non-overlapping patch projection (stride == kernel == patch_size).
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        # Position table sized for the canonical square grid (+1 class slot).
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def resize_pos_embedding(self, position_embedding, dst_size=(24, 24), square_size=24):
        """Resize a (1, square_size**2 + 1, C) position embedding to ``dst_size``.

        The class-token slot (index 0) is kept unchanged; the grid entries are
        reshaped to (square_size, square_size) and bicubically interpolated to
        (patch_height, patch_width).
        """
        _dtype = position_embedding.dtype
        patch_height, patch_width = dst_size
        class_position_embedding = position_embedding[:, :1]  # (1, 1, c)
        patch_position_embedding = position_embedding[:, 1:]  # (1, square_size**2, c)

        patch_position_embedding = patch_position_embedding.permute(0, 2, 1).unflatten(-1, [square_size, square_size])
        # Cast back after interpolate in case the op changed the dtype.
        patch_position_embedding = torch.nn.functional.interpolate(
            patch_position_embedding, size=(patch_height, patch_width), mode='bicubic'
        ).to(dtype=_dtype)  # (1, c, ph, pw)
        patch_position_embedding = patch_position_embedding.flatten(-2).permute(0, 2, 1)  # (1, ph*pw, c)
        position_embedding = torch.cat([class_position_embedding, patch_position_embedding], dim=1)  # (1, ph*pw + 1, c)
        return position_embedding


    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """Embed ``pixel_values`` (B, C, H, W) into (B, grid_h*grid_w + 1, embed_dim)."""
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # (B, embed_dim, grid_h, grid_w)

        # Grid actually produced by the conv — may differ from the square one.
        patch_height, patch_width = patch_embeds.shape[-2:]

        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)  # (B, grid_h*grid_w, embed_dim)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)  # (B, 1, embed_dim)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        # Use the stored position table directly for the canonical grid,
        # otherwise resize it to the actual grid first.
        square_size = self.config.image_size // self.config.patch_size
        if patch_height == square_size and patch_width == square_size:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        else:
            position_embedding = self.position_embedding(self.position_ids)
            position_embedding = self.resize_pos_embedding(position_embedding, dst_size=(patch_height, patch_width), square_size=square_size)
            embeddings = embeddings + position_embedding
        return embeddings
|
| 102 |
+
|
| 103 |
+
class AdaptCLIPVisionTransformer(CLIPVisionTransformer):
    """CLIP vision transformer that encodes a batch of variable-sized images.

    Each image in the (list-typed) batch is embedded independently, padded to
    the longest patch sequence in the batch, and the padding is excluded from
    attention via a mask derived from ``tgt_sizes``.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        # Replace the stock embeddings with the grid-adaptive variant; the
        # rest of the stack matches CLIPVisionTransformer (note: the
        # "pre_layrnorm" misspelling is the upstream attribute name and must
        # be kept for checkpoint compatibility).
        self.embeddings = AdaptCLIPVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[list] = None,
        tgt_sizes: Optional[torch.IntTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Encode a batch of variable-sized images. ``pixel_values`` is a list of
        per-image tensors; ``tgt_sizes`` holds the per-image patch grid as
        (rows, cols), used both for padding length and the attention mask.

        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        batch_size = len(pixel_values)

        # Longest patch sequence in the batch; every image is padded to it.
        max_patches = max(tgt_sizes[:, 0] * tgt_sizes[:, 1])

        hidden_states = []
        for i in range(batch_size):
            hidden_state = self.embeddings(pixel_values=pixel_values[i].unsqueeze(0))  # (1, n_i + 1, C)
            hidden_state = self.pre_layrnorm(hidden_state)
            # Right-pad with zeros to (1, max_patches + 1, C); +1 is the class token.
            padding_size = max_patches + 1 - hidden_state.shape[1]
            padding = torch.zeros((1, padding_size, hidden_state.shape[2]), dtype=hidden_state.dtype, device=hidden_state.device)
            state = torch.cat([hidden_state, padding], dim=1)
            hidden_states.append(state)

        hidden_states = torch.cat(hidden_states, dim=0)

        # True at real positions (class token + this image's patches), False at padding.
        patch_attention_mask = torch.zeros((batch_size, 1, max_patches + 1), dtype=torch.bool, device=hidden_states.device)
        for i in range(batch_size):
            patch_attention_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1] + 1] = True

        patch_attention_mask = patch_attention_mask.view(batch_size, -1)
        # The call to `_upad_input` in `_flash_attention_forward` is expensive.
        # When the mask is all ones (attending to the whole sequence), skip it:
        # passing no mask is equivalent and faster.
        if not torch.any(~patch_attention_mask):
            attention_mask=None
        else:
            attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)  # (B, 1, L, L)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # FIXME: pooling takes the class-token state only; post_layernorm over
        # padded features is therefore not an issue here, but the padded tail
        # of last_hidden_state is returned un-trimmed.
        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
| 188 |
+
|
| 189 |
+
class AdaptCLIPVisionModel(CLIPVisionModel):
    """``CLIPVisionModel`` whose vision tower is the grid-adaptive transformer.

    Drop-in replacement for ``CLIPVisionModel`` that accepts a list of
    variable-sized images plus their patch-grid sizes (``tgt_sizes``); all
    work is delegated to :class:`AdaptCLIPVisionTransformer`.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        # Swap in the adaptive transformer before weight init so post_init
        # covers its parameters as well.
        self.vision_model = AdaptCLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[list] = None,
        tgt_sizes: Optional[torch.IntTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Pure delegation; see AdaptCLIPVisionTransformer.forward for semantics.
        return self.vision_model(
            pixel_values=pixel_values,
            tgt_sizes=tgt_sizes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/bpe_simple_vocab_16e6.txt.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
|
| 3 |
+
size 1356917
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_model.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" huggingface model adapter
|
| 2 |
+
|
| 3 |
+
Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from torch.nn import functional as F
|
| 11 |
+
from torch import TensorType
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
import transformers
|
| 15 |
+
from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
|
| 16 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
|
| 17 |
+
except ImportError as e:
|
| 18 |
+
transformers = None
|
| 19 |
+
|
| 20 |
+
class BaseModelOutput:
|
| 21 |
+
pass
|
| 22 |
+
|
| 23 |
+
class PretrainedConfig:
|
| 24 |
+
pass
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
from .hf_configs import arch_dict
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# utils
|
| 31 |
+
def _camel2snake(s):
|
| 32 |
+
return re.sub(r"(?<!^)(?=[A-Z])", "_", s).lower()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# TODO: ?last - for gpt-like models
|
| 36 |
+
_POOLERS = {}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def register_pooler(cls):
    """Class decorator: add *cls* to the ``_POOLERS`` registry under the
    snake_case form of its class name, returning the class unchanged."""
    registry_key = _camel2snake(cls.__name__)
    _POOLERS[registry_key] = cls
    return cls
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@register_pooler
class MeanPooler(nn.Module):
    """Mean pooling over valid (mask == 1) token positions."""

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        # Zero out padded positions, then average by the count of valid tokens.
        masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
        return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@register_pooler
|
| 55 |
+
class MaxPooler(nn.Module):
    """Max pooling over valid token positions.

    Expects a transformer output (anything exposing ``last_hidden_state`` of
    shape [batch, seq, dim]) and an ``attention_mask`` of shape [batch, seq]
    that is nonzero at valid tokens; returns the per-dimension max over valid
    positions only.
    """

    def forward(self, x, attention_mask):
        # Bug fix: mask out the PADDED positions (mask == 0), not the valid
        # ones. The original passed the mask itself to masked_fill, which
        # (a) erased the valid tokens instead of the padding and (b) used an
        # integer mask, which masked_fill rejects in current PyTorch (it
        # requires a bool mask).
        pad_positions = (attention_mask == 0).unsqueeze(-1)
        masked_output = x.last_hidden_state.masked_fill(pad_positions, -torch.inf)
        return masked_output.max(1).values
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@register_pooler
class ClsPooler(nn.Module):
    """CLS-token pooling.

    Prefers the model's own ``pooler_output`` when available (and
    ``use_pooler_output`` is True); otherwise takes the hidden state at the
    CLS position (index 0). ``attention_mask`` is accepted for interface
    parity with the other poolers but is not used.
    """

    def __init__(self, use_pooler_output=True):
        super().__init__()
        self.cls_token_position = 0
        self.use_pooler_output = use_pooler_output

    def forward(self, x: BaseModelOutput, attention_mask: TensorType):

        if self.use_pooler_output and isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and (x.pooler_output is not None):
            return x.pooler_output

        return x.last_hidden_state[:, self.cls_token_position, :]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class HFTextEncoder(nn.Module):
    """HuggingFace model adapter.

    Wraps an arbitrary HuggingFace transformer as a CLIP text tower: loads
    (or builds from config) the backbone, selects a pooler from ``_POOLERS``
    and projects the pooled representation to ``output_dim``. Optionally
    supports image-conditioned masked-language-modeling training.
    """

    def __init__(self, model_name_or_path: str, output_dim: int, tokenizer_name: str = None, config: PretrainedConfig = None, pooler_type: str = None, proj: str = None, pretrained: bool = True, masked_language_modeling: bool = False):
        super().__init__()

        self.output_dim = output_dim

        # TODO: find better way to get this information
        uses_transformer_pooler = pooler_type == "cls_pooler"

        if transformers is None:
            raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
        if config is None:
            self.config = AutoConfig.from_pretrained(model_name_or_path)
            # Pick the factory: MLM head vs plain encoder, pretrained weights
            # vs config-only initialization.
            if masked_language_modeling:
                create_func, model_args = (AutoModelForMaskedLM.from_pretrained, model_name_or_path) if pretrained else (AutoModelForMaskedLM.from_config, self.config)
            else:
                create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (AutoModel.from_config, self.config)
            # TODO: do all model configs have this attribute? PretrainedConfig does so yes??
            if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
                # For encoder-decoder models keep only the encoder half.
                self.transformer = create_func(model_args)
                self.transformer = self.transformer.encoder
            else:
                self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
        else:
            self.config = config
            if masked_language_modeling:
                self.transformer = AutoModelForMaskedLM.from_config(config)
            else:
                self.transformer = AutoModel.from_config(config)

        if pooler_type is None:  # get default arch pooler
            self.pooler = _POOLERS[(arch_dict[self.config.model_type]["pooler"])]()
        else:
            self.pooler = _POOLERS[pooler_type]()

        d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
        # NOTE(review): when d_model != output_dim and proj is None (or an
        # unrecognized string), self.proj is never assigned and forward() will
        # raise AttributeError — verify callers always pass a valid proj.
        if (d_model == output_dim) and (proj is None):  # do we always need a proj?
            self.proj = nn.Identity()
        elif proj == "linear":
            self.proj = nn.Linear(d_model, output_dim, bias=False)
        elif proj == "mlp":
            hidden_size = (d_model + output_dim) // 2
            self.proj = nn.Sequential(
                nn.Linear(d_model, hidden_size, bias=False),
                nn.GELU(),
                nn.Linear(hidden_size, output_dim, bias=False),
            )

        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
        """BERT-style MLM corruption of ``input_ids`` (mutated in place).

        Pad and CLS tokens are never masked. When ``targets`` is given,
        unmasked positions are set to -100 so the loss ignores them, and
        (input_ids, targets) is returned; otherwise only input_ids.
        """
        if masked_indices is None:
            masked_indices = torch.bernoulli(probability_matrix).bool()

        masked_indices[input_ids == self.tokenizer.pad_token_id] = False
        masked_indices[input_ids == self.tokenizer.cls_token_id] = False

        if targets is not None:
            targets[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
        input_ids[indices_replaced] = self.tokenizer.mask_token_id

        # 10% of the time, we replace masked input tokens with random word
        indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
        input_ids[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged

        if targets is not None:
            return input_ids, targets
        else:
            return input_ids

    def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
        """Run one image-conditioned masked-LM pass and return the MLM loss."""
        labels = input_ids.clone()
        attn_mask = (input_ids != self.config.pad_token_id).long()
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(input_ids.device)
        vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
        probability_matrix = torch.full(labels.shape, mlm_probability)
        input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels, probability_matrix=probability_matrix)
        mlm_output = self.transformer(
            input_ids,
            attention_mask=attn_mask,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_atts,
            return_dict=True,
            labels=labels,
        )
        return mlm_output.loss

    def forward(self, x: TensorType) -> TensorType:
        """Encode token ids ``x``: backbone -> pooler -> projection."""
        attn_mask = (x != self.config.pad_token_id).long()
        out = self.transformer(input_ids=x, attention_mask=attn_mask)
        pooled_out = self.pooler(out, attn_mask)

        return self.proj(pooled_out)

    def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
        """Freeze the tower, optionally leaving the last ``unlocked_layers``
        transformer layers trainable. LayerNorm parameters stay trainable
        when ``freeze_layer_norm`` is False."""
        if not unlocked_layers:  # full freezing
            for n, p in self.transformer.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
            return

        encoder = self.transformer.encoder if hasattr(self.transformer, "encoder") else self.transformer
        layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
        print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
        embeddings = getattr(self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
        modules = [embeddings, *layer_list][:-unlocked_layers]
        # freeze layers
        for module in modules:
            for n, p in module.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # `enable` is accepted for API parity but ignored; checkpointing is
        # always turned on.
        self.transformer.gradient_checkpointing_enable()

    def get_num_layers(self):
        """Return the number of transformer layers in the backbone."""
        encoder = self.transformer.encoder if hasattr(self.transformer, "encoder") else self.transformer
        layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
        return len(layer_list)

    def init_parameters(self):
        # Intentionally a no-op: the backbone ships with its own initialization.
        pass
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1280,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 32,
|
| 6 |
+
"width": 4096,
|
| 7 |
+
"head_width": 128,
|
| 8 |
+
"mlp_ratio": 5,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-8b-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"qkv_bias": false,
|
| 13 |
+
"xattn": true,
|
| 14 |
+
"postnorm": false,
|
| 15 |
+
"fusedLN": false,
|
| 16 |
+
"use_rms_norm": true
|
| 17 |
+
},
|
| 18 |
+
"text_cfg": {
|
| 19 |
+
"context_length": 77,
|
| 20 |
+
"vocab_size": 49408,
|
| 21 |
+
"width": 1280,
|
| 22 |
+
"heads": 20,
|
| 23 |
+
"layers": 32,
|
| 24 |
+
"xattn": false,
|
| 25 |
+
"fusedLN": false
|
| 26 |
+
}
|
| 27 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/tokenizer.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" CLIP tokenizer
|
| 2 |
+
|
| 3 |
+
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import gzip
|
| 7 |
+
import html
|
| 8 |
+
import os
|
| 9 |
+
from functools import lru_cache
|
| 10 |
+
from typing import Union, List
|
| 11 |
+
|
| 12 |
+
import ftfy
|
| 13 |
+
import regex as re
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
# https://stackoverflow.com/q/62691279
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@lru_cache()
def default_bpe():
    """Return the absolute path of the bundled BPE vocabulary archive."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "bpe_simple_vocab_16e6.txt.gz")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@lru_cache()
def bytes_to_unicode():
    """Build the reversible byte -> unicode-character table used by the BPE vocab.

    Printable latin-1 bytes map to themselves; every remaining byte is mapped
    to a codepoint above 255 so no entry collides with whitespace or control
    characters the BPE code cannot handle. The result covers all 256 byte
    values with 256 distinct characters.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    table = {b: chr(b) for b in printable}
    overflow = 0
    for byte in range(2**8):
        if byte not in table:
            table[byte] = chr(2**8 + overflow)
            overflow += 1
    return table
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings). Robustness fix: an empty or single-symbol word now yields an
    empty set — the original indexed ``word[0]`` unconditionally and raised
    IndexError on empty input.
    """
    return set(zip(word, word[1:]))
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def basic_clean(text):
    """Normalize raw text: repair mojibake with ftfy, unescape HTML entities, strip edges.

    ``html.unescape`` is applied twice — presumably to undo double-escaped
    entities common in scraped captions (e.g. ``&amp;amp;`` -> ``&``); verify
    against the data source.
    """
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer (adapted from openai/CLIP).

    Text is cleaned and lowercased, split with a regex into words / numbers /
    punctuation, byte-encoded through ``bytes_to_unicode``, then merged using
    the BPE ranks loaded from ``bpe_path``.
    """

    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
        # NOTE: the default for `bpe_path` is evaluated once at import time.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        # First line is a version header; keep exactly 49152-256-2 merge rules.
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]  # word-final variants
        for merge in merges:
            vocab.append("".join(merge))
        if not special_tokens:
            special_tokens = ["<start_of_text>", "<end_of_text>"]
        else:
            special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
        vocab.extend(special_tokens)
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens pass through BPE unchanged.
        self.cache = {t: t for t in special_tokens}
        special = "|".join(special_tokens)
        # NOTE(review): the \p{...} classes require the third-party `regex`
        # module; stdlib `re` rejects this pattern — confirm the file imports
        # `regex as re`.
        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

        self.vocab_size = len(self.encoder)
        self.all_special_ids = [self.encoder[t] for t in special_tokens]

    def bpe(self, token):
        """Apply BPE merges to one byte-encoded token.

        Returns a space-separated string of sub-word units; results are
        memoized in ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                # Fix: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; tuple.index raises ValueError.
                except ValueError:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean, lowercase and BPE-encode ``text`` into a list of token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def decode(self, tokens):
        """Map token ids back to text; ``</w>`` markers become spaces."""
        text = "".join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors="replace").replace("</w>", " ")
        return text
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
_tokenizer = SimpleTokenizer()
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s).

    Each row is ``<start_of_text> ... <end_of_text>`` zero-padded on the
    right; rows longer than ``context_length`` are truncated and
    re-terminated with the end-of-text token.

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot = _tokenizer.encoder["<start_of_text>"]
    eot = _tokenizer.encoder["<end_of_text>"]
    result = torch.zeros(len(texts), context_length, dtype=torch.long)

    for row, text in enumerate(texts):
        ids = [sot] + _tokenizer.encode(text) + [eot]
        if len(ids) > context_length:
            ids = ids[:context_length]
            ids[-1] = eot  # keep the terminator after truncation
        result[row, : len(ids)] = torch.tensor(ids)

    return result
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class HFTokenizer:
    """Wrap a HuggingFace tokenizer behind the CLIP tokenize call shape."""

    def __init__(self, tokenizer_name: str):
        from transformers import AutoTokenizer

        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:
        # Same cleaning as for the default tokenizer, except lowercasing;
        # adding lower (for case-sensitive tokenizers) would make it more
        # robust but less sensitive to nuance.
        if isinstance(texts, str):
            texts = [texts]
        cleaned = [whitespace_clean(basic_clean(t)) for t in texts]
        encoded = self.tokenizer(
            cleaned,
            return_tensors="pt",
            max_length=context_length,
            padding="max_length",
            truncation=True,
        )
        return encoded.input_ids
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_processors.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
# Adapted from https://github.com/baaivision/EVA/tree/master/EVA-CLIP
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from torchvision import transforms
|
| 6 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 7 |
+
from transformers.image_processing_utils import BatchFeature
|
| 8 |
+
from PIL import Image
|
| 9 |
+
from transformers.image_transforms import convert_to_rgb
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BaseProcessor:
    """Identity processor; subclasses replace ``self.transform`` with a real pipeline."""

    def __init__(self):
        # Default transform is a pass-through.
        self.transform = lambda x: x

    def __call__(self, item):
        return self.transform(item)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class EvaClipImageBaseProcessor(BaseProcessor):
    """Shared normalization setup for EVA-CLIP image processors.

    Defaults to the OpenAI CLIP mean/std when none are supplied.
    """

    def __init__(self, mean=None, std=None):
        # Fix: initialize the base class so `self.transform` exists; a
        # directly-constructed instance previously raised AttributeError
        # from the inherited __call__ because BaseProcessor.__init__ was
        # never run.
        super().__init__()
        self.mean = (0.48145466, 0.4578275, 0.40821073) if mean is None else mean
        self.std = (0.26862954, 0.26130258, 0.27577711) if std is None else std

        self.normalize = transforms.Normalize(self.mean, self.std)

    @property
    def image_mean(self):
        # Expose the mean under the name HF image processors use.
        return self.mean
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class EvaClipImageTrainProcessor(EvaClipImageBaseProcessor):
    """EVA-CLIP image preprocessor: RGB-convert, bicubic resize, center crop,
    to-tensor, then normalize with the configured mean/std.

    NOTE(review): despite the "Train" name the pipeline is deterministic
    (Resize + CenterCrop), and `min_scale` / `max_scale` are accepted but
    never used — confirm whether random-resized cropping was intended.
    """

    def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0):
        super().__init__(mean=mean, std=std)

        self.transform = transforms.Compose(
            [
                convert_to_rgb,
                transforms.Resize(
                    image_size,
                    interpolation=InterpolationMode.BICUBIC,
                ),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                self.normalize,
            ]
        )

        self.image_size = image_size

    def preprocess(self, images, return_tensors):
        """HF-style entry point: accept one PIL image or a list of them and
        return a BatchFeature with "pixel_values"."""
        if isinstance(images, Image.Image):
            images = [images]
        else:
            assert isinstance(images, list)

        # Run the torchvision pipeline per image; numpy() so BatchFeature
        # can convert to the requested tensor type.
        transformed_images = [self.transform(image).numpy() for image in images]
        data = {"pixel_values": transformed_images}

        return BatchFeature(data=data, tensor_type=return_tensors)

    def __call__(self, item):
        return self.transform(item)

    @property
    def crop_size(self):
        # Mirrors the HF CLIPImageProcessor attribute of the same name.
        return {"height": self.image_size, "width": self.image_size}

    @property
    def size(self):
        # Mirrors the HF CLIPImageProcessor attribute of the same name.
        return {"shortest_edge": self.image_size}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 768,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 336,
|
| 5 |
+
"layers": 24,
|
| 6 |
+
"width": 1024,
|
| 7 |
+
"drop_path_rate": 0,
|
| 8 |
+
"head_width": 64,
|
| 9 |
+
"mlp_ratio": 2.6667,
|
| 10 |
+
"patch_size": 14,
|
| 11 |
+
"eva_model_name": "eva-clip-l-14-336",
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"fusedLN": true,
|
| 14 |
+
"rope": true,
|
| 15 |
+
"pt_hw_seq_len": 16,
|
| 16 |
+
"intp_freq": true,
|
| 17 |
+
"naiveswiglu": true,
|
| 18 |
+
"subln": true
|
| 19 |
+
},
|
| 20 |
+
"text_cfg": {
|
| 21 |
+
"context_length": 77,
|
| 22 |
+
"vocab_size": 49408,
|
| 23 |
+
"width": 768,
|
| 24 |
+
"heads": 12,
|
| 25 |
+
"layers": 12,
|
| 26 |
+
"xattn": false,
|
| 27 |
+
"fusedLN": true
|
| 28 |
+
}
|
| 29 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/imagebind.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from transformers import CLIPImageProcessor
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
from imagebind.models import imagebind_model
|
| 8 |
+
from imagebind.models.imagebind_model import ModalityType
|
| 9 |
+
from imagebind.data import load_and_transform_audio_data
|
| 10 |
+
except ImportError:
|
| 11 |
+
pass
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ImageBindWrapper(nn.Module):
    """Frozen feature extractor backed by ImageBind-Huge.

    Vision tensors produce per-patch embeddings; dict inputs carrying a
    non-None "audios" entry are routed to the audio branch and produce one
    embedding per clip.
    """

    def __init__(self, vision_tower, select_layer, select_feature="patch", delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.vision_tower_name = vision_tower
        self.select_layer = select_layer
        self.select_feature = select_feature

        if not delay_load:
            self.load_model()

    def load_model(self):
        """Instantiate the pretrained ImageBind model and freeze its weights."""
        self.image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
        self.vision_tower = imagebind_model.imagebind_huge(pretrained=True)
        for p in self.vision_tower.parameters():
            p.requires_grad = False
        self.vision_tower.eval()
        self.is_loaded = True

    def train(self, mode=True):
        # Keep the frozen tower in eval mode even when the parent model
        # switches to training.
        self.training = mode

        if self.is_loaded:
            self.vision_tower.eval()

    @torch.no_grad()
    def forward(self, x):
        """Embed a batch. Dicts with "audios" use the audio branch; any
        other input is treated as a vision tensor.

        NOTE: a dict whose "audios" is None falls through and returns None,
        matching the original control flow.
        """
        if isinstance(x, dict):  # fix: was `type(x) == dict`
            if x["audios"] is not None:
                inputs = {ModalityType.AUDIO: load_and_transform_audio_data(x["audios"], device=self.device).half()}
                embeddings = self.vision_tower(inputs)
                audio_embedding = embeddings[ModalityType.AUDIO]
                return audio_embedding.unsqueeze(1)
        else:
            inputs = {ModalityType.VISION: x.to(dtype=self.dtype)}
            embeddings = self.vision_tower(inputs)
            vision_embedding = embeddings[ModalityType.VISION]
            if vision_embedding.ndim == 2:
                return vision_embedding.unsqueeze(1)
            if vision_embedding.shape[1] == 257:
                # 257 = 1 CLS token + 256 patches; drop the CLS token.
                return vision_embedding[:, 1:]
            raise ValueError(f"Unexpected shape: {vision_embedding.shape}")

    @property
    def dummy_feature(self):
        return torch.zeros(1, 1024, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # The vision preprocessor's CLS token tracks the model's dtype/device.
        return self.vision_tower.modality_preprocessors.vision.cls_token.dtype

    @property
    def device(self):
        return self.vision_tower.modality_preprocessors.vision.cls_token.device

    @property
    def hidden_size(self):
        # ImageBind-Huge embedding width.
        return 1024
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/open_clip_encoder.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from transformers import CLIPImageProcessor
|
| 4 |
+
from llava.utils import rank0_print
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import open_clip
|
| 8 |
+
import torchvision
|
| 9 |
+
from open_clip.transformer import _expand_token
|
| 10 |
+
except ImportError:
|
| 11 |
+
print("OpenCLIP not installed")
|
| 12 |
+
open_clip = None
|
| 13 |
+
|
| 14 |
+
HIDDEN_SIZE_DICT = {
|
| 15 |
+
"ViT-H-14-378-quickgelu": 1280,
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class OpenCLIPVisionTower(nn.Module):
    """Vision tower that wraps an OpenCLIP visual backbone for LLaVA-style
    feature extraction.

    Loads the model via `open_clip.create_model_and_transforms`, keeps the
    `.visual` module frozen, and builds a HF CLIPImageProcessor matching the
    OpenCLIP preprocessing (resize size, mean, std).
    """

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False
        # Strip the hub prefix to get the plain OpenCLIP model name.
        self.model_name = vision_tower.replace("open_clip_hub:", "")
        self.pretrained = args.vision_tower_pretrained
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")

        if not delay_load:
            rank0_print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(args, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()

    def load_model(self, device_map="auto"):
        # NOTE(review): `device_map` is accepted but unused; the model is
        # created directly on "cuda" — confirm this is intended.
        rank0_print(f"Loading OpenCLIP model: {self.model_name}")
        rank0_print(f"Pretrained: {self.pretrained}")
        vision_tower, _, image_processor = open_clip.create_model_and_transforms(model_name=self.model_name, pretrained=self.pretrained, precision="fp32", device="cuda")

        # Recover preprocessing parameters from the torchvision pipeline that
        # open_clip returned, so a matching HF processor can be constructed.
        resize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Resize)][0]
        normalize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Normalize)][0]
        self.resize_transform_size = resize_transform.size  # 224 or 384
        self.patch_size = vision_tower.visual.conv1.kernel_size[0]  # 14 or 16

        self.image_processor = CLIPImageProcessor.from_pretrained(
            "openai/clip-vit-large-patch14",
            crop_size=resize_transform.size,
            size={"shortest_edge": resize_transform.size},
            image_mean=list(normalize_transform.mean),
            image_std=list(normalize_transform.std),
        )
        rank0_print(f"Loaded image processor: {self.image_processor}")
        # Only the visual half of the CLIP model is kept, frozen.
        self.vision_tower = vision_tower.visual
        self.vision_tower.requires_grad_(False)

        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        """Pick the configured layer's output and slice it per `select_feature`."""
        image_features = image_forward_outs[self.select_layer]
        if self.select_feature == "patch":
            # Drop the first token along dim 1.
            # NOTE(review): for the closure path in forward_visual the
            # features are in LND layout, so dim 1 is batch, not sequence —
            # confirm which backbone this path is used with.
            image_features = image_features[:, 1:]
        elif self.select_feature == "cls_patch":
            image_features = image_features
        elif self.select_feature == "conv_flatten":
            image_features = image_features.flatten(2).transpose(1, 2)
        else:
            raise ValueError(f"Unexpected select feature: {self.select_feature}")
        return image_features

    def forward_visual(self, x, output_hidden_states=False):
        """Run the backbone and return per-layer features.

        timm-style trunks expose `_intermediate_layers`; otherwise a manual
        re-implementation of the OpenCLIP ViT forward collects every
        residual block's output.
        """
        if hasattr(self.vision_tower, "trunk") and hasattr(self.vision_tower.trunk, "_intermediate_layers"):
            return self.vision_tower.trunk._intermediate_layers(x, abs(self.select_layer))
        else:

            def forward_openclip(self, x: torch.Tensor):
                # Mirrors open_clip VisionTransformer.forward, but keeps the
                # output of every resblock instead of only the last.
                features = []
                x = self.conv1(x)  # shape = [*, width, grid, grid]
                x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
                x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

                # class embeddings and positional embeddings
                x = torch.cat(
                    [_expand_token(self.class_embedding, x.shape[0]).to(x.dtype), x],
                    dim=1,
                )
                # shape = [*, grid ** 2 + 1, width]
                x = x + self.positional_embedding.to(x.dtype)

                x = self.patch_dropout(x)
                x = self.ln_pre(x)

                x = x.permute(1, 0, 2)  # NLD -> LND
                for r in self.transformer.resblocks:
                    x = r(x, attn_mask=None)
                    features.append(x)
                return features

            return forward_openclip(self.vision_tower, x)

    def forward(self, images):
        """Extract features for a batch tensor or a list of single images."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.forward_visual(image.to(self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.forward_visual(images.to(self.dtype), output_hidden_states=True)
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # Probe a weight tensor: plain ViTs expose conv1, timm trunks expose
        # trunk.patch_embed.
        if hasattr(self.vision_tower, "conv1"):
            return self.vision_tower.conv1.weight.dtype
        if hasattr(self.vision_tower, "trunk"):
            return self.vision_tower.trunk.patch_embed.proj.weight.dtype
        raise NotImplementedError

    @property
    def device(self):
        if hasattr(self.vision_tower, "conv1"):
            return self.vision_tower.conv1.weight.device
        if hasattr(self.vision_tower, "trunk"):
            return self.vision_tower.trunk.patch_embed.proj.weight.device
        raise NotImplementedError

    @property
    def config(self):
        # No HF-style config object is available for OpenCLIP towers.
        return None

    @property
    def hidden_size(self):
        if self.model_name in HIDDEN_SIZE_DICT:
            return HIDDEN_SIZE_DICT[self.model_name]
        else:
            raise NotImplementedError

    @property
    def num_patches(self):
        # resize_transform_size may be an int or an (h, w) pair.
        image_size = self.resize_transform_size if isinstance(self.resize_transform_size, int) else self.resize_transform_size[0]
        _num_patches = (image_size // self.patch_size) ** 2
        if "cls_patch" in self.select_feature:
            _num_patches += 1
        return _num_patches

    @property
    def image_size(self):
        return self.resize_transform_size

    @property
    def num_patches_per_side(self):
        # NOTE(review): assumes resize_transform_size is an int here, unlike
        # num_patches which also handles a tuple — confirm.
        return self.resize_transform_size // self.patch_size
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/adapt_spatial_resampler.cpython-310.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/llava_mlp.cpython-310.pyc
ADDED
|
Binary file (2.95 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/mlp.cpython-310.pyc
ADDED
|
Binary file (7.36 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/percive_sampler.cpython-310.pyc
ADDED
|
Binary file (7.79 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/adapt_spatial_resampler.py
ADDED
|
@@ -0,0 +1,515 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Alibaba Cloud.
|
| 2 |
+
#
|
| 3 |
+
# This source code is licensed under the license found in the
|
| 4 |
+
# LICENSE file in the root directory of this source tree.
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import math
|
| 8 |
+
import requests
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from functools import partial
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import Callable, Optional, Sequence, Tuple, List, Union
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
from torch.nn import functional as F
|
| 18 |
+
from torch.nn.init import trunc_normal_
|
| 19 |
+
from torchvision import transforms
|
| 20 |
+
from torchvision.transforms import InterpolationMode
|
| 21 |
+
|
| 22 |
+
from llava.slice_process import slice_image_feature_minicpm
|
| 23 |
+
import torchvision.ops.roi_align as RoIAlign
|
| 24 |
+
from einops import rearrange
|
| 25 |
+
import time
|
| 26 |
+
# from llava.model.multimodal_encoder.hubconf import featup
|
| 27 |
+
|
| 28 |
+
def get_abs_pos(abs_pos, tgt_size):
    """Resample a square grid of absolute position embeddings to a new grid.

    Args:
        abs_pos: (L, C) tensor; L must be a perfect square (src_grid ** 2).
        tgt_size: (H, W) target grid size.

    Returns:
        (H * W, C) tensor in the original dtype, interpolated bicubically.
    """
    side = int(math.sqrt(abs_pos.size(0)))
    original_dtype = abs_pos.dtype

    # (L, C) -> (1, C, side, side) for spatial interpolation.
    grid = abs_pos.float().reshape(1, side, side, -1).permute(0, 3, 1, 2)
    resized = F.interpolate(
        grid,
        size=(tgt_size[0], tgt_size[1]),
        mode="bicubic",
        align_corners=False,
    )
    # Back to (H*W, C) and the caller's dtype.
    return resized.permute(0, 2, 3, 1).flatten(0, 2).to(dtype=original_dtype)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
|
| 44 |
+
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """Create 2D sine-cosine positional embeddings for a square grid.

    grid_size: int height and width of the grid.
    Returns:
        [grid_size*grid_size, embed_dim], or [1+grid_size*grid_size, embed_dim]
        with an all-zero row prepended when cls_token is True.
    """
    axis = np.arange(grid_size, dtype=np.float32)
    # meshgrid with w first, matching the reference MAE implementation.
    mesh = np.stack(np.meshgrid(axis, axis), axis=0)
    mesh = mesh.reshape([2, 1, grid_size, grid_size])

    embed = get_2d_sincos_pos_embed_from_grid(embed_dim, mesh)
    if cls_token:
        embed = np.concatenate([np.zeros([1, embed_dim]), embed], axis=0)
    return embed
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode a 2-channel coordinate grid: half of embed_dim per axis."""
    assert embed_dim % 2 == 0

    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)

    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Sine-cosine encode a set of 1D positions.

    embed_dim: output dimension per position (must be even).
    pos: array of positions, any shape (flattened to (M,)).
    Returns:
        (M, embed_dim) array: first half sin, second half cos.
    """
    assert embed_dim % 2 == 0
    freqs = np.arange(embed_dim // 2, dtype=np.float32)
    freqs /= embed_dim / 2.
    freqs = 1. / 10000 ** freqs  # (D/2,)

    flat_pos = pos.reshape(-1)  # (M,)
    angles = np.einsum('m,d->md', flat_pos, freqs)  # (M, D/2), outer product

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class AdaptSpatialResampler(nn.Module):
|
| 96 |
+
"""
|
| 97 |
+
A 2D perceiver-resampler network with one cross attention layers by
|
| 98 |
+
(grid_size**2) learnable queries and 2d sincos pos_emb
|
| 99 |
+
Outputs:
|
| 100 |
+
A tensor with the shape of (grid_size**2, embed_dim)
|
| 101 |
+
"""
|
| 102 |
+
|
| 103 |
+
def __init__(
|
| 104 |
+
self,
|
| 105 |
+
config,
|
| 106 |
+
grid_size,
|
| 107 |
+
embed_dim,
|
| 108 |
+
num_heads,
|
| 109 |
+
kv_dim=None,
|
| 110 |
+
norm_layer=partial(nn.LayerNorm, eps=1e-6)
|
| 111 |
+
):
|
| 112 |
+
super().__init__()
|
| 113 |
+
self.config = config
|
| 114 |
+
self.grid_size = grid_size
|
| 115 |
+
self.num_queries = grid_size ** 2
|
| 116 |
+
self.embed_dim = embed_dim
|
| 117 |
+
self.num_heads = num_heads
|
| 118 |
+
self.mm_hidden_size = self.config.mm_hidden_size
|
| 119 |
+
self.feature_scale_mask = getattr(self.config, 'feature_scale_mask', 7)
|
| 120 |
+
vision_tower = getattr(self.config, 'mm_vision_tower', '')
|
| 121 |
+
self.vision_tower_name = 'clip-large'
|
| 122 |
+
if 'clip' in vision_tower:
|
| 123 |
+
self.vision_tower_name = 'clip-large'
|
| 124 |
+
elif 'siglip' in vision_tower:
|
| 125 |
+
self.vision_tower_name = 'siglip'
|
| 126 |
+
|
| 127 |
+
self.pos_embed = nn.Parameter(
|
| 128 |
+
torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
|
| 129 |
+
).requires_grad_(False)
|
| 130 |
+
|
| 131 |
+
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 132 |
+
trunc_normal_(self.query, std=.02)
|
| 133 |
+
|
| 134 |
+
if kv_dim is not None and kv_dim != embed_dim:
|
| 135 |
+
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
| 136 |
+
else:
|
| 137 |
+
self.kv_proj = nn.Identity()
|
| 138 |
+
|
| 139 |
+
if self.feature_scale_mask & 8:
|
| 140 |
+
self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='8x')
|
| 141 |
+
elif self.feature_scale_mask & 4:
|
| 142 |
+
self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='4x')
|
| 143 |
+
elif self.feature_scale_mask & 2:
|
| 144 |
+
self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='2x')
|
| 145 |
+
|
| 146 |
+
# four learnable expert embeddings
|
| 147 |
+
self.feature_1x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
|
| 148 |
+
self.feature_2x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
|
| 149 |
+
self.feature_4x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
|
| 150 |
+
self.feature_8x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
|
| 151 |
+
|
| 152 |
+
# It is a 144 diverse embedding, not
|
| 153 |
+
self.query_1 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 154 |
+
trunc_normal_(self.query_1, std=.02)
|
| 155 |
+
|
| 156 |
+
self.query_2 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 157 |
+
trunc_normal_(self.query_2, std=.02)
|
| 158 |
+
|
| 159 |
+
self.query_3 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 160 |
+
trunc_normal_(self.query_3, std=.02)
|
| 161 |
+
|
| 162 |
+
self.query_4 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 163 |
+
trunc_normal_(self.query_4, std=.02)
|
| 164 |
+
|
| 165 |
+
self.features_1x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
|
| 166 |
+
self.features_2x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
|
| 167 |
+
self.features_4x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
|
| 168 |
+
self.features_8x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
|
| 169 |
+
|
| 170 |
+
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
|
| 171 |
+
self.ln_q = norm_layer(embed_dim)
|
| 172 |
+
self.ln_kv = norm_layer(embed_dim)
|
| 173 |
+
self.ln_proj = norm_layer(embed_dim)
|
| 174 |
+
self.ln_post = norm_layer(embed_dim)
|
| 175 |
+
self.cat_proj = nn.Linear(4*embed_dim, embed_dim)
|
| 176 |
+
self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
|
| 177 |
+
|
| 178 |
+
self.apply(self._init_weights)
|
| 179 |
+
|
| 180 |
+
def _init_weights(self, m):
    """Module init hook: truncated-normal Linear weights with zero bias;
    LayerNorm gets unit weight and zero bias."""
    if isinstance(m, nn.Linear):
        trunc_normal_(m.weight, std=.02)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.LayerNorm):
        nn.init.constant_(m.weight, 1.0)
        nn.init.constant_(m.bias, 0)
|
| 188 |
+
|
| 189 |
+
def cal_best_pooling_size(self, feature_wh_ratio=1.0):
    """Pick the RoIAlign output size whose aspect ratio best matches the
    feature map's width/height ratio, compared in log space.

    Candidates are listed as (w, h); the winner is returned as (h, w).
    """
    candidates_wh = [
        (4, 2), (3, 2), (4, 3), (3, 3),
        (2, 4), (2, 3), (3, 4),
    ]
    target = math.log(feature_wh_ratio)
    best_hw = (3, 3)
    best_err = float("inf")
    for w, h in candidates_wh:
        err = abs(target - math.log(w / h))
        if err < best_err:
            best_hw, best_err = (h, w), err
    return best_hw
|
| 204 |
+
|
| 205 |
+
def adapt_unfold(self, input_embeds, spatial_size=(24, 24), best_grid=(1, 1), sampler_bins=1):
    """Pool a channel-last feature map into per-region token groups via RoIAlign.

    Args:
        input_embeds: feature map laid out as (bs, h, w, c); permuted to
            (bs, c, h, w) below for region pooling.
        spatial_size: nominal (height, width) of the map — unused here; the
            regions come from slice_image_feature_minicpm instead.
        best_grid: unused input; immediately overwritten by the slicer's result.
        sampler_bins: larger values would mean more sampling points and more
            detail (currently unused in this implementation).

    Returns:
        Tensor of shape (bs*N, pooled_h*pooled_w, c): one row of pooled
        tokens per sliced region (N == self.num_queries regions).
    """
    input_embeds = input_embeds.permute(0, 3,1,2)

    # Slice the map into self.num_queries regions; wh_ratio is the regions'
    # width/height ratio used to choose the pooling window.
    resample_regions, best_grid, wh_ratio = slice_image_feature_minicpm(input_embeds, self.num_queries)

    output_size = self.cal_best_pooling_size(wh_ratio)
    # RoIAlign runs in float32; cast back to the input dtype afterwards.
    # NOTE(review): RoIAlign appears to be bound via
    # `import torchvision.ops.roi_align as RoIAlign`, which imports the
    # *module*; confirm this call resolves to the roi_align function.
    aligned_feature = RoIAlign(input_embeds.float(), resample_regions.float(), output_size,
                               spatial_scale=1.0).to(dtype=input_embeds.dtype)
    unfold_input_embeds = aligned_feature.flatten(-2).permute(0, 2, 1)
    # bs*N, c, h, w -> bs*N,c,h*w -> bs*N, h*w, c
    return unfold_input_embeds
|
| 219 |
+
|
| 220 |
+
def unfold(self, input_embeds, spatial_size=(24, 24), kernel_size=2, stride=2):
    """Rearrange a (bs, n, c) token sequence into per-window token groups.

    The sequence is folded back to its (h, w) grid, then sliding windows of
    kernel_size x kernel_size are extracted with the given stride.

    Returns:
        Tensor of shape (bs*l, kernel_size**2, c), l = number of windows.
    """
    grid = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)  # bs, c, h, w
    patches = F.unfold(grid, kernel_size=kernel_size, stride=stride)  # bs, c*k*k, l
    patches = patches.unflatten(1, [-1, kernel_size ** 2])            # bs, c, k*k, l
    return patches.permute(0, 3, 2, 1).flatten(0, 1)                  # bs*l, k*k, c
|
| 229 |
+
|
| 230 |
+
def forward(self, x, tgt_size=(24, 24), attn_mask=None):
    """Resample a single-scale feature sequence into self.num_queries tokens.

    Args:
        x: (bs, n, c) visual features; n should equal tgt_size[0] * tgt_size[1].
        tgt_size: (height, width) of the feature grid behind ``x``.
        attn_mask: optional mask forwarded to nn.MultiheadAttention.

    Returns:
        (bs, num_queries, embed_dim) resampled tokens, projected by self.proj.

    NOTE(review): this path reads self.query / self.kv_proj, which are not
    visible in the constructor fragment — confirm they exist before relying
    on this entry point (forward_with_featup appears to be the primary one).
    """
    dtype = x.dtype
    bs = x.shape[0]
    key_height, key_width = tgt_size
    # Interpolate the square sin-cos table to the actual key resolution.
    key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))

    x = self.ln_kv(self.kv_proj(x))

    q = self.ln_q(self.query)  # [:num_valid_query]

    query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
    key = x + key_pos_embed[None].to(dtype=dtype)
    value = x

    # One query token per attention slot: (bs*num_queries, 1, c).
    query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)
    # BUGFIX: the original called self.adapt_unflod, which is not defined on
    # this class (AttributeError). adapt_unfold is the intended method; it
    # expects a channel-last (bs, h, w, c) map, so fold the sequence back to
    # its grid first — same pattern as forward_with_muti_res.
    key = self.adapt_unfold(key.reshape(bs, key_height, key_width, self.embed_dim))
    value = self.adapt_unfold(value.reshape(bs, key_height, key_width, self.embed_dim))

    out, attn_weights = self.attn(
        query.permute(1, 0, 2),
        key.permute(1, 0, 2),
        value.permute(1, 0, 2),
        attn_mask=attn_mask
    )
    # out -> 1, bs*l, c
    x = out[0].unflatten(0, [bs, -1])  # bs, l, c
    x = self.ln_post(x)
    x = x @ self.proj
    return x
|
| 261 |
+
|
| 262 |
+
def forward_with_muti_res(self, feature_1x, feature_2x, feature_4x, feature_8x, tgt_size=(24, 24), attn_mask=None, dtype=torch.bfloat16):
    """Resample multi-resolution features into num_queries output tokens.

    Each non-None scale (shaped (1, C_in, H, W)) is projected to embed_dim,
    tagged with a per-scale learnable embedding plus interpolated 2-D
    sin-cos positions, and pooled by adapt_unfold into per-region token
    groups ("KV in a 4*9 manner": up to 4 scales, ~9 pooled tokens per
    region). The scales are concatenated along the pooled-token axis to
    form shared keys/values, over which the four query sets cross-attend in
    turn; their outputs are concatenated channel-wise and fused by cat_proj.

    Returns a (1, num_queries, embed_dim) tensor.
    NOTE(review): batch size is hard-coded to 1 in this path, and tgt_size
    is unused (sizes come from each feature map).
    """
    muti_res_feat_keys = []
    muti_res_feat_values = []
    bs = 1

    feature_list = [feature_1x, feature_2x, feature_4x, feature_8x]
    embedding_list = [self.feature_1x_embedding, self.feature_2x_embedding, self.feature_4x_embedding, self.feature_8x_embedding]
    projector_list = [self.features_1x_projector, self.features_2x_projector, self.features_4x_projector, self.features_8x_projector]

    for feature, embedding, projector in zip(feature_list, embedding_list, projector_list):
        if feature is None:
            # Scale disabled (see feature_scale_mask in get_group_keys).
            continue

        feature = feature.to(torch.bfloat16)
        # (1, C_in, H, W) -> (1, H, W, embed_dim)
        feature = projector(feature.permute(0,2,3,1))

        key_height = feature.shape[1]
        key_width = feature.shape[2]
        key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))  # (H*W, embed_dim)
        feature = rearrange(feature,'b h w c -> b (h w) c')
        feature = self.ln_kv(feature)
        # Keys carry position + scale identity; values stay the plain features.
        key = feature + key_pos_embed[None].to(dtype=dtype) + embedding.to(dtype=dtype)
        value = feature
        key = key.reshape(bs, key_height, key_width, self.embed_dim)
        key = self.adapt_unfold(key)      # (num_queries, pooled, embed_dim)
        value = value.reshape(bs, key_height, key_width, self.embed_dim)
        value = self.adapt_unfold(value)  # (num_queries, pooled, embed_dim)
        muti_res_feat_keys.append(key)
        muti_res_feat_values.append(value)

    # Concatenate the scales along the pooled-token axis.
    muti_res_feat_keys = torch.cat(muti_res_feat_keys, dim=1)
    muti_res_feat_values = torch.cat(muti_res_feat_values, dim=1)

    # achor2 = time.time() - start #0.38
    # print(f'kv: {achor2}')

    """Prepare Q and do attn"""
    attn_results = []
    for query_now in [self.query_1, self.query_2, self.query_3, self.query_4]:
        q = self.ln_q(query_now)
        query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
        # One query token per attention slot: (num_queries, 1, embed_dim).
        query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)

        out, attn_weights = self.attn(
            query.permute(1, 0, 2),               # Q * B * D
            muti_res_feat_keys.permute(1, 0, 2),  # L * B * D
            muti_res_feat_values.permute(1, 0, 2),
            attn_mask=attn_mask
        )
        # out -> (1, bs*l, c)
        get = out[0].unflatten(0, [bs, -1])  # bs, l, c
        get = self.ln_proj(get)
        attn_results.append(get)

    # Fuse the four query streams: channel-concat then project back down.
    x = torch.cat(attn_results, dim=2)  # (1, num_queries, 4*embed_dim)
    x = self.cat_proj(x)
    x = self.ln_post(x)
    x = x @ self.proj

    # achor3 = time.time() - start #0.38
    # print(f'query: {achor3 - achor2}')

    return x
|
| 327 |
+
|
| 328 |
+
def prepare_single_key_value(self, feature_1x, feature_2x, feature_4x, feature_8x, tgt_size=(24, 24), attn_mask=None, dtype=torch.bfloat16):
    """Build pooled attention keys/values from the available feature scales.

    Same per-scale pipeline as forward_with_muti_res (project -> add pos +
    scale embedding -> adapt_unfold), but the batch size comes from
    feature_1x and no attention is run here.

    Returns:
        (keys, values): each concatenated over scales along the pooled-token
        axis, shaped (bs*num_queries, total_pooled_tokens, embed_dim).

    NOTE(review): assumes feature_1x is not None (bs is read from it) and
    that all provided scales share the same batch size; tgt_size and
    attn_mask are unused.
    """
    muti_res_feat_keys = []
    muti_res_feat_values = []
    bs = feature_1x.shape[0]

    feature_list = [feature_1x, feature_2x, feature_4x, feature_8x]
    embedding_list = [self.feature_1x_embedding, self.feature_2x_embedding, self.feature_4x_embedding, self.feature_8x_embedding]
    projector_list = [self.features_1x_projector, self.features_2x_projector, self.features_4x_projector, self.features_8x_projector]

    for feature, embedding, projector in zip(feature_list, embedding_list, projector_list):
        if feature is None:
            # Scale disabled via feature_scale_mask.
            continue

        feature = feature.to(torch.bfloat16)
        # (bs, C_in, H, W) -> (bs, H, W, embed_dim)
        feature = projector(feature.permute(0,2,3,1))

        key_height = feature.shape[1]
        key_width = feature.shape[2]
        key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))
        feature = rearrange(feature,'b h w c -> b (h w) c')
        feature = self.ln_kv(feature)
        # Keys carry position + scale identity; values stay the plain features.
        key = feature + key_pos_embed[None].to(dtype=dtype) + embedding.to(dtype=dtype)
        value = feature
        key = key.reshape(bs, key_height, key_width, self.embed_dim)
        key = self.adapt_unfold(key)
        value = value.reshape(bs, key_height, key_width, self.embed_dim)
        value = self.adapt_unfold(value)
        muti_res_feat_keys.append(key)
        muti_res_feat_values.append(value)

    # Concatenate the scales along the pooled-token axis.
    muti_res_feat_keys = torch.cat(muti_res_feat_keys, dim=1)
    muti_res_feat_values = torch.cat(muti_res_feat_values, dim=1)

    return muti_res_feat_keys, muti_res_feat_values
|
| 363 |
+
|
| 364 |
+
def query_with_parallel_attn(self, bs, key_list, value_list, dtype=torch.bfloat16):
    """Attend the four query sets over a batch of variable-length KV groups.

    key_list / value_list hold one entry per same-shape image group (from
    prepare_single_key_value). Entries are zero-padded along the token axis
    to the longest group, all groups are stacked into one batched attention
    call, and padded positions are excluded via key_padding_mask.

    Returns:
        List of bs tensors, each (num_queries, embed_dim), one per image.

    NOTE(review): assumes every image contributes the same number of query
    rows (token_length = total_rows / bs divides evenly) — confirm for
    mixed-size batches.
    """
    max_len = max([key.shape[1] for key in key_list])
    tgt_lengths = []

    # Record the true (unpadded) KV length once per image; each group
    # contributes shape[0] // num_queries images.
    for i in range(len(key_list)):
        for _ in range(key_list[i].shape[0] // self.num_queries):
            tgt_lengths.append(key_list[i][0].shape[0])

    # Zero-pad every key group to max_len along the token axis.
    padded_key_list = []
    for key in key_list:
        padding_size = max_len - key.shape[1]
        padding = torch.zeros((key.shape[0], padding_size, key.shape[2]), dtype=key.dtype, device=key.device)
        padded_key = torch.cat([key, padding], dim=1)
        padded_key_list.append(padded_key)

    # Same padding for the value groups.
    padded_value_list = []
    for value in value_list:
        padding_size = max_len - value.shape[1]
        padding = torch.zeros((value.shape[0], padding_size, value.shape[2]), dtype=value.dtype, device=value.device)
        padded_value = torch.cat([value, padding], dim=1)
        padded_value_list.append(padded_value)

    padded_keys = torch.cat(padded_key_list, dim=0)
    padded_values = torch.cat(padded_value_list, dim=0)

    # Boolean mask: True = padded (ignored) position. Unmask each image's
    # first tgt_lengths[i] tokens.
    token_length = int(padded_keys.shape[0] / bs)  # query rows per image
    key_padding_mask = torch.ones((padded_keys.shape[0], max_len), dtype=torch.bool, device=key_list[0].device)
    for i in range(bs):
        key_padding_mask[i*token_length : (i+1)*token_length, :tgt_lengths[i]] = False

    attn_results = []
    for query_now in [self.query_1, self.query_2, self.query_3, self.query_4]:
        q = self.ln_q(query_now)
        query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
        # One query token per attention slot: (bs*num_queries, 1, embed_dim).
        query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)

        out, attn_weights = self.attn(
            query.permute(1, 0, 2),
            padded_keys.permute(1, 0, 2),
            padded_values.permute(1, 0, 2),
            key_padding_mask=key_padding_mask
        )
        # out -> (1, bs*l, c)
        get = out[0].unflatten(0, [bs, -1])  # bs, l, c
        get = self.ln_proj(get)
        attn_results.append(get)

    # Fuse the four query streams: channel-concat, project, normalize.
    x = torch.cat(attn_results, dim=2)  # (bs, num_queries, 4*embed_dim)
    x = self.cat_proj(x)
    x = self.ln_post(x)
    x = x @ self.proj

    # Split the batch back into one tensor per image.
    projected_image_features = [x[i] for i in range(bs)]

    return projected_image_features
|
| 420 |
+
|
| 421 |
+
def _repeat(self, query, N: int):
    """Tile a (L, C) query tensor into an (N, L, C) batch."""
    return query[None].expand(N, -1, -1).contiguous()
|
| 423 |
+
|
| 424 |
+
def partition_list(self, input_list, lengths):
    """Split ``input_list`` into consecutive sublists of the given lengths.

    Args:
        input_list: the list to split.
        lengths: per-sublist lengths; must sum exactly to len(input_list).

    Returns:
        List of sublists, one per entry in ``lengths``.

    Raises:
        ValueError: if the lengths overrun or do not exactly cover the list
            (original Chinese error messages preserved).
    """
    result = []
    cursor = 0
    for length in lengths:
        end = cursor + length
        if end > len(input_list):
            raise ValueError("划分长度超过了列表的总长度")
        result.append(input_list[cursor:end])
        cursor = end
    if cursor != len(input_list):
        raise ValueError("划分长度和列表总长度不一致")
    return result
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def forward_with_featup(self, features, patch_sizes, images, num_images):
    """Entry point: resample per-image ViT features with FeatUp multi-scale KV.

    Args:
        features: per-image token features (list or indexable batch); each
            entry is cropped to its h*w valid tokens.
        patch_sizes: per-image (h, w) token-grid sizes.
        images: per-image pixel tensors fed to the FeatUp upsampler.
        num_images: per-sample image counts used to regroup the outputs.

    Returns:
        List (one entry per sample) of lists of projected image features.
    """
    # achor2 = time.time() - start #0.38
    # print(f'achor2: {achor2 - achor1}')

    bs = len(images)

    features_1x = []  # per-image maps of shape (1, C, h, w)

    for i in range(len(features)):
        h, w = patch_sizes[i]

        # Keep only the h*w valid tokens, then fold them back into a 2-D map.
        # NOTE(review): the list branch appears to assume entries already
        # carry a leading batch dim (slicing here is on dim 0) — confirm
        # the caller's shapes.
        if type(features) is list:
            feature = features[i][:h * w, :]
        else:
            feature = features[i][:h * w, :].unsqueeze(0)
        feature = feature.permute(0, 2, 1)       # (1, C, h*w)
        feature = feature.unflatten(2, [h, w])   # (1, C, h, w)
        features_1x.append(feature)

    # Group consecutive images whose feature maps share a shape, so the
    # upsampler can process each group as a single batch.
    keys = []
    values = []
    feat_group = []
    image_group = []
    for i in range(len(features_1x)):
        if i == 0:
            feat_group.append(features_1x[i])
            image_group.append(images[i])
        elif(features_1x[i].shape != features_1x[i-1].shape):
            # Shape changed: flush the current group, then start a new one.
            key, value = self.get_group_keys(feat_group, image_group)
            keys.append(key)
            values.append(value)

            feat_group = []
            image_group = []
            feat_group.append(features_1x[i])
            image_group.append(images[i])
        else:
            feat_group.append(features_1x[i])
            image_group.append(images[i])

    # Flush the final group.
    key, value = self.get_group_keys(feat_group, image_group)
    keys.append(key)
    values.append(value)

    return self.compute_atten(bs, keys, values, num_images)
|
| 494 |
+
|
| 495 |
+
def get_group_keys(self, features_1x, image_group):
    """Run the FeatUp upsampler on one same-shape group and build KV pairs.

    Args:
        features_1x: list of (1, C, h, w) feature maps sharing one shape.
        image_group: matching list of per-image pixel tensors.

    Returns:
        (keys, values) from prepare_single_key_value over the scales kept
        by self.feature_scale_mask (bit0 -> 1x, bit1 -> 2x, bit2 -> 4x,
        bit3 -> 8x; a cleared bit drops that scale).
    """
    features_1x = torch.cat(features_1x, dim=0)
    image_group = torch.stack(image_group, dim=0)

    # FeatUp produces the higher-resolution variants from the 1x features.
    features_2x, features_4x, features_8x = self.upsampler.forward_with_internal_features(image_group, features_1x)

    if self.feature_scale_mask & 1 == 0:
        features_1x = None
    if self.feature_scale_mask & 2 == 0:
        features_2x = None
    if self.feature_scale_mask & 4 == 0:
        features_4x = None
    if self.feature_scale_mask & 8 == 0:
        features_8x = None

    return self.prepare_single_key_value(features_1x, features_2x, features_4x, features_8x)
|
| 511 |
+
|
| 512 |
+
def compute_atten(self, bs, key_list, value_list, num_images):
    """Run the parallel query attention over the grouped KV lists, then
    regroup the per-image outputs into per-sample lists via num_images."""
    per_image_features = self.query_with_parallel_attn(bs, key_list, value_list)
    return self.partition_list(per_image_features, num_images)
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/llava_mlp.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Alibaba Cloud.
|
| 2 |
+
#
|
| 3 |
+
# This source code is licensed under the license found in the
|
| 4 |
+
# LICENSE file in the root directory of this source tree.
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import math
|
| 8 |
+
import requests
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from functools import partial
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import Callable, Optional, Sequence, Tuple, List, Union
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
from torch.nn import functional as F
|
| 18 |
+
from torch.nn.init import trunc_normal_
|
| 19 |
+
from torchvision import transforms
|
| 20 |
+
from torchvision.transforms import InterpolationMode
|
| 21 |
+
from einops import rearrange
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class LLaVA_MLP(nn.Module):
    """Plain two-layer GELU MLP projector.

    The incoming token sequence is truncated to tgt_size[0] * tgt_size[1]
    tokens, then mapped kv_dim -> embed_dim -> embed_dim.

    Outputs:
        A tensor with the shape of (bs, tgt_size[0]*tgt_size[1], embed_dim).
    """

    def __init__(
        self,
        config,
        embed_dim,
        kv_dim=None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.config = config

        self.proj = nn.Sequential(
            nn.Linear(kv_dim, embed_dim),
            nn.GELU(),
            nn.Linear(embed_dim, embed_dim),
        )

    def forward(self, x, tgt_size=(24, 24)):
        num_tokens = tgt_size[0] * tgt_size[1]
        trimmed = x[:, :num_tokens, :]
        return self.proj(trimmed)
|
| 53 |
+
|
| 54 |
+
class LLaVA_MLP_norm(nn.Module):
    """Two-layer GELU MLP projector followed by a LayerNorm.

    Identical to LLaVA_MLP but normalizes the projected tokens with
    LayerNorm(eps=1e-6) before returning them.

    Outputs:
        A tensor with the shape of (bs, tgt_size[0]*tgt_size[1], embed_dim).
    """

    def __init__(
        self,
        config,
        embed_dim,
        kv_dim=None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.config = config

        self.proj = nn.Sequential(
            nn.Linear(kv_dim, embed_dim),
            nn.GELU(),
            nn.Linear(embed_dim, embed_dim),
        )
        self.norm = nn.LayerNorm(embed_dim, eps=1e-6)

    def forward(self, x, tgt_size=(24, 24)):
        trimmed = x[:, :tgt_size[0] * tgt_size[1], :]
        projected = self.proj(trimmed)
        return self.norm(projected)
|
| 84 |
+
|
| 85 |
+
class LLaVA_MLP_Fused(nn.Module):
    """Projector that fuses the main image tokens with extra feature streams.

    ``forward`` receives a pair (image_features, fused_features); the image
    tokens are truncated to tgt_size[0]*tgt_size[1], the streams are stacked
    along dim 0 and flattened channel-wise per token, then projected through
    a two-layer GELU MLP (3*kv_dim -> embed_dim -> embed_dim).
    """

    def __init__(
        self,
        config,
        embed_dim,
        kv_dim=None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.config = config
        self.proj = nn.Sequential(
            nn.Linear(3 * kv_dim, embed_dim),
            nn.GELU(),
            nn.Linear(embed_dim, embed_dim),
        )

    def forward(self, x, tgt_size=(24, 24)):
        image_features, fused_features = x
        image_features = image_features[:, :tgt_size[0] * tgt_size[1], :]
        stacked = torch.cat((image_features, fused_features), dim=0)  # (m, n, d)
        # Equivalent of einops rearrange 'm n d -> n (m d)': per token,
        # concatenate the m streams along the channel axis.
        m, n, d = stacked.shape
        flat = stacked.permute(1, 0, 2).reshape(n, m * d)
        return self.proj(flat).unsqueeze(0)
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/pooler_projector.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
import math
|
| 5 |
+
|
| 6 |
+
from transformers.models.clip.modeling_clip import CLIPVisionModel
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class PoolerProjector(nn.Module):
    """Projector that halves the vision token grid with a strided conv.

    Expects x of shape (bs, hw*hw, mm_hidden_size), where hw is the vision
    tower's tokens-per-side (image_size // patch_size). A 2x2/stride-2
    Conv2d pools the grid, then GELU + Linear map into the LM hidden size.
    """

    def __init__(self, config, vision_cfg):
        super().__init__()
        self._config = config
        self.hw = vision_cfg.image_size // vision_cfg.patch_size

        self.conv_pool = nn.Conv2d(config.mm_hidden_size, config.hidden_size, kernel_size=2, stride=2)

        self.proj = nn.Sequential(
            nn.GELU(),
            nn.Linear(config.hidden_size, config.hidden_size),
        )

    def forward(self, x, *args, **kwargs):
        side = self.hw
        assert side * side == x.shape[1]
        # (bs, n, c) -> (bs, c, h, w) for the conv.
        grid = x.view(x.shape[0], side, side, -1).permute(0, 3, 1, 2)
        pooled = self.conv_pool(grid)
        # (bs, hidden, h/2, w/2) -> (bs, (h/2)*(w/2), hidden)
        tokens = pooled.flatten(2).transpose(1, 2)
        return self.proj(tokens)

    @property
    def config(self):
        return {"mm_projector_type": "pooler"}
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/uhd_v1_resampler.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Alibaba Cloud.
|
| 2 |
+
#
|
| 3 |
+
# This source code is licensed under the license found in the
|
| 4 |
+
# LICENSE file in the root directory of this source tree.
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import math
|
| 8 |
+
import requests
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from functools import partial
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import Callable, Optional, Sequence, Tuple, List, Union
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
from torch.nn import functional as F
|
| 18 |
+
from torch.nn.init import trunc_normal_
|
| 19 |
+
from torchvision import transforms
|
| 20 |
+
from torchvision.transforms import InterpolationMode
|
| 21 |
+
|
| 22 |
+
from llava.slice_process import slice_image_feature_minicpm
|
| 23 |
+
import torchvision.ops.roi_align as RoIAlign
|
| 24 |
+
|
| 25 |
+
def get_abs_pos(abs_pos, tgt_size):
    """Bicubically resize a square position-embedding table to a target grid.

    Args:
        abs_pos: (L, C) table, L assumed to be a perfect square.
        tgt_size: target (H, W) grid.

    Returns:
        (H*W, C) tensor in the original dtype.
    """
    side = int(math.sqrt(abs_pos.size(0)))
    orig_dtype = abs_pos.dtype

    # Interpolation runs in float32, channels-first.
    table = abs_pos.float().reshape(1, side, side, -1).permute(0, 3, 1, 2)
    resized = F.interpolate(
        table,
        size=(tgt_size[0], tgt_size[1]),
        mode="bicubic",
        align_corners=False,
    )
    return resized.permute(0, 2, 3, 1).flatten(0, 2).to(dtype=orig_dtype)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
|
| 41 |
+
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """Build a 2-D sin-cos position table for a square token grid.

    grid_size: int side length of the grid.
    Returns:
        (grid_size**2, embed_dim) array, or (1 + grid_size**2, embed_dim)
        with a leading zero row when cls_token is True.
    """
    axis = np.arange(grid_size, dtype=np.float32)
    # meshgrid with w varying fastest, matching the original layout.
    grid = np.stack(np.meshgrid(axis, axis), axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode a 2-axis coordinate grid: half of embed_dim per axis,
    concatenated along the feature dimension into an (H*W, D) array."""
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)             # (H*W, D)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Classic transformer sin-cos embedding for 1-D positions.

    embed_dim: output dimension per position (must be even).
    pos: array of positions, any shape; flattened to (M,).
    Returns:
        (M, embed_dim) array — sines in the first half, cosines second.
    """
    assert embed_dim % 2 == 0
    # Geometric frequency ladder: 1 / 10000^(2i/D).
    freqs = 1.0 / 10000 ** (np.arange(embed_dim // 2, dtype=np.float32) / (embed_dim / 2.))
    angles = np.outer(pos.reshape(-1), freqs)  # (M, D/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class AdaptSpatialResampler_v1(nn.Module):
|
| 93 |
+
"""
|
| 94 |
+
A 2D perceiver-resampler network with one cross attention layers by
|
| 95 |
+
(grid_size**2) learnable queries and 2d sincos pos_emb
|
| 96 |
+
Outputs:
|
| 97 |
+
A tensor with the shape of (grid_size**2, embed_dim)
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
def __init__(
|
| 101 |
+
self,
|
| 102 |
+
grid_size,
|
| 103 |
+
embed_dim,
|
| 104 |
+
num_heads,
|
| 105 |
+
kv_dim=None,
|
| 106 |
+
norm_layer=partial(nn.LayerNorm, eps=1e-6)
|
| 107 |
+
):
|
| 108 |
+
super().__init__()
|
| 109 |
+
self.grid_size = grid_size
|
| 110 |
+
self.num_queries = grid_size ** 2
|
| 111 |
+
self.embed_dim = embed_dim
|
| 112 |
+
self.num_heads = num_heads
|
| 113 |
+
|
| 114 |
+
self.pos_embed = nn.Parameter(
|
| 115 |
+
torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
|
| 116 |
+
).requires_grad_(False)
|
| 117 |
+
|
| 118 |
+
self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
|
| 119 |
+
trunc_normal_(self.query, std=.02)
|
| 120 |
+
|
| 121 |
+
if kv_dim is not None and kv_dim != embed_dim:
|
| 122 |
+
self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
|
| 123 |
+
else:
|
| 124 |
+
self.kv_proj = nn.Identity()
|
| 125 |
+
|
| 126 |
+
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
|
| 127 |
+
self.ln_q = norm_layer(embed_dim)
|
| 128 |
+
self.ln_kv = norm_layer(embed_dim)
|
| 129 |
+
|
| 130 |
+
self.ln_post = norm_layer(embed_dim)
|
| 131 |
+
self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
|
| 132 |
+
|
| 133 |
+
self.apply(self._init_weights)
|
| 134 |
+
|
| 135 |
+
def _init_weights(self, m):
|
| 136 |
+
if isinstance(m, nn.Linear):
|
| 137 |
+
trunc_normal_(m.weight, std=.02)
|
| 138 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
| 139 |
+
nn.init.constant_(m.bias, 0)
|
| 140 |
+
elif isinstance(m, nn.LayerNorm):
|
| 141 |
+
nn.init.constant_(m.bias, 0)
|
| 142 |
+
nn.init.constant_(m.weight, 1.0)
|
| 143 |
+
|
| 144 |
+
def cal_best_pooling_size(self, feature_wh_ratio=1.0):
|
| 145 |
+
candidate_pooling_sizes = [
|
| 146 |
+
(4, 2), (3, 2), (4, 3), (3, 3),
|
| 147 |
+
(2, 4), (2, 3), (3, 4)
|
| 148 |
+
] # w, h
|
| 149 |
+
log_feature_wh_ratio = math.log(feature_wh_ratio)
|
| 150 |
+
best_pooling_size = (3, 3) # w, h
|
| 151 |
+
min_error = float("inf")
|
| 152 |
+
for candidate_pooling_size in candidate_pooling_sizes:
|
| 153 |
+
w, h = candidate_pooling_size
|
| 154 |
+
error = abs(log_feature_wh_ratio - math.log(w/h))
|
| 155 |
+
if error < min_error:
|
| 156 |
+
best_pooling_size = (h, w)
|
| 157 |
+
min_error = error
|
| 158 |
+
return best_pooling_size
|
| 159 |
+
|
| 160 |
+
def adapt_unflod(self, input_embeds, spatial_size=(24, 24), best_grid=(1, 1), sampler_bins=1):
|
| 161 |
+
# input_embeds: bs, n, c
|
| 162 |
+
# spatial_size: feature map height, width
|
| 163 |
+
# sampler_bins越大,采样点越多,细节越多
|
| 164 |
+
input_embeds = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)
|
| 165 |
+
resample_regions, best_grid, wh_ratio = slice_image_feature_minicpm(input_embeds, self.num_queries)
|
| 166 |
+
|
| 167 |
+
output_size = self.cal_best_pooling_size(wh_ratio)
|
| 168 |
+
aligned_feature = RoIAlign(input_embeds.float(), resample_regions.float(), output_size,
|
| 169 |
+
spatial_scale=1.0).to(dtype=input_embeds.dtype)
|
| 170 |
+
unfold_input_embeds = aligned_feature.flatten(-2).permute(0, 2, 1)
|
| 171 |
+
# bs*N, c, h, w -> bs*N,c,h*w -> bs*N, h*w, c
|
| 172 |
+
return unfold_input_embeds
|
| 173 |
+
|
| 174 |
+
def unfold(self, input_embeds, spatial_size=(24, 24), kernel_size=2, stride=2):
|
| 175 |
+
# input_embeds: bs, n, c
|
| 176 |
+
# spatial_size: feature map height, width
|
| 177 |
+
input_embeds = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)
|
| 178 |
+
unfold_func = nn.Unfold(kernel_size=kernel_size, stride=stride)
|
| 179 |
+
unfold_input_embeds = unfold_func(input_embeds) # bs, c* k**2, l
|
| 180 |
+
unfold_input_embeds = unfold_input_embeds.unflatten(1, [-1, kernel_size ** 2]).permute(0, 3, 2, 1).flatten(0, 1)
|
| 181 |
+
# bs, c*k**2, l -> bs, c, k**2, l -> bs, l, k**2, c -> bs*l, k**2, c
|
| 182 |
+
return unfold_input_embeds
|
| 183 |
+
|
| 184 |
+
def forward(self, x, tgt_size=(24, 24), attn_mask=None):
    """Cross-attend learned queries to the projected input features.

    x: (bs, n, c_in) visual features with n == tgt_size[0] * tgt_size[1].
    tgt_size: (height, width) of the key/value feature map.
    attn_mask: optional attention mask forwarded to self.attn.
    Returns: (bs, l, c_out) resampled tokens after self.proj.
    """
    # NOTE(review): unconditional bfloat16 cast -- presumably matches the
    # model's compute dtype; confirm against the surrounding training setup.
    x = x.to(torch.bfloat16)
    dtype = x.dtype
    bs = x.shape[0]

    key_height, key_width = tgt_size
    # Resize the learned positional grid to the key's spatial size.
    key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))

    x = self.ln_kv(self.kv_proj(x))
    q = self.ln_q(self.query)  # [:num_valid_query]

    # Queries add the native (grid_size) pos grid; keys add the resized one.
    query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
    key = x + key_pos_embed[None].to(dtype=dtype)
    value = x

    # kernel_size=1 grouping: each query token becomes its own group;
    # keys/values are adaptively pooled to matching region groups.
    query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)
    key = self.adapt_unflod(key, spatial_size=(key_height, key_width))
    value = self.adapt_unflod(value, spatial_size=(key_height, key_width))

    # permute to (seq, batch, dim) -- the layout self.attn expects here.
    out, _ = self.attn(
        query.permute(1, 0, 2),
        key.permute(1, 0, 2),
        value.permute(1, 0, 2),
        attn_mask=attn_mask,
    )
    # out: (1, bs*l, c) -> (bs, l, c)
    x = out[0].unflatten(0, [bs, -1])
    x = self.ln_post(x)
    return x @ self.proj
|
| 216 |
+
|
| 217 |
+
def _repeat(self, query, N: int):
|
| 218 |
+
return query.unsqueeze(0).repeat(N, 1, 1)
|
VLMEvalKit-sudoku/llava/model/utils.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import AutoConfig
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def auto_upgrade(config):
    """Interactively upgrade an old (v0) LLaVA checkpoint config in place.

    config: path (or hub id) of the checkpoint. If the path string contains
    "llava" but the stored config's model_type does not, the checkpoint was
    written by the pre-"llava" code base; after user confirmation the
    model_type and architectures fields are rewritten and saved back.
    Exits the process with status 1 if the user declines the upgrade.
    """
    import sys

    cfg = AutoConfig.from_pretrained(config)
    # Heuristic: the checkpoint *path* mentions llava while the stored
    # config predates the "llava" model_type.
    if "llava" in config and "llava" not in cfg.model_type:
        assert cfg.model_type == "llama"
        print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.")
        print("You must upgrade the checkpoint to the new code base (this can be done automatically).")
        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
        if confirm.lower() in ["y", "yes"]:
            print("Upgrading checkpoint...")
            assert len(cfg.architectures) == 1
            # Mutates the config *class* attribute (as in upstream LLaVA):
            # every instance of this config class will see the new model_type.
            setattr(cfg.__class__, "model_type", "llava")
            cfg.architectures[0] = "LlavaLlamaForCausalLM"
            cfg.save_pretrained(config)
            print("Checkpoint upgraded.")
        else:
            print("Checkpoint upgrade aborted.")
            # sys.exit instead of the bare `exit` helper: `exit` is injected
            # by the `site` module and is not guaranteed to exist (e.g. when
            # run with `python -S` or in frozen/embedded interpreters).
            sys.exit(1)
|