diff --git a/.gitattributes b/.gitattributes
index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..5de611dc266438ef25adab126aecc4eaf80f9960 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
+eval_results/GNE_ShapeGrid_sudoku.xlsx filter=lfs diff=lfs merge=lfs -text
diff --git a/VLMEvalKit-sudoku/assets/apple.jpg b/VLMEvalKit-sudoku/assets/apple.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a54fdf847d603f7ed44956810d4c18d5d054996c
--- /dev/null
+++ b/VLMEvalKit-sudoku/assets/apple.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdedbfa6896ecdeff000235019adf36199caeafa6eafc8474dea4824e143599b
+size 4576
diff --git a/VLMEvalKit-sudoku/docs/en/.readthedocs.yaml b/VLMEvalKit-sudoku/docs/en/.readthedocs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c6cf8e2a075ea15f39dc7aba8faa98f464f52fe6
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/.readthedocs.yaml
@@ -0,0 +1,17 @@
+version: 2
+
+# Set the version of Python and other tools you might need
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.8"
+
+formats:
+ - epub
+
+sphinx:
+ configuration: docs/en/conf.py
+
+python:
+ install:
+ - requirements: requirements/docs.txt
diff --git a/VLMEvalKit-sudoku/docs/en/Contributors.md b/VLMEvalKit-sudoku/docs/en/Contributors.md
new file mode 100644
index 0000000000000000000000000000000000000000..ddf50c6c4eb7caf352fe29069e65a93a2d4cac49
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/Contributors.md
@@ -0,0 +1,21 @@
+# Contributors
+
+## Contributors w. 3+ Major Contributions
+
+> In this section, we list all the contributors who have made significant contributions (3+) to the development of VLMEvalKit.
+
+New Qualified Contributors (2024.09):
+
+1. [amitbcp](https://github.com/amitbcp): The contributor helped support MUIRBench, Phi-3.5, Idefics3, VILA, and xGen-MM
+2. [czczup](https://github.com/czczup): The contributor helped support the InternVL Series (V1.5, Mini-InternVL, V2, etc.)
+3. [DseidLi](https://github.com/DseidLi): The contributor helped support LLaVA-OneVision, GQA, and developed the readthedocs site for VLMEvalKit
+4. [mayubo2333](https://github.com/mayubo2333): The contributor helped support MMLongBench, SlideVQA, and DUDE
+5. [sun-hailong](https://github.com/sun-hailong): The contributor helped support A-OKVQA, Parrot, MMMB, and MTL-MMBench
+6. [PhoenixZ810](https://github.com/PhoenixZ810): The contributor helped support Video-ChatGPT, Chat-UniVI, and Llama-VID
+7. [Cuiunbo](https://github.com/Cuiunbo): The contributor helped support OmniLMM-12B, MiniCPM-V Series (V1, V2, V2.5)
+
+## Full Contributor List
+
+> In this section, we list all the contributors as well as their corresponding contributions to the development of VLMEvalKit.
+
+TBD.
diff --git a/VLMEvalKit-sudoku/docs/en/EvalByLMDeploy.md b/VLMEvalKit-sudoku/docs/en/EvalByLMDeploy.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc0a8c38c26542eb44acdc74b28aaca9755735ba
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/EvalByLMDeploy.md
@@ -0,0 +1,27 @@
+# Using LMDeploy to Accelerate Evaluation and Inference
+
+VLMEvalKit supports testing VLM models deployed by LMDeploy. Below, we use InternVL2-8B as an example to show how to test the model.
+
+## Step 0: Install LMDeploy
+
+```bash
+pip install lmdeploy
+```
+For other installation methods, you can refer to LMDeploy's [documentation](https://github.com/InternLM/lmdeploy).
+
+## Step 1: Start the Inference Service
+
+```bash
+lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B
+```
+> [!IMPORTANT]
+> Since models in VLMEvalKit may have custom behaviors when building prompts for different datasets, such as InternVL2's handling of HallusionBench, it is necessary to specify `--model-name` when starting the server. This allows the VLMEvalKit to select appropriate prompt construction strategy based on the name when using the LMDeploy API.
+>
+> If `--server-port` is specified, the corresponding environment variable `LMDEPLOY_API_BASE` needs to be set.
+
+
+## Step 2: Evaluation
+
+```bash
+python run.py --data MMStar --model InternVL2-8B --verbose --api-nproc 64
+```
diff --git a/VLMEvalKit-sudoku/docs/en/_templates/autosummary/class.rst b/VLMEvalKit-sudoku/docs/en/_templates/autosummary/class.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4c3a7a9abf5c5b14ac3ef3b00a2f070480295358
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/_templates/autosummary/class.rst
@@ -0,0 +1,13 @@
+.. role:: hidden
+ :class: hidden-section
+.. currentmodule:: {{ module }}
+
+
+{{ name | underline}}
+
+.. autoclass:: {{ name }}
+ :members:
+
+..
+ autogenerated from _templates/autosummary/class.rst
+ note it does not have :inherited-members:
diff --git a/VLMEvalKit-sudoku/docs/en/conf.py b/VLMEvalKit-sudoku/docs/en/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..360c1622dd18fcca8c033af9122383cd66c5f686
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/conf.py
@@ -0,0 +1,234 @@
+# flake8: noqa
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import ast
+import subprocess
+import sys
+
+import pytorch_sphinx_theme
+from sphinx.builders.html import StandaloneHTMLBuilder
+
+sys.path.insert(0, os.path.abspath('../../'))
+
+# -- Project information -----------------------------------------------------
+
+project = 'VLMEvalKit'
+copyright = '2023, VLMEvalKit'
+author = 'VLMEvalKit Authors'
+
+# The full version, including alpha/beta/rc tags
+version_file = '../../vlmeval/__init__.py'
+
+
+def get_version():
+ with open(version_file, 'r') as f:
+ file_content = f.read()
+ # Parse the file content into an abstract syntax tree (AST)
+ tree = ast.parse(file_content, filename=version_file)
+
+ # Iterate through the body of the AST, looking for an assignment to __version__
+ for node in tree.body:
+ if isinstance(node, ast.Assign):
+ for target in node.targets:
+ if isinstance(target, ast.Name) and target.id == '__version__':
+ return node.value.s
+ raise ValueError('__version__ not found')
+
+
+release = get_version()
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.viewcode',
+ 'myst_parser',
+ 'sphinx_copybutton',
+ 'sphinx_tabs.tabs',
+ 'notfound.extension',
+ 'sphinxcontrib.jquery',
+ 'sphinx_design',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+source_suffix = {
+ '.rst': 'restructuredtext',
+ '.md': 'markdown',
+}
+
+language = 'en'
+
+# The master toctree document.
+root_doc = 'index'
+html_context = {
+ 'github_version': 'latest',
+}
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'pytorch_sphinx_theme'
+html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# yapf: disable
+html_theme_options = {
+ 'menu': [
+ {
+ 'name': 'GitHub',
+ 'url': 'https://github.com/open-compass/VLMEvalKit'
+ },
+ ],
+ # Specify the language of shared menu
+ 'menu_lang': 'en',
+ # Disable the default edit on GitHub
+ 'default_edit_on_github': False,
+}
+# yapf: enable
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+html_css_files = [
+ 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css',
+ 'css/readthedocs.css'
+]
+html_js_files = [
+ 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js',
+ 'js/custom.js'
+]
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'vlmevalkitdoc'
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author,
+ 'manual'),
+]
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author],
+ 1)]
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author,
+ 'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.',
+ 'Miscellaneous'),
+]
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# set priority when building html
+StandaloneHTMLBuilder.supported_image_types = [
+ 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg'
+]
+
+# -- Extension configuration -------------------------------------------------
+# Ignore >>> when copying code
+copybutton_prompt_text = r'>>> |\.\.\. '
+copybutton_prompt_is_regexp = True
+
+# Auto-generated header anchors
+myst_heading_anchors = 3
+# Enable "colon_fence" extension of myst.
+myst_enable_extensions = ['colon_fence', 'dollarmath']
+
+# Configuration for intersphinx
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+ 'numpy': ('https://numpy.org/doc/stable', None),
+ 'torch': ('https://pytorch.org/docs/stable/', None),
+ 'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None),
+ 'transformers':
+ ('https://huggingface.co/docs/transformers/main/en/', None),
+}
+napoleon_custom_sections = [
+ # Custom sections for data elements.
+ ('Meta fields', 'params_style'),
+ ('Data fields', 'params_style'),
+]
+
+# Disable docstring inheritance
+autodoc_inherit_docstrings = False
+# Mock some imports during generate API docs.
+autodoc_mock_imports = ['rich', 'attr', 'einops']
+# Disable displaying type annotations, these can be very verbose
+autodoc_typehints = 'none'
+
+# The not found page
+notfound_template = '404.html'
diff --git a/VLMEvalKit-sudoku/docs/en/docutils.conf b/VLMEvalKit-sudoku/docs/en/docutils.conf
new file mode 100644
index 0000000000000000000000000000000000000000..0c00c84688701117f231fd0c8ec295fb747b7d8f
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/en/docutils.conf
@@ -0,0 +1,2 @@
+[html writers]
+table_style: colwidths-auto
diff --git a/VLMEvalKit-sudoku/docs/ja/README_ja.md b/VLMEvalKit-sudoku/docs/ja/README_ja.md
new file mode 100644
index 0000000000000000000000000000000000000000..5bf9564b098bec3748712b150d555ef963c400b9
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/ja/README_ja.md
@@ -0,0 +1,117 @@
+
+
+**VLMEvalKit**(pythonパッケージ名は**vlmeval**)は、**大規模視覚言語モデル(LVLMs)**の**オープンソース評価ツールキット**です。このツールキットは、複数のリポジトリでのデータ準備という重労働なしに、さまざまなベンチマークでLVLMsの**ワンコマンド評価**を可能にします。VLMEvalKitでは、すべてのLVLMsに対して**生成ベースの評価**を採用し、**正確なマッチング**と**LLMベースの回答抽出**の両方で得られた評価結果を提供します。
+
+PS: 日本語の README には最新のアップデートがすべて含まれていない場合があります。英語版をご確認ください。
+
+## 📊 データセット、モデル、および評価結果
+
+**公式のマルチモーダルリーダーボードでのパフォーマンス数値は、ここからダウンロードできます!**
+
+[**OpenVLM Leaderboard**](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard): [すべての詳細な結果をダウンロード](http://opencompass.openxlab.space/assets/OpenVLM.json)。
+
+**Supported Benchmarks** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされているベンチマーク(70以上)を表示してください。
+
+**Supported LMMs** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされている LMMs(200以上)を表示してください。
+
+**Transformersバージョンの推奨事項:**
+
+特定のtransformerバージョンで一部のVLMが実行できない可能性があることに注意してください。各VLMを評価するために、以下の設定を推奨します:
+
+- **`transformers==4.33.0`を使用してください**: `Qwenシリーズ`, `Monkeyシリーズ`, `InternLM-XComposerシリーズ`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICSシリーズ`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4シリーズ`, `InstructBLIPシリーズ`, `PandaGPT`, `VXVERSE`, `GLM-4v-9B`.
+- **`transformers==4.37.0`を使用してください**: `LLaVAシリーズ`, `ShareGPT4Vシリーズ`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLMシリーズ`, `EMU2シリーズ`, `Yi-VLシリーズ`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VLシリーズ`, `InternVLシリーズ`, `Cambrianシリーズ`, `VILA-VLシリーズ`.
+- **`transformers==4.40.0`を使用してください**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`.
+- **`transformers==4.42.0`を使用してください**: `AKI`.
+- **`transformers==latest`を使用してください**: `LLaVA-Nextシリーズ`, `PaliGemma-3B`, `Chameleon-VLシリーズ`, `Video-LLaVA-7B-HF`, `Ovis1.5シリーズ`, `Mantisシリーズ`, `MiniCPM-V2.6`.
+
+```python
+# デモ
+from vlmeval.config import supported_VLM
+model = supported_VLM['idefics_9b_instruct']()
+# 単一画像のフォワード
+ret = model.generate(['assets/apple.jpg', 'この画像には何がありますか?'])
+print(ret) # この画像には葉がついた赤いリンゴがあります。
+# 複数画像のフォワード
+ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', '提供された画像にはリンゴが何個ありますか?'])
+print(ret) # 提供された画像にはリンゴが2個あります。
+```
+
+## 🏗️ クイックスタート
+
+クイックスタートガイドについては、[クイックスタート](/docs/en/Quickstart.md)を参照してください。
+
+## 🛠️ 開発ガイド
+
+カスタムベンチマーク、VLMsを開発するか、単に**VLMEvalKit**に他のコードを貢献する場合は、[開発ガイド](/docs/en/Development.md)を参照してください。
+
+コミュニティからの共有を奨励し、それに応じたクレジットを共有するために、次回のレポート更新では以下のことを実施します:
+
+- 全ての貢献に対して感謝の意を示します
+- 新しいモデル、評価セット、または主要な機能への3つ以上の主要な貢献を持つ貢献者は、テクニカルレポートの著者リストに加わることができます。適格な貢献者は、issueを作成するか、または[VLM評価キット ディスコードチャンネル](https://discord.com/invite/evDT4GZmxN)で kennyutc にDMを送ることができます。私たちはそれに応じてフォローアップします。
+
+## 🎯 VLMEvalKitの目標
+
+**このコードベースは以下を目的として設計されています:**
+
+1. 研究者や開発者が既存のLVLMsを評価し、評価結果を**簡単に再現できるようにする**ための**使いやすい**、**オープンソースの評価ツールキット**を提供します。
+2. VLMの開発者が自分のモデルを簡単に評価できるようにします。複数のサポートされているベンチマークでVLMを評価するには、単一の`generate_inner()`関数を**実装するだけで**、他のすべてのワークロード(データのダウンロード、データの前処理、予測の推論、メトリックの計算)はコードベースによって処理されます。
+
+**このコードベースは以下を目的として設計されていません:**
+
+1. すべての**第三者ベンチマーク**の元の論文で報告された正確な精度数値を再現すること。その理由は2つあります:
+ 1. VLMEvalKitは、すべてのVLMに対して**生成ベースの評価**を使用します(オプションで**LLMベースの回答抽出**を使用)。一方、一部のベンチマークは異なるアプローチを使用する場合があります(SEEDBenchはPPLベースの評価を使用します)。これらのベンチマークについては、対応する結果で両方のスコアを比較します。開発者には、コードベースで他の評価パラダイムをサポートすることをお勧めします。
+ 2. デフォルトでは、すべてのVLMに対して同じプロンプトテンプレートを使用してベンチマークを評価します。一方、**一部のVLMには特定のプロンプトテンプレートがある**場合があります(現時点ではコードベースでカバーされていない場合があります)。VLMの開発者には、現在カバーされていない場合でも、VLMEvalKitで独自のプロンプトテンプレートを実装することをお勧めします。これにより、再現性が向上します。
+
+## 🖊️ 引用
+
+この作業が役立つ場合は、このリポジトリに**スター🌟**を付けてください。サポートありがとうございます!
+
+[](https://github.com/open-compass/VLMEvalKit/stargazers)
+
+研究でVLMEvalKitを使用する場合、または公開されたオープンソースの評価結果を参照する場合は、以下のBibTeXエントリと、使用した特定のVLM/ベンチマークに対応するBibTexエントリを使用してください。
+
+```bib
+@misc{duan2024vlmevalkit,
+ title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models},
+ author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen},
+ year={2024},
+ eprint={2407.11691},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV},
+ url={https://arxiv.org/abs/2407.11691},
+}
+```
+
+🔝Top に戻る
+
+[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors
+[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square
+[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members
+[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square
+[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues
+[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square
+[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE
+[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square
+[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers
+[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/ConfigSystem.md b/VLMEvalKit-sudoku/docs/zh-CN/ConfigSystem.md
new file mode 100644
index 0000000000000000000000000000000000000000..14e8d49564ec5956bb6b31b3bab161be2cee402b
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/ConfigSystem.md
@@ -0,0 +1,69 @@
+
+# 配置系统
+
+默认情况下,VLMEvalKit通过在`run.py`脚本中使用`--model`和`--data`参数设置模型名称(在`/vlmeval/config.py`中定义)和数据集名称(在`vlmeval/dataset/__init__.py` 或 `vlmeval/dataset/video_dataset_config.py` 中定义)来启动评估。这种方法在大多数情况下简单且高效,但当用户希望使用不同设置评估多个模型/数据集时,可能不够灵活。
+
+为了解决这个问题,VLMEvalKit提供了一个更灵活的配置系统。用户可以在json文件中指定模型和数据集设置,并通过`--config`参数将配置文件的路径传递给`run.py`脚本。以下是一个示例配置json:
+
+```json
+{
+ "model": {
+ "GPT4o_20240806_T00_HIGH": {
+ "class": "GPT4V",
+ "model": "gpt-4o-2024-08-06",
+ "temperature": 0,
+ "img_detail": "high"
+ },
+ "GPT4o_20240806_T10_Low": {
+ "class": "GPT4V",
+ "model": "gpt-4o-2024-08-06",
+ "temperature": 1.0,
+ "img_detail": "low"
+ },
+ "GPT4o_20241120": {}
+ },
+ "data": {
+ "MME-RealWorld-Lite": {
+ "class": "MMERealWorld",
+ "dataset": "MME-RealWorld-Lite"
+ },
+ "MMBench_DEV_EN_V11": {
+ "class": "ImageMCQDataset",
+ "dataset": "MMBench_DEV_EN_V11"
+ },
+ "MMBench_Video_8frame_nopack":{},
+ "Video-MME_16frame_subs": {
+ "class": "VideoMME",
+ "dataset": "Video-MME",
+ "nframe": 16,
+ "use_subtitle": true
+ }
+ }
+}
+```
+
+配置json的解释:
+
+1. 现在我们支持两个字段:`model`和`data`,每个字段都是一个字典。字典的键是模型/数据集的名称(由用户设置),值是模型/数据集的设置。
+2. 对于`model`中的项目,值是一个包含以下键的字典:
+ - `class`:模型的类名,应该是`vlmeval/vlm/__init__.py`(开源模型)或`vlmeval/api/__init__.py`(API模型)中定义的类名。
+ - 其他kwargs:其他kwargs是模型特定的参数,请参考模型类的定义以获取详细用法。例如,`model`、`temperature`、`img_detail`是`GPT4V`类的参数。值得注意的是,大多数模型类都需要`model`参数。
+ - Tip:在位于`vlmeval/config.py`的变量`supported_VLM`中的已经被定义的模型可以作为`model`的键,而不需要填对应的值即可启动。例如,`GPT4o_20240806_T00_HIGH: {}`是等价于`GPT4o_20240806_T00_HIGH: {'class': 'GPT4V', 'model': 'gpt-4o-2024-08-06', 'temperature': 0, 'img_size': -1, 'img_detail': 'high', 'retry': 10, 'verbose': False}`。
+3. 对于字典`data`,我们建议用户使用官方数据集名称作为键(或键的一部分),因为我们经常根据数据集名称确定后处理/判断设置。对于`data`中的项目,值是一个包含以下键的字典:
+ - `class`:数据集的类名,应该是`vlmeval/dataset/__init__.py`中定义的类名。
+ - 其他kwargs:其他kwargs是数据集特定的参数,请参考数据集类的定义以获取详细用法。通常,大多数数据集类都需要`dataset`参数。大多数视频数据集类都需要 `nframe` 或 `fps` 参数。
+ - Tip:在位于`vlmeval/dataset/video_dataset_config.py`的变量`supported_video_dataset`中的已经被定义的数据集可以作为`data`的键,而不需要填对应的值即可启动。例如,`MMBench_Video_8frame_nopack: {}`是等价于`MMBench_Video_8frame_nopack: {'class': 'MMBenchVideo', 'dataset': 'MMBench-Video', 'nframe': 8, 'pack': False}`。
+
+将示例配置json保存为`config.json`,您可以通过以下命令启动评估:
+
+```bash
+python run.py --config config.json
+```
+
+这将在工作目录`$WORK_DIR`下生成以下输出文件(格式为`{$WORK_DIR}/{$MODEL_NAME}/{$MODEL_NAME}_{$DATASET_NAME}_*`):
+
+- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MME-RealWorld-Lite*`
+- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MME-RealWorld-Lite*`
+- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MMBench_DEV_EN_V11*`
+- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MMBench_DEV_EN_V11*`
+......
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/EvalByLMDeploy.md b/VLMEvalKit-sudoku/docs/zh-CN/EvalByLMDeploy.md
new file mode 100644
index 0000000000000000000000000000000000000000..cdb46c70f0cc9d2620e0a98471f0c9b354472518
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/EvalByLMDeploy.md
@@ -0,0 +1,28 @@
+# 使用 LMDeploy 加速评测推理
+
+VLMEvalKit 支持测试由 LMDeploy 部署的 VLM 模型,下面以 InternVL2-8B 为例,展示如何测试模型
+
+## 第0步 安装 LMDeploy
+
+```bash
+pip install lmdeploy
+```
+
+其他安装方式可以参考 LMDeploy 的[文档](https://github.com/InternLM/lmdeploy)
+
+## 第1步 启动推理服务
+
+```bash
+lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B
+```
+> [!IMPORTANT]
+> 因为 VLMEvalKit 中的模型对于不同数据集在构建 prompt 时可能有自定义行为,如 InternVL2 对于 HallusionBench 的处理,所以,server 端在启动的时候需要指定 `--model-name`,这样在使用 LMDeploy api 时可以根据名字选择合适的 prompt 构建策略。
+>
+> 如果指定了 `--server-port`,需要设置对应的环境变量 `LMDEPLOY_API_BASE`
+
+
+## 第2步 评测
+
+```bash
+python run.py --data MMStar --model InternVL2-8B --verbose --api-nproc 64
+```
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/README_zh-CN.md b/VLMEvalKit-sudoku/docs/zh-CN/README_zh-CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..92c526fcb3d2d5766469773c9eaa51196df53b4a
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/README_zh-CN.md
@@ -0,0 +1,131 @@
+
+
+
+
+
+VLMEvalKit: 一种多模态大模型评测工具
+
+[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link]
+
+[English](/README.md) | 简体中文 | [日本語](/docs/ja/README_ja.md)
+
+
+🏆 OpenCompass 排行榜 •
+
+🏗️ 快速开始 •
+
+📊 数据集和模型 •
+
+🛠️ 开发指南 •
+
+🎯 我们的目标 •
+
+🖊️ 引用
+
+
+🤗 HuggingFace 排行榜 (存档全部性能) •
+
+🤗 原始评测记录 •
+
+🔊 Discord •
+
+📝 技术报告
+
+
+**VLMEvalKit** (python 包名为 **vlmeval**) 是一款专为大型视觉语言模型 (Large Vision-Language Models, LVLMs) 评测而设计的开源工具包。该工具支持在各种基准测试上对大型视觉语言模型进行**一键评估**,无需进行繁重的数据准备工作,让评估过程更加简便。在 VLMEvalKit 中,我们对所有大型视觉语言模型生成的结果进行评测,并提供基于**精确匹配**与基于 **LLM 的答案提取**两种评测结果。
+
+## 🆕 更新
+
+- **[2025-04-29]** 优化 `torchrun` 启动逻辑:目前 `torchrun` 启动时,若进程数为 M,机器 GPU 卡数为 N,将会自动调整每个进程分配的 GPU 数量为 `N // M`。目前此分配方式适用于 `transformers`, `lmdeploy` 推理后端,`vllm` 推理后端仅支持使用 python 启动 🔥🔥🔥
+- **[2025-02-20]** 支持新模型:**InternVL2.5 series, QwenVL2.5 series, QVQ-72B, Doubao-VL, Janus-Pro-7B, MiniCPM-o-2.6, InternVL2-MPO, LLaVA-CoT, Hunyuan-Standard-Vision, Ovis2, Valley, SAIL-VL, Ross, Long-VITA, EMU3, SmolVLM**。支持新基准:**MMMU-Pro, WeMath, 3DSRBench, LogicVista, VL-RewardBench, CC-OCR, CG-Bench, CMMMU, WorldSense**。请参考[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)以获取更多信息。感谢社区的各位贡献者 🔥🔥🔥
+- **[2024-11-21]** 集成了一个新的配置系统,以实现更灵活的评估设置。查看[文档](/docs/zh-CN/ConfigSystem.md)或运行`python run.py --help`了解更多详情 🔥🔥🔥
+- **[2024-11-21]** 支持 **[QSpatial](https://andrewliao11.github.io/spatial_prompt/)**,一个用于定量空间推理的多模态基准(例如,确定大小/距离),感谢 **[andrewliao11](https://github.com/andrewliao11)** 提供官方支持 🔥🔥🔥
+- **[2024-11-21]** 支持 **[MM-Math](https://github.com/kge-sun/mm-math)**,一个包含约6K初中多模态推理数学问题的新多模态数学基准。GPT-4o-20240806在该基准上达到了22.5%的准确率 🔥🔥🔥
+- **[2024-11-16]** 支持 **[OlympiadBench](https://github.com/OpenBMB/OlympiadBench)**,一个多模态基准,包含奥林匹克级别的数学和物理问题 🔥🔥🔥
+- **[2024-11-16]** 支持 **[WildVision](https://huggingface.co/datasets/WildVision/wildvision-bench)**,一个基于多模态竞技场数据的主观多模态基准 🔥🔥🔥
+- **[2024-11-13]** 支持 **[MIA-Bench](https://arxiv.org/abs/2407.01509)**,一个多模态指令跟随基准 🔥🔥🔥
+- **[2024-11-08]** 支持 **[Aria](https://arxiv.org/abs/2410.05993)**,一个多模态原生 MoE 模型,感谢 **[teowu](https://github.com/teowu)** 🔥🔥🔥
+- **[2024-11-04]** 支持 **[WorldMedQA-V](https://www.arxiv.org/abs/2410.12722)**,该基准包含 1000 多个医学 VQA 问题,涵盖巴西、以色列、日本、西班牙等四个国家的语言,以及它们的英文翻译 🔥🔥🔥
+
+## 🏗️ 快速开始
+
+请参阅[**快速开始**](/docs/zh-CN/Quickstart.md)获取入门指南。
+
+## 📊 评测结果,支持的数据集和模型
+
+### 评测结果
+
+**[OpenVLM Leaderboard](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard)**: **[下载全部细粒度测试结果](http://opencompass.openxlab.space/assets/OpenVLM.json)**.
+
+请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported Benchmarks** 标签,以查看所有支持的图像和视频基准(70+)。
+
+请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported LMMs** 标签,以查看所有支持的 LMMs,包括商业 API、开源模型等(200+)。
+
+### 其他
+
+**Transformers 的版本推荐:**
+
+**请注意**,某些 VLM 可能无法在某些特定的 transformers 版本下运行,我们建议使用以下设置来评估对应的VLM:
+
+- **请用** `transformers==4.33.0` **来运行**: `Qwen series`, `Monkey series`, `InternLM-XComposer Series`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICS series`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4 series`, `InstructBLIP series`, `PandaGPT`, `VXVERSE`.
+- **请用** `transformers==4.37.0` **来运行**: `LLaVA series`, `ShareGPT4V series`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLM Series`, `EMU2 Series`, `Yi-VL Series`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VL series`, `InternVL series`, `Cambrian Series`, `VILA Series`, `Llama-3-MixSenseV1_1`, `Parrot-7B`, `PLLaVA Series`.
+- **请用** `transformers==4.40.0` **来运行**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`.
+- **请用** `transformers==4.42.0` **来运行**: `AKI`.
+- **请用** `transformers==latest` **来运行**: `LLaVA-Next series`, `PaliGemma-3B`, `Chameleon series`, `Video-LLaVA-7B-HF`, `Ovis series`, `Mantis series`, `MiniCPM-V2.6`, `OmChat-v2.0-13B-sinlge-beta`, `Idefics-3`, `GLM-4v-9B`, `VideoChat2-HD`.
+
+**如何测试一个 VLM 是否可以正常运行:**
+
+```python
+from vlmeval.config import supported_VLM
+model = supported_VLM['idefics_9b_instruct']()
+# 前向单张图片
+ret = model.generate(['assets/apple.jpg', 'What is in this image?'])
+print(ret) # 这张图片上有一个带叶子的红苹果
+# 前向多张图片
+ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', 'How many apples are there in the provided images? '])
+print(ret) # 提供的图片中有两个苹果
+```
+
+## 🛠️ 开发指南
+
+要开发自定义评测数据集,支持其他 VLMs,或为 VLMEvalKit 贡献代码,请参阅[**开发指南**](/docs/zh-CN/Development_zh-CN.md)。
+
+为激励来自社区的共享并分享相应的 credit,在下一次 report 更新中,我们将:
+
+- 致谢所有的 contribution
+- 具备三个或以上主要贡献 (支持新模型、评测集、或是主要特性) 的贡献者将可以加入技术报告的作者列表 。合条件的贡献者可以创建 issue 或是在 [VLMEvalKit Discord Channel](https://discord.com/invite/evDT4GZmxN) 私信 kennyutc,我们将进行跟进
+
+## 🎯 VLMEvalKit 的目标
+
+**该代码库的设计目标是:**
+
+1. 提供一个**易于使用**的**开源评估工具包**,方便研究人员和开发人员评测现有的多模态大模型,并使评测结果**易于复现**。
+2. 使 VLM 开发人员能够轻松地评测自己的模型。在多个支持的基准测试上评估 VLM,只需实现一个 `generate_inner()` 函数,所有其他工作负载(数据下载、数据预处理、预测推理、度量计算)都由代码库处理。
+
+**该代码库的设计目标不是:**
+
+复现所有**第三方基准测试**原始论文中报告的准确数字。有两个相关的原因:
+1. VLMEvalKit 对所有 VLMs 使用基于生成的评估(可选使用基于 LLM 的答案提取)。同时,一些基准测试可能官方使用不同的方法(*例如,SEEDBench 使用基于 PPL 的评估*)。对于这些基准测试,我们在相应的结果中比较两个得分。我们鼓励开发人员在代码库中支持其他评估范式。
+2. 默认情况下,我们对所有多模态模型使用相同的提示模板来评估基准测试。同时,**一些多模态模型可能有他们特定的提示模板**(目前可能未在代码库中涵盖)。我们鼓励 VLM 的开发人员在 VLMEvalKit 中实现自己的提示模板,如果目前未覆盖。这将有助于提高可复现性。
+
+## 🖊️ 引用
+
+如果我们的工作对您有所帮助,请考虑 **star🌟** VLMEvalKit。感谢支持!
+
+[](https://github.com/open-compass/VLMEvalKit/stargazers)
+
+如果您在研究中使用了 VLMEvalKit,或希望参考已发布的开源评估结果,请使用以下 BibTeX 条目以及与您使用的特定 VLM / 基准测试相对应的 BibTex 条目。
+
+```bib
+@misc{duan2024vlmevalkit,
+ title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models},
+ author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen},
+ year={2024},
+ eprint={2407.11691},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV},
+ url={https://arxiv.org/abs/2407.11691},
+}
+```
+
+🔝回到顶部
+
+[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors
+[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square
+[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members
+[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square
+[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues
+[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square
+[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE
+[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square
+[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers
+[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/_static/image/logo.svg b/VLMEvalKit-sudoku/docs/zh-CN/_static/image/logo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..043530572afb48d0eac26b4b53d448aae6e9a9af
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/_static/image/logo.svg
@@ -0,0 +1,24 @@
+
+
+
+Created with Fabric.js 5.3.0
+
+
+
+
+
+
+
+
+
+
+
+
+ VLMEvalKit
+
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/_templates/autosummary/class.rst b/VLMEvalKit-sudoku/docs/zh-CN/_templates/autosummary/class.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4c3a7a9abf5c5b14ac3ef3b00a2f070480295358
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/_templates/autosummary/class.rst
@@ -0,0 +1,13 @@
+.. role:: hidden
+ :class: hidden-section
+.. currentmodule:: {{ module }}
+
+
+{{ name | underline}}
+
+.. autoclass:: {{ name }}
+ :members:
+
+..
+ autogenerated from _templates/autosummary/class.rst
+ note it does not have :inherited-members:
diff --git a/VLMEvalKit-sudoku/docs/zh-CN/conf.py b/VLMEvalKit-sudoku/docs/zh-CN/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..689daa6177913b918b6a01fe1e1ce5a6d4ca505f
--- /dev/null
+++ b/VLMEvalKit-sudoku/docs/zh-CN/conf.py
@@ -0,0 +1,242 @@
+# flake8: noqa
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import ast
+import subprocess
+import sys
+
+import pytorch_sphinx_theme
+from sphinx.builders.html import StandaloneHTMLBuilder
+
+sys.path.insert(0, os.path.abspath('../../'))
+
+# -- Project information -----------------------------------------------------
+
+project = 'VLMEvalKit'
+copyright = '2023, VLMEvalKit'
+author = 'VLMEvalKit Authors'
+
+# The full version, including alpha/beta/rc tags
+version_file = '../../vlmeval/__init__.py'
+
+
+def get_version():
+    """Extract ``__version__`` from ``vlmeval/__init__.py`` without importing it.
+
+    Parses the file with :mod:`ast` so the docs build does not require the
+    package's runtime dependencies to be installed.
+
+    Returns:
+        str: the version string assigned to ``__version__``.
+
+    Raises:
+        ValueError: if no ``__version__`` assignment is found.
+    """
+    with open(version_file, 'r') as f:
+        file_content = f.read()
+    # Parse the file content into an abstract syntax tree (AST)
+    tree = ast.parse(file_content, filename=version_file)
+
+    # Iterate through the body of the AST, looking for an assignment to __version__
+    for node in tree.body:
+        if isinstance(node, ast.Assign):
+            for target in node.targets:
+                if isinstance(target, ast.Name) and target.id == '__version__':
+                    # NOTE(review): ``node.value.s`` is deprecated since Python 3.8
+                    # in favor of ``node.value.value`` — works, but worth updating.
+                    return node.value.s
+    raise ValueError('__version__ not found')
+
+
+release = get_version()
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.viewcode',
+ 'myst_parser',
+ 'sphinx_copybutton',
+ 'sphinx_tabs.tabs',
+ 'notfound.extension',
+ 'sphinxcontrib.jquery',
+ 'sphinx_design',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+source_suffix = {
+ '.rst': 'restructuredtext',
+ '.md': 'markdown',
+}
+
+language = 'cn'
+
+# The master toctree document.
+root_doc = 'index'
+html_context = {
+ 'github_version': 'latest',
+}
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'pytorch_sphinx_theme'
+html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# yapf: disable
+html_theme_options = {
+ 'menu': [
+ {
+ 'name': 'GitHub',
+ 'url': 'https://github.com/open-compass/VLMEvalKit'
+ },
+ ],
+ # Specify the language of shared menu
+ 'menu_lang': 'cn',
+ # Disable the default edit on GitHub
+ 'default_edit_on_github': False,
+}
+# yapf: enable
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+html_css_files = [
+ 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css',
+ 'css/readthedocs.css'
+]
+html_js_files = [
+ 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js',
+ 'js/custom.js'
+]
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'vlmevalkitdoc'
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author,
+ 'manual'),
+]
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author],
+ 1)]
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author,
+ 'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.',
+ 'Miscellaneous'),
+]
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# set priority when building html
+StandaloneHTMLBuilder.supported_image_types = [
+ 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg'
+]
+
+# -- Extension configuration -------------------------------------------------
+# Ignore >>> when copying code
+copybutton_prompt_text = r'>>> |\.\.\. '
+copybutton_prompt_is_regexp = True
+
+# Auto-generated header anchors
+myst_heading_anchors = 3
+# Enable "colon_fence" extension of myst.
+myst_enable_extensions = ['colon_fence', 'dollarmath']
+
+# Configuration for intersphinx
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+ 'numpy': ('https://numpy.org/doc/stable', None),
+ 'torch': ('https://pytorch.org/docs/stable/', None),
+ 'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None),
+ 'transformers':
+ ('https://huggingface.co/docs/transformers/main/en/', None),
+}
+napoleon_custom_sections = [
+ # Custom sections for data elements.
+ ('Meta fields', 'params_style'),
+ ('Data fields', 'params_style'),
+]
+
+# Disable docstring inheritance
+autodoc_inherit_docstrings = False
+# Mock some imports during generate API docs.
+autodoc_mock_imports = ['rich', 'attr', 'einops']
+# Disable displaying type annotations, these can be very verbose
+autodoc_typehints = 'none'
+
+# The not found page
+notfound_template = '404.html'
+
+
+def builder_inited_handler(app):
+    # Copy shared docs into this language tree before the Sphinx build starts.
+    # NOTE(review): relies on the current working directory being the docs
+    # source directory (relative ./ path) — confirm against the build setup.
+    subprocess.run(['./cp_origin_docs.sh'])
+
+
+def setup(app):
+    # Sphinx extension entry point: run the copy script once the builder is ready.
+    app.connect('builder-inited', builder_inited_handler)
diff --git a/VLMEvalKit-sudoku/llava/eval/eval_chartqa.py b/VLMEvalKit-sudoku/llava/eval/eval_chartqa.py
new file mode 100644
index 0000000000000000000000000000000000000000..041f805b80d38e44edddaf8600e6d6ed3140eb9f
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/eval_chartqa.py
@@ -0,0 +1,74 @@
+import os
+import argparse
+import json
+import re
+import sys
+
+print(sys.path)
+
+# from mova.eval.m4c_evaluator import ChartVQAEvaluator
+from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator, STVQAAccuracyEvaluator
+
+def get_args():
+    """Parse command-line options for the ChartQA evaluation script.
+
+    The caller is expected to pass either ``--result-file`` (a single
+    prediction JSONL) or ``--result-dir`` (a directory of ``.jsonl`` files).
+    ``--mid_result`` / ``--output_result`` are accepted but currently unused
+    (the corresponding dump code below is commented out).
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--annotation-file', type=str)
+    parser.add_argument('--result-file', type=str)
+    parser.add_argument('--result-dir', type=str)
+    parser.add_argument('--mid_result', type=str)
+    parser.add_argument('--output_result', type=str)
+    return parser.parse_args()
+
+
+def eval_single(annotation_file, result_file):
+    """Score one prediction file against the ChartQA annotations.
+
+    Args:
+        annotation_file: JSONL file; each line carries ``question_id``,
+            ``question`` and ``answer``.
+        result_file: JSONL file; each line carries ``question_id``,
+            ``prompt`` and ``text`` (the model prediction).
+
+    Returns:
+        tuple: (sample count, accuracy in percent, per-sample records with
+        the gold answer attached under ``"annotation"``).
+    """
+    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
+    print(experiment_name)
+    # annotations = json.load(open(annotation_file))['data']
+    annotations = [
+        json.loads(q) for q in open(os.path.expanduser(annotation_file), "r")
+    ]
+    # Key by (question_id, lower-cased question) so a prediction is matched
+    # to the exact question text, not just the id.
+    annotations = {(annotation['question_id'], annotation['question'].lower()): annotation for annotation in annotations}
+    results = [json.loads(line) for line in open(result_file)]
+
+    pred_list = []
+    mid_list = []
+    for result in results:
+        # A KeyError here means the prediction's prompt does not match the
+        # annotation's question text verbatim.
+        annotation = annotations[(result['question_id'], result['prompt'].lower())]
+        pred_list.append({
+            "pred_answer": result['text'].lower(),
+            "gt_answers": [annotation['answer'].lower()],
+        })
+        mid_list.append(result)
+        mid_list[-1]["annotation"] = annotation['answer']
+
+    # evaluator = ChartVQAEvaluator()
+    # acc, acc_list = evaluator.evaluate_relaxed_accuracy(pred_list)
+    # evaluator = TextVQAAccuracyEvaluator()
+    # Exact-match (STVQA-style) accuracy, not ChartQA "relaxed" accuracy.
+    evaluator = STVQAAccuracyEvaluator()
+    acc = evaluator.eval_pred_list(pred_list)
+    acc = 100. * acc
+    # for num, d in zip(acc_list, mid_list):
+    # d["acc"] = num
+    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), acc))
+    return len(pred_list), acc, mid_list
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    # Single-file mode.
+    if args.result_file is not None:
+        samples, acc, mid_result = eval_single(args.annotation_file, args.result_file)
+
+    # Directory mode: evaluate every .jsonl file in the directory.
+    if args.result_dir is not None:
+        for result_file in sorted(os.listdir(args.result_dir)):
+            if not result_file.endswith('.jsonl'):
+                print(f'Skipping {result_file}')
+                continue
+            samples, acc, mid_result = eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
+
+    # with open(args.mid_result, 'w') as f:
+    # json.dump(mid_result, f, indent=2)
+    # output_folder = os.path.dirname(args.output_result)
+    # print(output_folder)
+    # os.makedirs(os.path.dirname(output_folder), exist_ok=True)
+    # with open(args.output_result, 'w') as f:
+    # json.dump({'samples': samples, 'acc': acc}, f, indent=2)
diff --git a/VLMEvalKit-sudoku/llava/eval/eval_pope.py b/VLMEvalKit-sudoku/llava/eval/eval_pope.py
new file mode 100644
index 0000000000000000000000000000000000000000..65a04aa9ed614626d3c9d66c85fcb5dc8ea5d7b8
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/eval_pope.py
@@ -0,0 +1,84 @@
+import os
+import json
+import argparse
+
+def eval_pope(answers, label_file):
+    """Compute POPE yes/no classification metrics for one annotation split.
+
+    Args:
+        answers: list of dicts with a free-form ``'text'`` prediction;
+            each entry is normalized in place to 'yes'/'no'.
+        label_file: JSONL file whose lines carry a ``'label'`` field
+            ('yes'/'no'), assumed to be in the same order as ``answers``.
+
+    Prints TP/FP/TN/FN, accuracy, precision, recall, F1 and the yes-ratio.
+    NOTE(review): assumes len(answers) matches the number of labels with
+    identical ordering, and raises ZeroDivisionError if the model never
+    predicts 'yes' (TP + FP == 0) — confirm inputs before relying on it.
+    """
+    label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]
+
+    # Normalize free-form predictions to binary 'yes'/'no'.
+    for answer in answers:
+        text = answer['text']
+
+        # Only keep the first sentence
+        if text.find('.') != -1:
+            text = text.split('.')[0]
+
+        text = text.replace(',', '')
+        words = text.split(' ')
+        # Any negation token in the first sentence counts as a 'no'.
+        if 'No' in words or 'not' in words or 'no' in words:
+            answer['text'] = 'no'
+        else:
+            answer['text'] = 'yes'
+
+    # Encode labels: 'no' -> 0, anything else -> 1.
+    for i in range(len(label_list)):
+        if label_list[i] == 'no':
+            label_list[i] = 0
+        else:
+            label_list[i] = 1
+
+    pred_list = []
+    for answer in answers:
+        if answer['text'] == 'no':
+            pred_list.append(0)
+        else:
+            pred_list.append(1)
+
+    pos = 1
+    neg = 0
+    yes_ratio = pred_list.count(1) / len(pred_list)
+
+    # Confusion-matrix counts over (prediction, label) pairs.
+    TP, TN, FP, FN = 0, 0, 0, 0
+    for pred, label in zip(pred_list, label_list):
+        if pred == pos and label == pos:
+            TP += 1
+        elif pred == pos and label == neg:
+            FP += 1
+        elif pred == neg and label == neg:
+            TN += 1
+        elif pred == neg and label == pos:
+            FN += 1
+
+    print('TP\tFP\tTN\tFN\t')
+    print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))
+
+    precision = float(TP) / float(TP + FP)
+    recall = float(TP) / float(TP + FN)
+    f1 = 2*precision*recall / (precision + recall)
+    acc = (TP + TN) / (TP + TN + FP + FN)
+    print('Accuracy: {}'.format(acc))
+    print('Precision: {}'.format(precision))
+    print('Recall: {}'.format(recall))
+    print('F1 score: {}'.format(f1))
+    print('Yes ratio: {}'.format(yes_ratio))
+    # Compact one-line summary for copy/paste into result tables.
+    print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) )
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--annotation-dir", type=str)
+    parser.add_argument("--question-file", type=str)
+    parser.add_argument("--result-file", type=str)
+    args = parser.parse_args()
+
+    questions = [json.loads(line) for line in open(args.question_file)]
+    questions = {question['question_id']: question for question in questions}
+    answers = [json.loads(q) for q in open(args.result_file)]
+    # Evaluate every POPE split; files are named coco_pope_<category>.json.
+    for file in os.listdir(args.annotation_dir):
+        print(file)
+        print(answers[0]['question_id'])
+        assert file.startswith('coco_pope_')
+        assert file.endswith('.json')
+        # Strip the 'coco_pope_' prefix and the '.json' suffix.
+        category = file[10:-5]
+        print(category)
+        cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
+        print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
+        eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
+        print("====================================")
diff --git a/VLMEvalKit-sudoku/llava/eval/eval_science_qa_gpt4.py b/VLMEvalKit-sudoku/llava/eval/eval_science_qa_gpt4.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2ff17c915481fb556aba6ec816a9e08f519c515
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/eval_science_qa_gpt4.py
@@ -0,0 +1,104 @@
+import argparse
+import json
+import os
+import re
+import random
+from collections import defaultdict
+
+
+def get_args():
+    """Parse command-line options for the ScienceQA-vs-GPT4 comparison.
+
+    NOTE(review): ``type=list`` makes argparse split a command-line string
+    into individual characters; this only behaves as intended when the
+    default list of option letters is left unchanged.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--base-dir', type=str)
+    parser.add_argument('--gpt4-result', type=str)
+    parser.add_argument('--our-result', type=str)
+    parser.add_argument('--split', type=str, default='test')
+    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
+    return parser.parse_args()
+
+
+def convert_caps(results):
+    """Convert QA result records into COCO-caption-style dicts.
+
+    NOTE(review): not referenced anywhere in this script — presumably kept
+    for parity with the sibling ScienceQA eval scripts.
+    """
+    fakecaps = []
+    for result in results:
+        image_id = result['question_id']
+        caption = result['text']
+        fakecaps.append({"image_id": int(image_id), "caption": caption})
+    return fakecaps
+
+
+def get_pred_idx(prediction, choices, options):
+    """
+    Get the index (e.g. 2) from the prediction (e.g. 'C')
+
+    Falls back to a uniformly random valid choice when the prediction
+    letter is not among the first ``len(choices)`` options (e.g. "FAILED").
+    """
+    if prediction in options[:len(choices)]:
+        return options.index(prediction)
+    else:
+        return random.choice(range(len(choices)))
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    base_dir = args.base_dir
+    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
+    problems = json.load(open(os.path.join(base_dir, "problems.json")))
+    our_predictions = [json.loads(line) for line in open(args.our_result)]
+    our_predictions = {pred['question_id']: pred for pred in our_predictions}
+    split_problems = {idx: problems[idx] for idx in split_indices}
+
+    gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
+
+    results = defaultdict(lambda: 0)
+
+    for prob_id, prob in split_problems.items():
+        # Only score problems both systems produced an answer for.
+        if prob_id not in our_predictions:
+            continue
+        if prob_id not in gpt4_predictions:
+            continue
+        our_pred = our_predictions[prob_id]['text']
+        gpt4_pred = gpt4_predictions[prob_id]
+
+        # Extract the single answer letter; anything else is a parse failure.
+        # (The compile is loop-invariant and could be hoisted above the loop.)
+        pattern = re.compile(r'The answer is ([A-Z]).')
+        our_res = pattern.findall(our_pred)
+        if len(our_res) == 1:
+            our_answer = our_res[0]  # 'A', 'B', ...
+        else:
+            our_answer = "FAILED"
+        gpt4_res = pattern.findall(gpt4_pred)
+        if len(gpt4_res) == 1:
+            gpt4_answer = gpt4_res[0]  # 'A', 'B', ...
+        else:
+            gpt4_answer = "FAILED"
+
+        our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
+        gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
+
+        if gpt4_answer == 'FAILED':
+            results['gpt4_failed'] += 1
+            # continue
+            # When GPT-4's answer cannot be parsed, score our own prediction
+            # in its place rather than the random fallback from get_pred_idx.
+            gpt4_pred_idx = our_pred_idx
+            # if our_pred_idx != prob['answer']:
+            #     print(our_predictions[prob_id]['prompt'])
+            #     print('-----------------')
+            #     print(f'LECTURE: {prob["lecture"]}')
+            #     print(f'SOLUTION: {prob["solution"]}')
+            #     print('=====================')
+        else:
+            # continue
+            pass
+            # gpt4_pred_idx = our_pred_idx
+
+        if gpt4_pred_idx == prob['answer']:
+            results['correct'] += 1
+        else:
+            results['incorrect'] += 1
+
+
+        # Upper bound: credit if either system got it right.
+        if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
+            results['correct_upperbound'] += 1
+
+    correct = results['correct']
+    total = results['correct'] + results['incorrect']
+    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
+    print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
+    print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
+
diff --git a/VLMEvalKit-sudoku/llava/eval/m4c_evaluator.py b/VLMEvalKit-sudoku/llava/eval/m4c_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c2e506b9f1d5daef4ba291de3c0ed886fbb74b1
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/m4c_evaluator.py
@@ -0,0 +1,345 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import re
+
+from tqdm import tqdm
+
+
+class EvalAIAnswerProcessor:
+    """
+    Processes an answer similar to Eval AI
+    copied from
+    https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
+
+    Normalization pipeline (see ``__call__``): lowercase/tokenize, strip
+    punctuation, map number words to digits, drop articles, and expand
+    missing-apostrophe contractions. Kept byte-identical to upstream so
+    scores stay comparable with published numbers.
+    """
+
+    # Missing-apostrophe forms mapped back to canonical contractions.
+    CONTRACTIONS = {
+        "aint": "ain't",
+        "arent": "aren't",
+        "cant": "can't",
+        "couldve": "could've",
+        "couldnt": "couldn't",
+        "couldn'tve": "couldn't've",
+        "couldnt've": "couldn't've",
+        "didnt": "didn't",
+        "doesnt": "doesn't",
+        "dont": "don't",
+        "hadnt": "hadn't",
+        "hadnt've": "hadn't've",
+        "hadn'tve": "hadn't've",
+        "hasnt": "hasn't",
+        "havent": "haven't",
+        "hed": "he'd",
+        "hed've": "he'd've",
+        "he'dve": "he'd've",
+        "hes": "he's",
+        "howd": "how'd",
+        "howll": "how'll",
+        "hows": "how's",
+        "Id've": "I'd've",
+        "I'dve": "I'd've",
+        "Im": "I'm",
+        "Ive": "I've",
+        "isnt": "isn't",
+        "itd": "it'd",
+        "itd've": "it'd've",
+        "it'dve": "it'd've",
+        "itll": "it'll",
+        "let's": "let's",
+        "maam": "ma'am",
+        "mightnt": "mightn't",
+        "mightnt've": "mightn't've",
+        "mightn'tve": "mightn't've",
+        "mightve": "might've",
+        "mustnt": "mustn't",
+        "mustve": "must've",
+        "neednt": "needn't",
+        "notve": "not've",
+        "oclock": "o'clock",
+        "oughtnt": "oughtn't",
+        "ow's'at": "'ow's'at",
+        "'ows'at": "'ow's'at",
+        "'ow'sat": "'ow's'at",
+        "shant": "shan't",
+        "shed've": "she'd've",
+        "she'dve": "she'd've",
+        "she's": "she's",
+        "shouldve": "should've",
+        "shouldnt": "shouldn't",
+        "shouldnt've": "shouldn't've",
+        "shouldn'tve": "shouldn't've",
+        "somebody'd": "somebodyd",
+        "somebodyd've": "somebody'd've",
+        "somebody'dve": "somebody'd've",
+        "somebodyll": "somebody'll",
+        "somebodys": "somebody's",
+        "someoned": "someone'd",
+        "someoned've": "someone'd've",
+        "someone'dve": "someone'd've",
+        "someonell": "someone'll",
+        "someones": "someone's",
+        "somethingd": "something'd",
+        "somethingd've": "something'd've",
+        "something'dve": "something'd've",
+        "somethingll": "something'll",
+        "thats": "that's",
+        "thered": "there'd",
+        "thered've": "there'd've",
+        "there'dve": "there'd've",
+        "therere": "there're",
+        "theres": "there's",
+        "theyd": "they'd",
+        "theyd've": "they'd've",
+        "they'dve": "they'd've",
+        "theyll": "they'll",
+        "theyre": "they're",
+        "theyve": "they've",
+        "twas": "'twas",
+        "wasnt": "wasn't",
+        "wed've": "we'd've",
+        "we'dve": "we'd've",
+        "weve": "we've",
+        "werent": "weren't",
+        "whatll": "what'll",
+        "whatre": "what're",
+        "whats": "what's",
+        "whatve": "what've",
+        "whens": "when's",
+        "whered": "where'd",
+        "wheres": "where's",
+        "whereve": "where've",
+        "whod": "who'd",
+        "whod've": "who'd've",
+        "who'dve": "who'd've",
+        "wholl": "who'll",
+        "whos": "who's",
+        "whove": "who've",
+        "whyll": "why'll",
+        "whyre": "why're",
+        "whys": "why's",
+        "wont": "won't",
+        "wouldve": "would've",
+        "wouldnt": "wouldn't",
+        "wouldnt've": "wouldn't've",
+        "wouldn'tve": "wouldn't've",
+        "yall": "y'all",
+        "yall'll": "y'all'll",
+        "y'allll": "y'all'll",
+        "yall'd've": "y'all'd've",
+        "y'alld've": "y'all'd've",
+        "y'all'dve": "y'all'd've",
+        "youd": "you'd",
+        "youd've": "you'd've",
+        "you'dve": "you'd've",
+        "youll": "you'll",
+        "youre": "you're",
+        "youve": "you've",
+    }
+
+    # Spelled-out numbers (zero..ten, 'none') mapped to digit strings.
+    NUMBER_MAP = {
+        "none": "0",
+        "zero": "0",
+        "one": "1",
+        "two": "2",
+        "three": "3",
+        "four": "4",
+        "five": "5",
+        "six": "6",
+        "seven": "7",
+        "eight": "8",
+        "nine": "9",
+        "ten": "10",
+    }
+    ARTICLES = ["a", "an", "the"]
+    # NOTE(review): upstream quirk kept verbatim — `(?!<=\d)` is a negative
+    # lookahead for the literal text "<=" + digit (which virtually always
+    # succeeds); the intent was presumably the lookbehind `(?<!\d)`.
+    PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
+    COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
+    PUNCTUATIONS = [
+        ";",
+        r"/",
+        "[",
+        "]",
+        '"',
+        "{",
+        "}",
+        "(",
+        ")",
+        "=",
+        "+",
+        "\\",
+        "_",
+        "-",
+        ">",
+        "<",
+        "@",
+        "`",
+        ",",
+        "?",
+        "!",
+    ]
+
+    def __init__(self, *args, **kwargs):
+        # Stateless; accepts and ignores any constructor arguments.
+        pass
+
+    def word_tokenize(self, word):
+        """Lowercase, drop commas/question marks, split possessive 's."""
+        word = word.lower()
+        word = word.replace(",", "").replace("?", "").replace("'s", " 's")
+        return word.strip()
+
+    def process_punctuation(self, in_text):
+        """Remove or space-out punctuation per the EvalAI rules.
+
+        NOTE(review): the third positional argument of ``Pattern.sub`` is
+        ``count``, so passing ``re.UNICODE`` (value 32) caps the number of
+        period substitutions at 32 rather than setting a flag — kept
+        byte-identical to upstream for score comparability.
+        """
+        out_text = in_text
+        for p in self.PUNCTUATIONS:
+            # Punctuation adjacent to whitespace (or digit-grouping commas)
+            # is deleted outright; otherwise it is replaced by a space.
+            if (p + " " in in_text or " " + p in in_text) or (
+                re.search(self.COMMA_STRIP, in_text) is not None
+            ):
+                out_text = out_text.replace(p, "")
+            else:
+                out_text = out_text.replace(p, " ")
+        out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
+        return out_text
+
+    def process_digit_article(self, in_text):
+        """Map number words to digits, drop articles, expand contractions.
+
+        NOTE(review): ``setdefault`` inserts every unseen word into the
+        shared class-level NUMBER_MAP (unbounded growth across calls);
+        ``.get(word, word)`` would avoid mutating it. Kept as upstream.
+        """
+        out_text = []
+        temp_text = in_text.lower().split()
+        for word in temp_text:
+            word = self.NUMBER_MAP.setdefault(word, word)
+            if word not in self.ARTICLES:
+                out_text.append(word)
+            else:
+                pass
+        for word_id, word in enumerate(out_text):
+            if word in self.CONTRACTIONS:
+                out_text[word_id] = self.CONTRACTIONS[word]
+        out_text = " ".join(out_text)
+        return out_text
+
+    def __call__(self, item):
+        """Run the full normalization pipeline on one answer string."""
+        item = self.word_tokenize(item)
+        item = item.replace("\n", " ").replace("\t", " ").strip()
+        item = self.process_punctuation(item)
+        item = self.process_digit_article(item)
+        return item
+
+
+class TextVQAAccuracyEvaluator:
+    """VQA-style consensus ("soft") accuracy over 10 human answers."""
+
+    def __init__(self):
+        self.answer_processor = EvalAIAnswerProcessor()
+
+    def _compute_answer_scores(self, raw_answers):
+        """
+        compute the accuracy (soft score) of human answers
+
+        For each unique normalized answer the score is the average over all
+        10 leave-one-out subsets of min(1, matches / 3) — the standard VQA
+        consensus metric. Requires exactly 10 ground-truth answers.
+        """
+        answers = [self.answer_processor(a) for a in raw_answers]
+        assert len(answers) == 10
+        gt_answers = list(enumerate(answers))
+        unique_answers = set(answers)
+        unique_answer_scores = {}
+
+        for unique_answer in unique_answers:
+            accs = []
+            for gt_answer in gt_answers:
+                # Leave-one-out: score against the other nine annotators.
+                other_answers = [item for item in gt_answers if item != gt_answer]
+                matching_answers = [
+                    item for item in other_answers if item[1] == unique_answer
+                ]
+                acc = min(1, float(len(matching_answers)) / 3)
+                accs.append(acc)
+            unique_answer_scores[unique_answer] = sum(accs) / len(accs)
+
+        return unique_answer_scores
+
+    def eval_pred_list(self, pred_list):
+        """Return the mean soft accuracy over entries, each a dict with
+        ``pred_answer`` and a list of 10 ``gt_answers``."""
+        pred_scores = []
+        for entry in tqdm(pred_list):
+            unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
+            pred_answer = self.answer_processor(entry["pred_answer"])
+            # Unseen predictions score 0.
+            score = unique_answer_scores.get(pred_answer, 0.0)
+            pred_scores.append(score)
+
+        accuracy = sum(pred_scores) / len(pred_scores)
+        return accuracy
+
+
+class STVQAAccuracyEvaluator:
+    """Exact-match accuracy after EvalAI answer normalization."""
+
+    def __init__(self):
+        self.answer_processor = EvalAIAnswerProcessor()
+
+    def eval_pred_list(self, pred_list):
+        """Return the fraction of entries whose normalized ``pred_answer``
+        exactly matches any normalized answer in ``gt_answers``.
+
+        NOTE(review): as a side effect this appends one debug row per
+        sample to ./output.csv (append mode, never truncated) — remove or
+        gate this before production use.
+        """
+        pred_scores = []
+        import csv
+        for entry in pred_list:
+            pred_answer = self.answer_processor(entry["pred_answer"])
+            gts = [self.answer_processor(a) for a in entry["gt_answers"]]
+            score = 1.0 if pred_answer in gts else 0.0
+            with open('./output.csv', mode='a', newline='') as file:
+                writer = csv.writer(file)
+                # Write the row to the CSV file
+                writer.writerow([pred_answer, gts, score])
+            pred_scores.append(score)
+
+        accuracy = sum(pred_scores) / len(pred_scores)
+        return accuracy
+
+
+class STVQAANLSEvaluator:
+    """Average Normalized Levenshtein Similarity (ANLS) metric."""
+
+    def __init__(self):
+        import editdistance  # install with `pip install editdistance`
+
+        self.get_edit_distance = editdistance.eval
+
+    def get_anls(self, s1, s2):
+        """Return ANLS between two strings: 1 - normalized edit distance,
+        thresholded to 0.0 when the similarity is below 0.5."""
+        s1 = s1.lower().strip()
+        s2 = s2.lower().strip()
+        iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2))
+        anls = iou if iou >= 0.5 else 0.0
+        return anls
+
+    def eval_pred_list(self, pred_list):
+        """Return the mean best-match ANLS over all entries.
+
+        NOTE(review): like STVQAAccuracyEvaluator, appends one debug row
+        per sample to ./output.csv as a side effect.
+        """
+        pred_scores = []
+        import csv
+        for entry in pred_list:
+            # Best ANLS against any of the ground-truth answers.
+            anls = max(
+                self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"]
+            )
+            pred_scores.append(anls)
+
+            with open('./output.csv', mode='a', newline='') as file:
+                writer = csv.writer(file)
+                # Write the row to the CSV file
+                writer.writerow([entry["pred_answer"], entry["gt_answers"], anls])
+
+        accuracy = sum(pred_scores) / len(pred_scores)
+        return accuracy
+
+
+class TextCapsBleu4Evaluator:
+    """Corpus BLEU-4 for TextCaps captions via pycocoevalcap."""
+
+    def __init__(self):
+        # The following script requires Java 1.8.0 and pycocotools installed.
+        # The pycocoevalcap can be installed with pip as
+        # pip install git+https://github.com/ronghanghu/coco-caption.git@python23
+        # Original pycocoevalcap code is at https://github.com/tylin/coco-caption
+        # but has no python3 support yet.
+        try:
+            from pycocoevalcap.bleu.bleu import Bleu
+            from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
+        except ModuleNotFoundError:
+            print(
+                "Please install pycocoevalcap module using "
+                "pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa
+            )
+            raise
+
+        self.tokenizer = PTBTokenizer()
+        self.scorer = Bleu(4)
+
+    def eval_pred_list(self, pred_list):
+        """Return corpus-level BLEU-4 over entries with ``pred_answer``
+        (hypothesis) and ``gt_answers`` (reference captions)."""
+        # Create reference and hypotheses captions.
+        gts = {}
+        res = {}
+        for idx, entry in enumerate(pred_list):
+            gts[idx] = [{"caption": a} for a in entry["gt_answers"]]
+            res[idx] = [{"caption": entry["pred_answer"]}]
+
+        gts = self.tokenizer.tokenize(gts)
+        res = self.tokenizer.tokenize(res)
+        score, _ = self.scorer.compute_score(gts, res)
+
+        bleu4 = score[3]  # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4)
+        return bleu4
diff --git a/VLMEvalKit-sudoku/llava/eval/model_qa.py b/VLMEvalKit-sudoku/llava/eval/model_qa.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e254da152ac644ff54fb5fa57e625d9e6ba31d1
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/model_qa.py
@@ -0,0 +1,64 @@
+import argparse
+from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from llava.conversation import default_conversation
+from llava.utils import disable_torch_init
+
+
+@torch.inference_mode()
+def eval_model(model_name, questions_file, answers_file):
+    """Generate answers for a JSONL question file with a plain causal LM.
+
+    Args:
+        model_name: HF model path/name (``~`` is expanded); loaded in fp16
+            on GPU.
+        questions_file: JSONL input; each line has ``question_id``,
+            ``text`` and ``category``.
+        answers_file: JSONL output path; one answer record per question.
+    """
+    # Model
+    disable_torch_init()
+    model_name = os.path.expanduser(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+    model = AutoModelForCausalLM.from_pretrained(model_name,
+        torch_dtype=torch.float16).cuda()
+
+
+    ques_file = open(os.path.expanduser(questions_file), "r")
+    ans_file = open(os.path.expanduser(answers_file), "w")
+    for i, line in enumerate(tqdm(ques_file)):
+        idx = json.loads(line)["question_id"]
+        qs = json.loads(line)["text"]
+        cat = json.loads(line)["category"]
+        conv = default_conversation.copy()
+        conv.append_message(conv.roles[0], qs)
+        prompt = conv.get_prompt()
+        inputs = tokenizer([prompt])
+        input_ids = torch.as_tensor(inputs.input_ids).cuda()
+        output_ids = model.generate(
+            input_ids,
+            do_sample=True,
+            use_cache=True,
+            temperature=0.7,
+            max_new_tokens=1024,)
+        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+        # Find the separator that ends the assistant turn; if generation
+        # stopped before emitting it, append one so the slice below works.
+        try:
+            index = outputs.index(conv.sep, len(prompt))
+        except ValueError:
+            outputs += conv.sep
+            index = outputs.index(conv.sep, len(prompt))
+
+        # Strip the prompt and the "<role>: " prefix, keep only the reply.
+        outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
+        ans_id = shortuuid.uuid()
+        ans_file.write(json.dumps({"question_id": idx,
+                                   "text": outputs,
+                                   "answer_id": ans_id,
+                                   "model_id": model_name,
+                                   "metadata": {}}) + "\n")
+        # Flush per answer so partial results survive an interruption.
+        ans_file.flush()
+    ans_file.close()
+
+if __name__ == "__main__":
+    # CLI entry point: generate answers for every question in the file.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
+    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
+    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
+    args = parser.parse_args()
+
+    eval_model(args.model_name, args.question_file, args.answers_file)
diff --git a/VLMEvalKit-sudoku/llava/eval/model_vqa.py b/VLMEvalKit-sudoku/llava/eval/model_vqa.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ebceedafe23eaf90e51e0971fbdfcae45555838
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/model_vqa.py
@@ -0,0 +1,240 @@
+import argparse
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+
+from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from llava.conversation import conv_templates, SeparatorStyle
+from llava.model.builder import load_pretrained_model
+from llava.utils import disable_torch_init
+from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
+
+from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX
+from typing import Dict, Optional, Sequence, List
+import transformers
+import re
+
+from PIL import Image
+import math
+
+
def split_list(lst, n):
    """Split *lst* into n (roughly) equal-sized contiguous chunks."""
    size = math.ceil(len(lst) / n)  # per-chunk length, rounded up
    return [lst[start:start + size] for start in range(0, len(lst), size)]
+
+
def get_chunk(lst, n, k):
    """Return the k-th of n roughly equal chunks of *lst* (for sharded eval)."""
    return split_list(lst, n)[k]
+
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Build Qwen chat-template input ids for one conversation.

    NOTE(review): the inner user-turn handling had been corrupted by markup
    stripping (every "<image>" literal was deleted, fusing two lines into
    `if iuser":`). Reconstructed below: user text containing DEFAULT_IMAGE_TOKEN
    is split on that token and IMAGE_TOKEN_INDEX placeholders are interleaved.

    Args:
        sources: list of {"from": "human"|"gpt", "value": str|None} turns.
        tokenizer: Qwen tokenizer whose additional special tokens are
            (<|im_start|>, <|im_end|>).
        has_image: if True, expand DEFAULT_IMAGE_TOKEN occurrences into
            IMAGE_TOKEN_INDEX placeholder ids.
        max_len: unused here; kept for signature compatibility.
        system_message: system prompt prepended to the conversation.

    Returns:
        torch.LongTensor of shape (1, seq_len) with the prompt ids
        (targets are built for parity with training code but not returned).
    """
    roles = {"human": "<|im_start|>user", "gpt": "<|im_start|>assistant"}

    im_start, im_end = tokenizer.additional_special_tokens_ids
    nl_tokens = tokenizer("\n").input_ids
    _system = tokenizer("system").input_ids + nl_tokens
    _user = tokenizer("user").input_ids + nl_tokens
    _assistant = tokenizer("assistant").input_ids + nl_tokens

    # Apply prompt templates
    input_ids, targets = [], []

    source = sources
    # Conversations must start with a human turn; drop a leading gpt turn.
    if roles[source[0]["from"]] != roles["human"]:
        source = source[1:]

    input_id, target = [], []
    system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
    input_id += system
    target += [im_start] + [IGNORE_INDEX] * (len(system) - 3) + [im_end] + nl_tokens
    assert len(input_id) == len(target)
    for j, sentence in enumerate(source):
        role = roles[sentence["from"]]
        if has_image and sentence["value"] is not None and DEFAULT_IMAGE_TOKEN in sentence["value"]:
            # Interleave text segments with IMAGE_TOKEN_INDEX placeholders,
            # one per DEFAULT_IMAGE_TOKEN occurrence.
            num_image = len(re.findall(DEFAULT_IMAGE_TOKEN, sentence["value"]))
            texts = sentence["value"].split(DEFAULT_IMAGE_TOKEN)
            _input_id = tokenizer(role).input_ids + nl_tokens
            for i, text in enumerate(texts):
                _input_id += tokenizer(text).input_ids
                if i < num_image:
                    _input_id += [IMAGE_TOKEN_INDEX] + nl_tokens
            _input_id += [im_end] + nl_tokens
        else:
            if sentence["value"] is None:
                # Open assistant turn: role header only, no content / end token.
                _input_id = tokenizer(role).input_ids + nl_tokens
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens + tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
        input_id += _input_id
        if role == "<|im_start|>user":
            _target = [im_start] + [IGNORE_INDEX] * (len(_input_id) - 3) + [im_end] + nl_tokens
        elif role == "<|im_start|>assistant":
            _target = [im_start] + [IGNORE_INDEX] * len(tokenizer(role).input_ids) + _input_id[len(tokenizer(role).input_ids) + 1 : -2] + [im_end] + nl_tokens
        else:
            raise NotImplementedError
        target += _target

    input_ids.append(input_id)
    targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)
    return input_ids
+
def eval_model(args):
    """Run multi-image, multi-round VQA generation over a JSON question file.

    Reads a list of samples (each with image paths and a "conversations"
    list), generates an answer per round with the Qwen-1.5 chat template,
    and streams JSONL answer records to args.answers_file.
    """

    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)

    # Data: one JSON array (not JSONL), sharded by --num-chunks/--chunk-idx.
    with open(os.path.expanduser(args.question_file)) as f:
        questions = json.load(f)
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    for line in tqdm(questions):
        idx = line["sample_id"]
        question_type = line["metadata"]["question_type"]
        dataset_name = line["metadata"]["dataset"]
        # Ground truth is the first gpt turn (conversations[1]).
        gt = line["conversations"][1]["value"]

        image_files = line["image"]
        qs = line["conversations"][0]["value"]
        cur_prompt = args.extra_prompt + qs

        # NOTE(review): hard-codes the qwen_1_5 template, overriding --conv-mode.
        args.conv_mode = "qwen_1_5"

        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        # Tokenize via preprocess_qwen (appends an open assistant turn).
        input_ids = preprocess_qwen([line["conversations"][0],{'from': 'gpt','value': None}], tokenizer, has_image=True).cuda()
        img_num = list(input_ids.squeeze()).count(IMAGE_TOKEN_INDEX)

        image_tensors = []
        for image_file in image_files:
            image = Image.open(os.path.join(args.image_folder, image_file))
            image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values']
            image_tensors.append(image_tensor.half().cuda())
        # image_tensors = torch.cat(image_tensors, dim=0)

        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensors,
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                # no_repeat_ngram_size=3,
                max_new_tokens=1024,
                use_cache=True)


        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({
            "dataset": dataset_name,
            "sample_id": idx,
            "prompt": cur_prompt,
            "pred_response": outputs,
            "gt_response": gt,
            "shortuuid": ans_id,
            "model_id": model_name,
            "question_type": question_type,
        }) + "\n")
        ans_file.flush()

        # Follow-up rounds: conversations beyond the first human/gpt pair.
        if len(line["conversations"]) > 2:

            for i in range(2, len(line["conversations"]), 2):
                # NOTE(review): output_ids is concatenated onto input_ids here;
                # assumes generate() returned only newly generated tokens
                # (i.e. the model's generate strips the prompt) — TODO confirm.
                input_ids = torch.cat((input_ids, output_ids), dim=1)

                gt = line["conversations"][i + 1]["value"]
                qs = line["conversations"][i]["value"]
                cur_prompt = args.extra_prompt + qs

                args.conv_mode = "qwen_1_5"

                conv = conv_templates[args.conv_mode].copy()
                conv.append_message(conv.roles[0], qs)
                conv.append_message(conv.roles[1], None)
                prompt = conv.get_prompt()

                input_ids_new = preprocess_qwen([line["conversations"][i],{'from': 'gpt','value': None}], tokenizer, has_image=True).cuda()
                input_ids = torch.cat((input_ids, input_ids_new), dim=1)
                img_num = list(input_ids_new.squeeze()).count(IMAGE_TOKEN_INDEX)

                stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
                keywords = [stop_str]
                stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)

                with torch.inference_mode():
                    output_ids = model.generate(
                        input_ids,
                        images=image_tensors,
                        do_sample=True if args.temperature > 0 else False,
                        temperature=args.temperature,
                        top_p=args.top_p,
                        num_beams=args.num_beams,
                        # no_repeat_ngram_size=3,
                        max_new_tokens=1024,
                        use_cache=True)

                outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
                outputs = outputs.strip()
                if outputs.endswith(stop_str):
                    outputs = outputs[:-len(stop_str)]
                outputs = outputs.strip()

                ans_id = shortuuid.uuid()
                ans_file.write(json.dumps({
                    "dataset": dataset_name,
                    "sample_id": idx,
                    "prompt": cur_prompt,
                    "pred_response": outputs,
                    "gt_response": gt,
                    "shortuuid": ans_id,
                    "model_id": model_name,
                    "question_type": question_type,
                }) + "\n")
                ans_file.flush()


    ans_file.close()
+
if __name__ == "__main__":
    # CLI entry point for sharded multi-image VQA generation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--extra-prompt", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    parser.add_argument("--test_size", type=int, default=10000000)
    args = parser.parse_args()

    eval_model(args)
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/eval/model_vqa_mmbench.py b/VLMEvalKit-sudoku/llava/eval/model_vqa_mmbench.py
new file mode 100644
index 0000000000000000000000000000000000000000..e63dfe34b92cac2583811abd2ea2851080c282ee
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/eval/model_vqa_mmbench.py
@@ -0,0 +1,187 @@
+import argparse
+import torch
+import os
+import json
+import pandas as pd
+from tqdm import tqdm
+import shortuuid
+
+from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from llava.conversation import conv_templates, SeparatorStyle
+from llava.model.builder import load_pretrained_model
+from llava.utils import disable_torch_init
+from llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
+
+from PIL import Image
+import math
+from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio
+
+
+all_options = ['A', 'B', 'C', 'D']
+
+
def split_list(lst, n):
    """Split *lst* into n (roughly) equal-sized contiguous chunks."""
    per_chunk = math.ceil(len(lst) / n)  # round up so no element is dropped
    starts = range(0, len(lst), per_chunk)
    return [lst[s:s + per_chunk] for s in starts]
+
+
def get_chunk(lst, n, k):
    """Return the k-th of n roughly equal chunks (shard selection)."""
    return split_list(lst, n)[k]
+
+
def is_none(value):
    """True for None, float NaN, and the strings 'nan'/'none' (any case)."""
    if value is None:
        return True
    if isinstance(value, float) and math.isnan(value):
        return True
    if isinstance(value, str) and value.lower() in ("nan", "none"):
        return True
    return False
+
def get_options(row, options):
    """Collect consecutive option values from *row*, stopping at the first
    missing/NaN entry (per is_none)."""
    parsed = []
    for key in options:
        value = row[key]
        if is_none(value):
            break
        parsed.append(value)
    return parsed
+
+
def eval_model(args):
    """Run MMBench-style multiple-choice evaluation from a TSV question table.

    Each row carries a base64 image, a question, optional hint, and options
    A-D; optionally evaluates every circular rotation of the options
    (--all-rounds). Answers are streamed as JSONL.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, _args=args)

    # Questions come from a TSV; sharded by --num-chunks/--chunk-idx.
    questions = pd.read_table(os.path.expanduser(args.question_file))
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    # Plain (pretrain-only) checkpoints need the mmtag prompt variant.
    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')

    for index, row in tqdm(questions.iterrows(), total=len(questions)):
        options = get_options(row, all_options)
        cur_option_char = all_options[:len(options)]

        # One round per option when rotating choices, else a single round.
        if args.all_rounds:
            num_rounds = len(options)
        else:
            num_rounds = 1

        for round_idx in range(num_rounds):
            idx = row['index']
            question = row['question']
            hint = row['hint']
            image = load_image_from_base64(row['image'])
            if not is_none(hint):
                question = hint + '\n' + question
            # Append "A. option" lines in the current rotation order.
            for option_char, option in zip(all_options[:len(options)], options):
                question = question + '\n' + option_char + '. ' + option
            qs = cur_prompt = question
            if model.config.mm_use_im_start_end:
                qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
            else:
                qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

            if args.single_pred_prompt:
                if args.lang == 'cn':
                    qs = qs + '\n' + "请直接回答选项字母。"
                else:
                    qs = qs + '\n' + "Answer with the option's letter from the given choices directly."

            conv = conv_templates[args.conv_mode].copy()
            conv.append_message(conv.roles[0], qs)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()

            input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()

            # image_tensor = process_images([image], image_processor, model.config)[0]

            # image = resize_image_keep_ratio(image, max_size=1024)
            # minicpm-v style slicing: one resized source image plus optional
            # high-resolution patches arranged on a grid.
            source_image, patches, best_grid, ind_tokens = slice_image_minicpm(
                image, max_slice_nums=7, scale_resolution=336, patch_size=14, never_split=False)
            image_sizes = [source_image.size]
            processor = image_processor
            if best_grid is None:  # no slicing was performed
                source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                    do_rescale=True, do_normalize=True,
                                                    return_tensors='pt')['pixel_values'] # 1, 3, abs_h, abs_w
                # Placeholder patch tensor so downstream shapes stay consistent.
                crop_size = processor.crop_size
                patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width'])
            else:
                source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                    do_rescale=True, do_normalize=True,
                                                    return_tensors='pt')['pixel_values'] # 1, 3, abs_h, abs_w
                patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False,
                                                    do_rescale=True, do_normalize=True,
                                                    return_tensors='pt')['pixel_values'] # num_slice, 3, s_h, s_w
            images = [source_tensors[0].half().cuda()] # 3, h, w
            patch_images = [patch_tensors.half().cuda()] # bs, 3, h, w
            ind_tokens = [ind_tokens]

            with torch.inference_mode():
                output_ids = model.generate(
                    input_ids,
                    images=images,
                    image_sizes=image_sizes,
                    patch_images=patch_images,
                    ind_tokens=ind_tokens,
                    do_sample=True if args.temperature > 0 else False,
                    temperature=args.temperature,
                    top_p=args.top_p,
                    num_beams=args.num_beams,
                    # no_repeat_ngram_size=3,
                    max_new_tokens=1024,
                    use_cache=True)

            outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

            ans_id = shortuuid.uuid()
            ans_file.write(json.dumps({"question_id": idx,
                                    "round_id": round_idx,
                                    "prompt": cur_prompt,
                                    "text": outputs,
                                    "options": options,
                                    "option_char": cur_option_char,
                                    "answer_id": ans_id,
                                    "model_id": model_name,
                                    "metadata": {}}) + "\n")
            ans_file.flush()

            # rotate options
            options = options[1:] + options[:1]
            cur_option_char = cur_option_char[1:] + cur_option_char[:1]
    ans_file.close()
+
if __name__ == "__main__":
    # CLI entry point for MMBench multiple-choice evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-folder", type=str, default="")
    parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
    parser.add_argument("--answers-file", type=str, default="answer.jsonl")
    parser.add_argument("--conv-mode", type=str, default="llava_v1")
    parser.add_argument("--num-chunks", type=int, default=1)
    parser.add_argument("--chunk-idx", type=int, default=0)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--top_p", type=float, default=None)
    parser.add_argument("--num_beams", type=int, default=1)
    parser.add_argument("--all-rounds", action="store_true")
    parser.add_argument("--single-pred-prompt", action="store_true")
    parser.add_argument("--lang", type=str, default="en")
    parser.add_argument("--fted_encoder", type=bool, default=True)
    args = parser.parse_args()

    eval_model(args)
diff --git a/VLMEvalKit-sudoku/llava/mm_utils.py b/VLMEvalKit-sudoku/llava/mm_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a3e50905a9c18799ade86af7d62c59eb029115
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/mm_utils.py
@@ -0,0 +1,395 @@
+from PIL import Image
+from io import BytesIO
+import base64
+import math
+import ast
+import re
+import torch
+from transformers import StoppingCriteria
+from llava.constants import IMAGE_TOKEN_INDEX
+
+
def resize_and_center_crop(image, shortest_edge_length):
    """Resize *image* so its shortest edge equals *shortest_edge_length*,
    then center-crop to a shortest_edge_length square.

    BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    same filter under its long-standing name.
    """
    # Calculate new dimensions and resize
    aspect_ratio = float(image.width) / float(image.height)
    if aspect_ratio > 1:
        new_width = int(shortest_edge_length * aspect_ratio)
        new_height = shortest_edge_length
    else:
        new_width = shortest_edge_length
        new_height = int(shortest_edge_length / aspect_ratio)
    resized_image = image.resize((new_width, new_height), Image.LANCZOS)

    # Calculate the position and perform the center crop
    left = (new_width - shortest_edge_length) / 2
    top = (new_height - shortest_edge_length) / 2
    right = (new_width + shortest_edge_length) / 2
    bottom = (new_height + shortest_edge_length) / 2
    cropped_image = resized_image.crop((left, top, right, bottom))

    return cropped_image
+
+
def auto_pad_images(image, grid_params):
    """Resize *image* to the closest-aspect-ratio grid resolution and pad
    with black to exactly that resolution, centered.

    BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    same filter under its long-standing name.
    """
    assert isinstance(image, Image.Image), "Input should be a Pillow Image"
    assert len(grid_params) > 0, "Grid parameters should not be empty"

    # Step 1: Calculate and find the closest aspect ratio
    input_width, input_height = image.size
    input_aspect_ratio = input_width / input_height
    candidate_resolutions = [(w / h, w, h) for w in grid_params for h in grid_params]
    closest_aspect_ratio = min(candidate_resolutions, key=lambda x: abs(input_aspect_ratio - x[0]))

    candidate_resolutions = [(x[1], x[2]) for x in candidate_resolutions if abs(x[0] - closest_aspect_ratio[0]) < 1e-3]

    # Among same-aspect candidates, pick the one whose long edge is closest to the input's.
    target_resolution = min(candidate_resolutions, key=lambda res: abs(max(input_width, input_height) / max(res) - 1))

    resize_width, resize_height = target_resolution
    if input_width > input_height:
        resize_height = int(resize_width / input_aspect_ratio)
    else:
        resize_width = int(resize_height * input_aspect_ratio)
    resized_image = image.resize((resize_width, resize_height), Image.LANCZOS)

    # Step 5: Pad the resized image if necessary to match the target resolution
    pad_width = target_resolution[0] - resize_width
    pad_height = target_resolution[1] - resize_height
    padded_image = Image.new("RGB", target_resolution, color=(0, 0, 0))
    padded_image.paste(resized_image, (pad_width // 2, pad_height // 2))

    return padded_image
+
+
def extract_patches(image, patch_size, overlap_ratio):
    """Tile *image* into patch_size x patch_size crops with the given overlap,
    centering the patch grid inside the image."""
    assert isinstance(image, Image.Image), "Input should be a Pillow Image"
    assert patch_size > 0, "Patch size should be greater than 0"
    assert 0 <= overlap_ratio < 1, "Overlap ratio should be between 0 and 1"

    width, height = image.size
    stride = int(patch_size * (1 - overlap_ratio))

    rows = (height - patch_size) // stride + 1
    cols = (width - patch_size) // stride + 1

    # Offsets that center the grid of patches inside the image.
    y0 = (height - (rows - 1) * stride - patch_size) // 2
    x0 = (width - (cols - 1) * stride - patch_size) // 2

    return [
        image.crop((x, y, x + patch_size, y + patch_size))
        for y in range(y0, y0 + rows * stride, stride)
        for x in range(x0, x0 + cols * stride, stride)
    ]
+
+
def process_highres_image_crop_split(image, data_args, processor=None):
    """Center-crop *image* to data_args.image_crop_resolution, split it into
    non-overlapping image_split_resolution tiles, preprocess each tile, and
    stack the results into one tensor."""
    crop_resolution = data_args.image_crop_resolution
    split_resolution = data_args.image_split_resolution
    if processor is None:
        processor = data_args.image_processor
    cropped = resize_and_center_crop(image, crop_resolution)
    tiles = extract_patches(cropped, patch_size=split_resolution, overlap_ratio=0)
    tensors = [processor.preprocess(tile, return_tensors="pt")["pixel_values"][0] for tile in tiles]
    return torch.stack(tensors, dim=0)
+
+
def process_highres_image(image, processor, grid_pinpoints):
    """Pad *image* to a square, resize it to a grid size, and tile it into
    processor-sized patches, prepending a globally resized thumbnail.

    Returns a stacked tensor of shape (1 + num_patches, C, H, W).
    """
    grid_params = [int(x) for x in grid_pinpoints.split(",")]
    width_height = max(image.size)
    fit_grid_params = [x for x in grid_params if x >= width_height]
    if len(fit_grid_params) == 0:
        select_size = max(grid_params)
    else:
        select_size = min(fit_grid_params)
    # FIXME: always select the 448
    # NOTE(review): this override makes the selection above dead code; kept
    # deliberately per the FIXME.
    select_size = max(grid_params)
    image_padded = expand2square(image, tuple(int(x * 255) for x in processor.image_mean))

    # FIXME: this seems to be a bug that it always resizes instead of padding
    image_original_resize = image.resize((processor.size["shortest_edge"], processor.size["shortest_edge"]))
    image_padded = image_padded.resize((select_size, select_size))
    image_patches = extract_patches(image_padded, patch_size=processor.size["shortest_edge"], overlap_ratio=0)
    image_patches = [image_original_resize] + image_patches
    image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
    return torch.stack(image_patches, dim=0)
+
+
def select_best_resolution(original_size, possible_resolutions):
    """
    Selects the best resolution from a list of possible resolutions based on the original size.

    Maximizes the effective (non-upscaled) pixel count of the downscaled
    image; ties are broken by minimizing wasted canvas area.

    Args:
        original_size (tuple): The original size of the image in the format (width, height).
        possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), ...].

    Returns:
        tuple: The best fit resolution in the format (width, height).
    """
    orig_w, orig_h = original_size
    best_fit = None
    best_effective = 0
    best_waste = float("inf")

    for cand_w, cand_h in possible_resolutions:
        # Downscale to fit inside the candidate while keeping aspect ratio.
        scale = min(cand_w / orig_w, cand_h / orig_h)
        down_w, down_h = int(orig_w * scale), int(orig_h * scale)

        # Pixels actually carrying image content (upscaling adds nothing).
        effective = min(down_w * down_h, orig_w * orig_h)
        waste = cand_w * cand_h - effective

        better = effective > best_effective or (effective == best_effective and waste < best_waste)
        if better:
            best_effective = effective
            best_waste = waste
            best_fit = (cand_w, cand_h)

    return best_fit
+
+
def resize_and_pad_image(image, target_resolution):
    """
    Resize *image* to fit inside *target_resolution* while keeping its aspect
    ratio, then pad with black to exactly that size, centered.

    Args:
        image (PIL.Image.Image): The input image.
        target_resolution (tuple): Target (width, height).

    Returns:
        PIL.Image.Image: The resized and padded image.
    """
    orig_w, orig_h = image.size
    tgt_w, tgt_h = target_resolution

    scale_w = tgt_w / orig_w
    scale_h = tgt_h / orig_h

    if scale_w < scale_h:
        # Width is the limiting dimension; height scales with it.
        new_w = tgt_w
        new_h = min(math.ceil(orig_h * scale_w), tgt_h)
    else:
        # Height is the limiting dimension; width scales with it.
        new_h = tgt_h
        new_w = min(math.ceil(orig_w * scale_h), tgt_w)

    resized = image.resize((new_w, new_h))

    canvas = Image.new("RGB", (tgt_w, tgt_h), (0, 0, 0))
    canvas.paste(resized, ((tgt_w - new_w) // 2, (tgt_h - new_h) // 2))
    return canvas
+
+
def divide_to_patches(image, patch_size):
    """
    Divides an image into patches of a specified size.

    Patches are produced in row-major order (top-to-bottom, left-to-right).

    Args:
        image (PIL.Image.Image): The input image.
        patch_size (int): The size of each patch.

    Returns:
        list: A list of PIL.Image.Image objects representing the patches.
    """
    width, height = image.size
    return [
        image.crop((x, y, x + patch_size, y + patch_size))
        for y in range(0, height, patch_size)
        for x in range(0, width, patch_size)
    ]
+
+
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (tuple): Input image size as (width, height).
        grid_pinpoints (str | list): Possible resolutions, either a literal
            list/str or a "(AxB)...(CxD)" range spec.
        patch_size (int): The size of each image patch.

    Returns:
        tuple: Patch-grid shape as (width, height) in patch units.
    """
    if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
        assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
        # Parse the "(AxB)" endpoints and enumerate the inclusive grid range,
        # scaling every entry by patch_size to get pixel resolutions.
        matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
        w0, h0 = map(int, matches[0])
        w1, h1 = map(int, matches[-1])
        grid_pinpoints = [
            [i * patch_size, j * patch_size]
            for i in range(w0, w1 + 1)
            for j in range(h0, h1 + 1)
        ]
    if type(grid_pinpoints) is list:
        possible_resolutions = grid_pinpoints
    else:
        possible_resolutions = ast.literal_eval(grid_pinpoints)
    width, height = select_best_resolution(image_size, possible_resolutions)
    return width // patch_size, height // patch_size
+
+
def process_anyres_image(image, processor, grid_pinpoints):
    """
    Process an image with variable resolutions.

    Picks the best target resolution from *grid_pinpoints*, resizes and pads
    the image to it, tiles it into crop-size patches, and prepends a globally
    resized thumbnail before preprocessing everything.

    Args:
        image (PIL.Image.Image): The input image to be processed.
        processor: The image processor object.
        grid_pinpoints (str): A string representation of a list of possible resolutions.

    Returns:
        torch.Tensor: A tensor containing the processed image patches,
        shape (1 + num_patches, C, H, W).
    """
    # Convert grid_pinpoints from string to list
    if isinstance(grid_pinpoints, str) and "x" in grid_pinpoints:
        try:
            patch_size = processor.size[0]
        except Exception as e:
            patch_size = processor.size["shortest_edge"]
        assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]"
        # Use regex to extract the range from the input string
        matches = re.findall(r"\((\d+)x(\d+)\)", grid_pinpoints)
        range_start = tuple(map(int, matches[0]))
        range_end = tuple(map(int, matches[-1]))
        # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1])
        grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)]
        # Multiply all elements by patch_size
        grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints]

    if type(grid_pinpoints) is list:
        possible_resolutions = grid_pinpoints
    else:
        possible_resolutions = ast.literal_eval(grid_pinpoints)
    best_resolution = select_best_resolution(image.size, possible_resolutions)
    image_padded = resize_and_pad_image(image, best_resolution)

    patches = divide_to_patches(image_padded, processor.crop_size["height"])

    # FIXME: this seems to be a bug that it resizes instead of pad.
    # but to keep it consistent with previous, i will keep it as it is
    # TODO: uncomment below to ablate with the padding
    if isinstance(processor.size, dict):
        shortest_edge = processor.size["shortest_edge"]
    else:
        shortest_edge = min(processor.size)
    image_original_resize = image.resize((shortest_edge, shortest_edge))
    # image_padded_square = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
    # image_original_resize = image_padded_square.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))

    image_patches = [image_original_resize] + patches
    image_patches = [processor.preprocess(image_patch, return_tensors="pt")["pixel_values"][0] for image_patch in image_patches]
    return torch.stack(image_patches, dim=0)
+
+
def load_image_from_base64(image):
    """Decode a base64-encoded image string into a PIL Image."""
    raw = base64.b64decode(image)
    return Image.open(BytesIO(raw))
+
+
def expand2square(pil_img, background_color):
    """Pad *pil_img* to a square whose side is its longer edge, centering the
    original image on *background_color*. Returns the input unchanged if it is
    already square."""
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    canvas.paste(pil_img, ((side - width) // 2, (side - height) // 2))
    return canvas
+
+
def process_images(images, image_processor, model_cfg):
    """Preprocess a list of PIL images according to model_cfg.image_aspect_ratio.

    Modes: "highres", "anyres"/"anyres_max...", "crop_split", "pad"; any other
    value (including unset) falls through to plain processor preprocessing.

    BUG FIX: when model_cfg has no image_aspect_ratio, the original
    `"anyres_max" in image_aspect_ratio` raised TypeError on None; guard added.
    """
    image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
    new_images = []
    if image_aspect_ratio == "highres":
        for image in images:
            image = process_highres_image(image, image_processor, model_cfg.image_grid_pinpoints)
            new_images.append(image)
    elif image_aspect_ratio == "anyres" or (image_aspect_ratio is not None and "anyres_max" in image_aspect_ratio):
        for image in images:
            image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints)
            new_images.append(image)
    elif image_aspect_ratio == "crop_split":
        for image in images:
            image = process_highres_image_crop_split(image, model_cfg, image_processor)
            new_images.append(image)
    elif image_aspect_ratio == "pad":
        for image in images:
            image = expand2square(image, tuple(int(x * 255) for x in image_processor.image_mean))
            image = image_processor.preprocess(image, return_tensors="pt")["pixel_values"][0]
            new_images.append(image)
    else:
        return image_processor.preprocess(images, return_tensors="pt")["pixel_values"]
    # Stack only when all per-image tensors share a shape (anyres can differ).
    if all(x.shape == new_images[0].shape for x in new_images):
        new_images = torch.stack(new_images, dim=0)
    return new_images
+
+
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
    """Tokenize *prompt*, replacing each "<image>" placeholder with
    *image_token_index*.

    BUG FIX: the split delimiter had been corrupted to the empty string
    (str.split("") raises ValueError); restored to the "<image>" sentinel
    used throughout this package (DEFAULT_IMAGE_TOKEN).
    """
    prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]

    def insert_separator(X, sep):
        # Interleave sep between chunks: [a, sep, b, sep, c].
        return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]

    input_ids = []
    offset = 0
    # Keep a single leading BOS; skip the BOS each chunk would otherwise repeat.
    if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
        offset = 1
        input_ids.append(prompt_chunks[0][0])

    for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
        input_ids.extend(x[offset:])

    if return_tensors is not None:
        if return_tensors == "pt":
            return torch.tensor(input_ids, dtype=torch.long)
        raise ValueError(f"Unsupported tensor type: {return_tensors}")
    return input_ids
+
+
def get_model_name_from_path(model_path):
    """Derive a model name from a filesystem path; for checkpoint directories,
    prefix the parent directory's name."""
    parts = model_path.strip("/").split("/")
    if parts[-1].startswith("checkpoint-"):
        return parts[-2] + "_" + parts[-1]
    return parts[-1]
+
+
class KeywordsStoppingCriteria(StoppingCriteria):
    """Stop generation once any keyword appears in the newly generated tokens.

    Checks token-id suffix matches first, then falls back to decoding the
    last few tokens and searching for the keyword strings.
    """

    def __init__(self, keywords, tokenizer, input_ids):
        self.keywords = keywords
        self.keyword_ids = []
        for keyword in keywords:
            cur_keyword_ids = tokenizer(keyword).input_ids
            # Drop a leading BOS so we compare against the bare keyword ids.
            if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
                cur_keyword_ids = cur_keyword_ids[1:]
            self.keyword_ids.append(torch.tensor(cur_keyword_ids))
        self.tokenizer = tokenizer
        self.start_len = input_ids.shape[1]

    def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)"  # TODO
        # Only decode the last few tokens (at most 3) for the string check.
        offset = min(output_ids.shape[1] - self.start_len, 3)
        self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
        for keyword_id in self.keyword_ids:
            # BUG FIX: `tensor == tensor` yields an element-wise bool tensor;
            # using it directly in `if` raises for multi-token keywords.
            # Reduce with .all() to test a full suffix match.
            if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
                return True
        outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
        for keyword in self.keywords:
            if keyword in outputs:
                return True
        return False
diff --git a/VLMEvalKit-sudoku/llava/model/apply_delta.py b/VLMEvalKit-sudoku/llava/model/apply_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..c183ba19a4e91e9cb95155b542e7406ea5b287a0
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/apply_delta.py
@@ -0,0 +1,47 @@
+"""
+Usage:
+python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
+"""
+
+import argparse
+
+import torch
+from tqdm import tqdm
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from llava import LlavaLlamaForCausalLM
+
+
def apply_delta(base_model_path, target_model_path, delta_path):
    """Add delta weights onto a base model and save the merged checkpoint.

    Args:
        base_model_path: path of the original (base) model checkpoint.
        target_model_path: output directory for the merged model and tokenizer.
        delta_path: path of the delta checkpoint (LLaVA format).
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Loading delta")
    delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)

    print("Applying delta")
    # PERF: ``state_dict()`` rebuilds the whole dict on every call; the original
    # queried it up to four times per parameter inside the loop. Build it once.
    base_state = base.state_dict()
    for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
        if name not in base_state:
            # Only the multimodal projector is allowed to be delta-only.
            assert name in ["model.mm_projector.weight", "model.mm_projector.bias"], f"{name} not in base model"
            continue
        bparam = base_state[name]
        if param.data.shape == bparam.shape:
            param.data += bparam
        else:
            # Vocab-extended tensors: add the base weights into the top-left block.
            assert name in ["model.embed_tokens.weight", "lm_head.weight"], f"{name} dimension mismatch: {param.data.shape} vs {bparam.shape}"
            param.data[: bparam.shape[0], : bparam.shape[1]] += bparam

    print("Saving target model")
    delta.save_pretrained(target_model_path)
    delta_tokenizer.save_pretrained(target_model_path)
+
+
if __name__ == "__main__":
    # CLI entry point; see the module docstring for example usage.
    cli = argparse.ArgumentParser()
    for flag in ("--base-model-path", "--target-model-path", "--delta-path"):
        cli.add_argument(flag, type=str, required=True)
    parsed = cli.parse_args()
    apply_delta(parsed.base_model_path, parsed.target_model_path, parsed.delta_path)
diff --git a/VLMEvalKit-sudoku/llava/model/consolidate.py b/VLMEvalKit-sudoku/llava/model/consolidate.py
new file mode 100644
index 0000000000000000000000000000000000000000..f02e575f6b8e4388e1758776cadd62309147a1ad
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/consolidate.py
@@ -0,0 +1,30 @@
+"""
+Usage:
+python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
+"""
+
+import argparse
+
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from llava.model import *
+from llava.model.utils import auto_upgrade
+
+
def consolidate_ckpt(src_path, dst_path):
    """Load the checkpoint at *src_path* and re-save model + tokenizer to *dst_path*."""
    print("Loading model")
    # Upgrade legacy LLaVA configs in place before loading.
    auto_upgrade(src_path)
    model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
    model.save_pretrained(dst_path)
    tokenizer.save_pretrained(dst_path)
+
+
if __name__ == "__main__":
    # CLI entry point; see the module docstring for example usage.
    cli = argparse.ArgumentParser()
    cli.add_argument("--src", type=str, required=True)
    cli.add_argument("--dst", type=str, required=True)
    parsed = cli.parse_args()
    consolidate_ckpt(parsed.src, parsed.dst)
diff --git a/VLMEvalKit-sudoku/llava/model/language_model/llava_llama.py b/VLMEvalKit-sudoku/llava/model/language_model/llava_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..b406dd13b552b6f54dffe4c45b9de25a566f1e11
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/language_model/llava_llama.py
@@ -0,0 +1,168 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig
+
+from torch.nn import CrossEntropyLoss
+
+
+# , LlamaModel, LlamaForCausalLM, GenerationConfig
+# from .modeling_llama import LlamaModel, LlamaForCausalLM
+from transformers import LlamaModel, LlamaForCausalLM
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
class LlavaConfig(LlamaConfig):
    """LlamaConfig variant registered under model type ``llava_llama``.

    Carries greedy-decoding generation defaults used by the LLaVA wrapper.
    """

    model_type = "llava_llama"
    # reset to 0.0, previously 0.9 for Vicuna
    temperature: float = 0.0
    max_new_tokens: int = 1024
    do_sample: bool = False
    top_p: Optional[float] = None
    # rope_scaling: Optional[dict] = {}
+
+
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    """LlamaModel augmented with LLaVA's vision modules via the meta-model mixin."""

    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
+
+
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    """LlamaForCausalLM extended with LLaVA multimodal input preparation.

    Text-only calls behave like the parent class; when ``images`` (and
    optionally ``patch_images``/``ind_tokens``) are supplied, image features
    are spliced into the token embeddings before the language model runs.
    """

    config_class = LlavaConfig

    def __init__(self, config):
        LlamaForCausalLM.__init__(self, config)

        # configure default generation settings
        config.model_type = "llava_llama"
        # config.rope_scaling = None

        self.model = LlavaLlamaModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        """Return the wrapped LlavaLlamaModel (backbone plus vision modules)."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = None,
        dpo_forward: Optional[bool] = None,
        cache_position=None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward pass.

        When ``inputs_embeds`` is absent, image features are merged into the
        embeddings first. With ``dpo_forward`` set, returns ``(logits, labels)``
        instead of the usual ``CausalLMOutputWithPast``.
        """
        # BUG FIX: ``modalities`` previously used the mutable default ``["image"]``;
        # use a ``None`` default and normalize here (same effective behavior).
        if modalities is None:
            modalities = ["image"]

        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities,
                image_sizes, patch_images=patch_images, ind_tokens=ind_tokens)

        if dpo_forward:
            # DPO training needs raw logits paired with the (possibly merged) labels.
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels

        else:
            return super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Generate text, first splicing image features into the input embeddings.

        Raises:
            NotImplementedError: if ``inputs_embeds`` is passed directly.
        """
        # BUG FIX: ``modalities`` used the mutable default ``["image"]``, plus a dead
        # conditional pop — a named parameter can never appear in ``**kwargs``.
        if modalities is None:
            modalities = ["image"]
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            # Replace image placeholder tokens with projected vision features.
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, modalities,
                image_sizes=image_sizes, patch_images=patch_images, ind_tokens=ind_tokens)
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Standard HF generation hook; threads the multimodal kwargs through to ``forward``."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        patch_images = kwargs.pop("patch_images", None)
        ind_tokens = kwargs.pop("ind_tokens", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        if patch_images is not None:
            inputs['patch_images'] = patch_images
        if ind_tokens is not None:
            inputs['ind_tokens'] = ind_tokens
        return inputs
+
+
+AutoConfig.register("llava_llama", LlavaConfig)
+AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
diff --git a/VLMEvalKit-sudoku/llava/model/llava_arch.py b/VLMEvalKit-sudoku/llava/model/llava_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..de85c7a6a6fc59f1bed912679c954441bc4394f0
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/llava_arch.py
@@ -0,0 +1,808 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from abc import ABC, abstractmethod
+
+import math
+import re
+import time
+import torch
+import torch.nn as nn
+from .multimodal_encoder.builder import build_vision_tower
+from .multimodal_resampler.builder import build_vision_resampler
+from .multimodal_projector.builder import build_vision_projector
+
+from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+
+from llava.mm_utils import get_anyres_image_grid_shape
+from llava.utils import rank0_print, rank_print
+import random
+
+
class LlavaMetaModel:
    """Mixin that adds the vision tower / resampler / projector stack to a base LM model."""

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        # Only build the vision modules when the config comes from a multimodal
        # checkpoint (i.e. it carries ``mm_vision_tower``).
        if hasattr(config, "mm_vision_tower"):
            delay_load = getattr(config, "delay_load", False)
            self.vision_tower = build_vision_tower(config, delay_load=delay_load)
            self.vision_resampler = build_vision_resampler(config, vision_tower=self.vision_tower)
            self.mm_projector = build_vision_projector(config, vision_cfg=self.vision_tower.config)

            if "unpad" in getattr(config, "mm_patch_merge_type", ""):
                # Learned separator embedding appended after image feature rows.
                self.image_newline = nn.Parameter(torch.empty(config.hidden_size, dtype=self.dtype))

    def get_vision_tower(self):
        """Return the vision tower, unwrapping the FSDP-style one-element list if present."""
        vision_tower = getattr(self, "vision_tower", None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        """Create or reload the vision tower, resampler and projector from training args.

        Args:
            model_args: namespace carrying the ``mm_*`` vision settings.
            fsdp: optional list of FSDP shards; when non-empty, the modules are
                wrapped in single-element lists (FSDP convention used here).
        """
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
        mm_patch_merge_type = model_args.mm_patch_merge_type

        self.config.mm_vision_tower = vision_tower
        self.config.vision_tower_pretrained = getattr(model_args, "vision_tower_pretrained", "")

        if self.get_vision_tower() is None:
            # First-time setup: build the tower and resampler from scratch.
            vision_tower = build_vision_tower(model_args)
            vision_resampler = build_vision_resampler(model_args, vision_tower=vision_tower)
            for k, v in vision_resampler.config.items():
                setattr(self.config, k, v)

            if fsdp is not None and len(fsdp) > 0:
                self.vision_tower = [vision_tower]
                self.vision_resampler = [vision_resampler]
            else:
                self.vision_tower = vision_tower
                self.vision_resampler = vision_resampler
        else:
            # Modules already exist (e.g. resuming): just reload the tower weights.
            if fsdp is not None and len(fsdp) > 0:
                vision_resampler = self.vision_resampler[0]
                vision_tower = self.vision_tower[0]
            else:
                vision_resampler = self.vision_resampler
                vision_tower = self.vision_tower
            vision_tower.load_model()

            # In case it is frozen by LoRA
            for p in self.vision_resampler.parameters():
                p.requires_grad = True

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, "mm_projector_type", "linear")
        self.config.mm_hidden_size = getattr(vision_resampler, "hidden_size", vision_tower.hidden_size)
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.config.mm_patch_merge_type = mm_patch_merge_type

        # NOTE(review): this only creates ``faster_token`` when the config lacks an
        # ``add_faster_video`` attribute, and never writes the flag back to the
        # config here — confirm callers set ``config.add_faster_video`` elsewhere.
        if not hasattr(self.config, 'add_faster_video'):
            if model_args.add_faster_video:
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.faster_token = nn.Parameter(
                    torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std
                )

        if getattr(self, "mm_projector", None) is None:
            self.mm_projector = build_vision_projector(self.config, vision_cfg=vision_tower.config)

            if "unpad" in mm_patch_merge_type:
                embed_std = 1 / torch.sqrt(torch.tensor(self.config.hidden_size, dtype=self.dtype))
                self.image_newline = nn.Parameter(torch.randn(self.config.hidden_size, dtype=self.dtype) * embed_std)
        else:
            # In case it is frozen by LoRA
            for p in self.mm_projector.parameters():
                p.requires_grad = True

        if pretrain_mm_mlp_adapter is not None:
            # Load pretrained stage-1 projector/resampler weights from the adapter file.
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location="cpu")

            def get_w(weights, keyword):
                # Strip the "<keyword>." prefix from matching state-dict keys.
                return {k.split(keyword + ".")[1]: v for k, v in weights.items() if keyword in k}

            incompatible_keys = self.mm_projector.load_state_dict(get_w(mm_projector_weights, "mm_projector"))
            rank0_print(f"Loaded mm projector weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")
            incompatible_keys = self.vision_resampler.load_state_dict(get_w(mm_projector_weights, "vision_resampler"), strict=False)
            rank0_print(f"Loaded vision resampler weights from {pretrain_mm_mlp_adapter}. Incompatible keys: {incompatible_keys}")
+
def unpad_image(tensor, original_size):
    """Crop letterbox padding from a padded-and-resized CxHxW image tensor.

    Args:
        tensor (torch.Tensor): image tensor in CxHxW layout.
        original_size (tuple): pre-resize image size as ``(width, height)``.

    Returns:
        torch.Tensor: the tensor with the padded rows or columns sliced away.
    """
    orig_w, orig_h = original_size
    cur_h, cur_w = tensor.shape[1:]

    if orig_w / orig_h > cur_w / cur_h:
        # Letterboxed vertically: equal bands were added above and below.
        fitted_h = int(orig_h * (cur_w / orig_w))
        pad = (cur_h - fitted_h) // 2
        return tensor[:, pad : cur_h - pad, :]

    # Letterboxed horizontally: equal bands were added left and right.
    fitted_w = int(orig_w * (cur_h / orig_h))
    pad = (cur_w - fitted_w) // 2
    return tensor[:, :, pad : cur_w - pad]
+
+
class LlavaMetaForCausalLM(ABC):
    """Mixin with the multimodal feature-encoding logic shared by LLaVA causal LMs."""

    @abstractmethod
    def get_model(self):
        """Return the underlying backbone model (implemented by the concrete LM class)."""
        pass

    def get_vision_tower(self):
        # Convenience passthrough to the backbone's vision tower.
        return self.get_model().get_vision_tower()
+
+ def get_2dPool(self, image_feature, stride=2):
+ height = width = self.get_vision_tower().num_patches_per_side
+ num_frames, num_tokens, num_dim = image_feature.shape
+ image_feature = image_feature.view(num_frames, height, width, -1)
+ image_feature = image_feature.permute(0, 3, 1, 2).contiguous()
+ # image_feature = nn.functional.max_pool2d(image_feature, self.config.mm_spatial_pool_stride)
+ if self.config.mm_spatial_pool_mode == "average":
+ image_feature = nn.functional.avg_pool2d(image_feature, stride)
+ elif self.config.mm_spatial_pool_mode == "max":
+ image_feature = nn.functional.max_pool2d(image_feature, stride)
+ elif self.config.mm_spatial_pool_mode == "bilinear":
+ height, width = image_feature.shape[2:]
+ scaled_shape = [math.ceil(height / stride), math.ceil(width / stride)]
+ image_feature = nn.functional.interpolate(image_feature, size=scaled_shape, mode='bilinear')
+
+ else:
+ raise ValueError(f"Unexpected mm_spatial_pool_mode: {self.config.mm_spatial_pool_mode}")
+ image_feature = image_feature.permute(0, 2, 3, 1)
+ image_feature = image_feature.view(num_frames, -1, num_dim)
+ return image_feature
+
+ def concat_src_patch_images(self, images, patch_images, ind_tokens, per_patch_size = 14):
+ all_images = []
+ patch_sizes = []
+ for src_image, patches, ind_token in zip(images, patch_images, ind_tokens):
+ if len(ind_token) == 0:
+ all_images += [src_image]
+ img_h, img_w = src_image.shape[-2:]
+ patch_sizes.append((img_h // per_patch_size, img_w // per_patch_size))
+ else:
+ patches = [patch for patch in patches]
+ slice_img_h, slice_img_w = patches[0].shape[-2:]
+ patch_sizes += [(slice_img_h // per_patch_size, slice_img_w // per_patch_size)] * len(patches)
+
+ patches += [src_image]
+ abs_img_h, abs_img_w = src_image.shape[-2:]
+ patch_sizes.append((abs_img_h // per_patch_size, abs_img_w // per_patch_size))
+
+ all_images += patches
+
+ return all_images, patch_sizes
+
+ def encode_images(self, images): #torch.Size([4, 3, 336, 336])
+ patch_sizes = []
+ for _ in range(images.shape[0]):
+ patch_sizes.append((images.shape[2] // 14, images.shape[3] // 14))
+ tgt_sizes = torch.tensor(patch_sizes, dtype=torch.long, device=images[0].device)
+
+ image_features = self.get_model().get_vision_tower()(images, tgt_sizes)
+ image_features = torch.cat(image_features, dim=0)
+ # image_features = self.get_model().vision_resampler(image_features, images=images)
+ image_features = self.get_model().mm_projector(image_features)
+ return image_features
+
+ def partition_list(self, input_list, lengths):
+ """
+ 按照指定的长度划分列表。
+
+ 参数:
+ input_list (list): 要划分的原始列表。
+ lengths (list): 一个包含划分长度的整数列表。
+
+ 返回:
+ list: 一个包含子列表的列表,每个子列表的长度由 lengths 指定。
+ """
+ result = []
+ current_index = 0
+ for length in lengths:
+ if current_index + length > len(input_list):
+ raise ValueError("划分长度超过了列表的总长度")
+ sublist = input_list[current_index:current_index + length]
+ result.append(sublist)
+ current_index += length
+ if current_index != len(input_list):
+ raise ValueError("划分长度和列表总长度不一致")
+ return result
+
+ def encode_images_uhd_v1(self, images, patch_images, ind_tokens):
+ num_images = [len(ind_token) + 1 for ind_token in ind_tokens]
+ # concat images
+ per_patch_size = 14
+ down_sample_ratio = 1
+
+ if 'siglip2' in self.get_vision_tower().vision_tower_name:
+ model_config = self.get_model().get_vision_tower().vision_tower.config
+ per_patch_size = getattr(model_config, "patch_size", 16)
+ # per_patch_size = 14
+ if hasattr(model_config, "vision_config"):
+ vision_model_config = model_config.vision_config
+ if vision_model_config.get('merger_layer_index', False):
+ merger_layer_index = vision_model_config['merger_layer_index']
+ down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
+ else:
+ if hasattr(model_config, 'merger_layer_index'):
+ merger_layer_index = model_config.merger_layer_index
+ down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
+
+ elif 'moonvit' in self.get_vision_tower().vision_tower_name:
+ model_config = self.get_model().get_vision_tower().vision_tower.config
+ per_patch_size = getattr(model_config, "patch_size", 14)
+ if hasattr(model_config, "vision_config"):
+ vision_model_config = model_config.vision_config
+ if vision_model_config.get('merger_layer_index', False):
+ merger_layer_index = vision_model_config['merger_layer_index']
+ down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
+ else:
+ if hasattr(model_config, 'merger_layer_index'):
+ merger_layer_index = model_config.merger_layer_index
+ down_sample_ratio = down_sample_ratio * len(merger_layer_index)**2
+
+ elif 'qwen2_5vl' in self.get_vision_tower().vision_tower_name:
+ model_config = self.get_model().get_vision_tower().vision_tower.config
+ per_patch_size = getattr(model_config, "patch_size", 14)
+
+ images, patch_sizes = self.concat_src_patch_images(images, patch_images, ind_tokens, per_patch_size)
+ image_features = self.get_model().get_vision_tower()(images, patch_sizes)
+ max_patch_sizes = max([patch_size[0] * patch_size[1] for patch_size in patch_sizes])
+ projected_image_features = []
+ # breakpoint()
+ for image_feature, patch_size in zip(image_features, patch_sizes):
+ # import pdb; pdb.set_trace()
+ # breakpoint()
+ image_feature = tuple(feat.to(torch.bfloat16) for feat in image_feature)
+ # image_feature = image_feature.to(torch.bfloat16)
+ patch_size = (patch_size[0] // down_sample_ratio, patch_size[1] // down_sample_ratio)
+
+ if self.config.mm_projector_type == "resampler" and 'siglip2' in self.get_vision_tower().vision_tower_name:
+ projected_image_feature = self.get_model().mm_projector(image_feature, tgt_size=patch_size, max_patch_sizes=max_patch_sizes)
+ else:
+ projected_image_feature = self.get_model().mm_projector(image_feature, tgt_size=patch_size) # 1, n, c
+ projected_image_feature = projected_image_feature[0]
+ projected_image_features.append(projected_image_feature)
+
+ # chunk features
+ projected_image_features = self.partition_list(projected_image_features, num_images)
+ # import pdb; pdb.set_trace()
+ return projected_image_features
+
+ # def encode_images_uhd_v2(self, images, patch_images, ind_tokens):
+ # # start = time.time()
+ # num_images = [len(ind_token) + 1 for ind_token in ind_tokens]
+ # # concat images
+ # images, patch_sizes = self.concat_src_patch_images(images, patch_images, ind_tokens)
+
+ # tgt_sizes = torch.tensor(patch_sizes, dtype=torch.long, device=images[0].device)
+
+ # features_1x = self.get_model().get_vision_tower().forward_uhd_v2(images, tgt_sizes) #list torch.Size([1, 550, 1024])
+
+ # return self.get_model().mm_projector.forward_with_featup(features_1x, patch_sizes, images, num_images)
+
+
+ def encode_multimodals(self, videos_or_images, video_idx_in_batch, split_sizes=None):
+ videos_or_images_features = self.get_model().get_vision_tower()(videos_or_images)
+ per_videos_or_images_features = torch.split(videos_or_images_features, split_sizes, dim=0) # tuple, (dim_1, 576, 4096)
+ all_videos_or_images_features = []
+ all_faster_video_features = []
+ cur_mm_spatial_pool_stride = self.config.mm_spatial_pool_stride
+
+ for idx, feat in enumerate(per_videos_or_images_features):
+
+ feat = self.get_model().mm_projector(feat)
+ faster_video_feature = 0
+ slower_img_feat = 0
+ if idx in video_idx_in_batch and cur_mm_spatial_pool_stride > 1:
+ slower_img_feat = self.get_2dPool(feat,cur_mm_spatial_pool_stride)
+ if self.config.add_faster_video:
+ cur_mm_spatial_pool_stride = cur_mm_spatial_pool_stride * 2
+ faster_video_feature = self.get_2dPool(feat,cur_mm_spatial_pool_stride)
+ if slower_img_feat != 0:
+ all_videos_or_images_features.append(slower_img_feat)
+ else:
+ all_videos_or_images_features.append(feat)
+ all_faster_video_features.append(faster_video_feature)
+ return all_videos_or_images_features,all_faster_video_features
+
    def add_token_per_grid(self, image_feature):
        """Append the learned image-newline token after every grid row of frame features.

        Assumes ``image_feature`` is (num_frames, tokens, dim) with a square
        per-frame token grid (height inferred via sqrt) — TODO confirm callers.
        """
        resize_h = int(math.sqrt(image_feature.shape[1]))
        num_frames = image_feature.shape[0]
        feature_dim = image_feature.shape[-1]

        image_feature = image_feature.view(num_frames, 1, resize_h, resize_h, -1)
        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
        image_feature = image_feature.flatten(1, 2).flatten(2, 3)  # e.g. torch.Size([3584, 224, 14])
        # Concatenate the newline embedding at the end of each row.
        image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)  # e.g. torch.Size([3584, 224, 15])
        if getattr(self.config, "add_faster_video", False):
            # (3584, 832, 14) -> (3584, 64, 13, 14)
            image_feature = image_feature.view(feature_dim, num_frames, resize_h, -1)
            # (3584, 64, 13, 14) -> (64, 13, 14, 3584)
            image_feature = image_feature.permute(1, 2, 3, 0).contiguous()
            # (64, 13, 14, 3584) -> (64, 13*14, 3584): keep a per-frame token axis.
            image_feature = image_feature.flatten(1, 2)
            return image_feature
        # Flatten all frames into a single (tokens, dim) sequence, e.g. torch.Size([3360, 3584]).
        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
        return image_feature
+
    def add_token_per_frame(self, image_feature):
        """Append the learned image-newline embedding to each frame's feature block."""
        # Move the feature dim first so the newline can be concatenated on the last axis.
        image_feature = image_feature.permute(2, 0, 1).contiguous()
        image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)
        image_feature = image_feature.permute(1, 2, 0).contiguous()
        return image_feature
+
+
+ def prepare_inputs_labels_for_multimodal(self, input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities=["image"], image_sizes=None, patch_images=None, ind_tokens=None):
+ vision_tower = self.get_vision_tower()
+ # rank_print(modalities)
+ if vision_tower is None or images is None or input_ids.shape[1] == 1:
+ return input_ids, position_ids, attention_mask, past_key_values, None, labels
+
+ if isinstance(modalities, str):
+ modalities = [modalities]
+
+ model_mode = getattr(self.config, 'model_mode', 'llava')
+ # import pdb; pdb.set_trace()
+ if model_mode == 'llava' and (type(images) is list or images.ndim == 5):
+ if type(images) is list:
+ images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images] #torch.Size([16, 3, 384, 384])
+
+ video_idx_in_batch = []
+ for _ in range(len(modalities)):
+ if modalities[_] == "video":
+ video_idx_in_batch.append(_)
+
+ images_list = []
+ for image in images:
+ if image.ndim == 4:
+ images_list.append(image)
+ else:
+ images_list.append(image.unsqueeze(0))
+
+ concat_images = torch.cat([image for image in images_list], dim=0) #torch.Size([16, 3, 384, 384])
+ split_sizes = [image.shape[0] for image in images_list]
+ encoded_image_features = self.encode_images(concat_images) #video:torch.Size([16, 729, 3584]), muti: torch.Size([4, 729, 3584])
+ # image_features,all_faster_video_features = self.encode_multimodals(concat_images, video_idx_in_batch, split_sizes)
+
+ # This is a list, each element is [num_images, patch * patch, dim]
+ # rank_print(f"Concat images : {concat_images.shape}")
+ encoded_image_features = torch.split(encoded_image_features, split_sizes) #[torch.Size([16, 196, 3584])], muti: [4x torch.Size([1, 729, 3584])]
+ image_features = []
+ for idx, image_feat in enumerate(encoded_image_features):
+ if idx in video_idx_in_batch:
+ image_features.append(self.get_2dPool(image_feat))
+ else:
+ image_features.append(image_feat)
+ # image_features = self.encode_multimodals(concat_images, video_idx_in_batch, split_sizes)
+ # rank_print(f"Encoded image feats : {[x.shape for x in image_features]}")
+ # image_features = torch.split(image_features, split_sizes, dim=0)
+ mm_patch_merge_type = getattr(self.config, "mm_patch_merge_type", "flat")
+ image_aspect_ratio = getattr(self.config, "image_aspect_ratio", "square")
+ mm_newline_position = getattr(self.config, "mm_newline_position", "one_token")
+
+ mm_newline_position = 'grid'
+ if mm_patch_merge_type == "flat":
+ image_features = [x.flatten(0, 1) for x in image_features]
+
+ elif mm_patch_merge_type.startswith("spatial"):
+ new_image_features = []
+ for image_idx, image_feature in enumerate(image_features):
+ # FIXME: now assume the image is square, and split to 2x2 patches
+ # num_patches = h * w, where h = w = sqrt(num_patches)
+ # currently image_feature is a tensor of shape (4, num_patches, hidden_size)
+ # we want to first unflatten it to (2, 2, h, w, hidden_size)
+ # rank0_print("At least we are reaching here")
+ # import pdb; pdb.set_trace()
+ if image_idx in video_idx_in_batch: # video operations
+ # rank0_print("Video")
+ if mm_newline_position == "grid":
+ # Grid-wise
+ image_feature = self.add_token_per_grid(image_feature)
+ if getattr(self.config, "add_faster_video", False):
+ faster_video_feature = self.add_token_per_grid(all_faster_video_features[image_idx])
+ # Add a token for each frame
+ concat_slow_fater_token = []
+ # import pdb; pdb.set_trace()
+ for _ in range(image_feature.shape[0]):
+ if _ % self.config.faster_token_stride == 0:
+ concat_slow_fater_token.append(torch.cat((image_feature[_], self.model.faster_token[None].to(image_feature.device)), dim=0))
+ else:
+ concat_slow_fater_token.append(torch.cat((faster_video_feature[_], self.model.faster_token[None].to(image_feature.device)), dim=0))
+ # import pdb; pdb.set_trace()
+ image_feature = torch.cat(concat_slow_fater_token)
+
+ # print("!!!!!!!!!!!!")
+
+ new_image_features.append(image_feature)
+ elif mm_newline_position == "frame":
+ # Frame-wise
+ image_feature = self.add_token_per_frame(image_feature)
+
+ new_image_features.append(image_feature.flatten(0, 1))
+
+ elif mm_newline_position == "one_token":
+ # one-token
+ image_feature = image_feature.flatten(0, 1)
+ if 'unpad' in mm_patch_merge_type:
+ image_feature = torch.cat((
+ image_feature,
+ self.model.image_newline[None].to(image_feature.device)
+ ), dim=0)
+ new_image_features.append(image_feature)
+ elif mm_newline_position == "no_token":
+ new_image_features.append(image_feature.flatten(0, 1))
+ else:
+ raise ValueError(f"Unexpected mm_newline_position: {mm_newline_position}")
+ elif image_feature.shape[0] > 1: # multi patches and multi images operations
+ # rank0_print("Single-images")
+ base_image_feature = image_feature[0]
+ image_feature = image_feature[1:]
+ height = width = self.get_vision_tower().num_patches_per_side
+ assert height * width == base_image_feature.shape[0]
+
+ if "anyres_max" in image_aspect_ratio:
+ matched_anyres_max_num_patches = re.match(r"anyres_max_(\d+)", image_aspect_ratio)
+ if matched_anyres_max_num_patches:
+ max_num_patches = int(matched_anyres_max_num_patches.group(1))
+
+ if image_aspect_ratio == "anyres" or "anyres_max" in image_aspect_ratio:
+ if hasattr(self.get_vision_tower(), "image_size"):
+ vision_tower_image_size = self.get_vision_tower().image_size
+ else:
+ raise ValueError("vision_tower_image_size is not found in the vision tower.")
+ try:
+ num_patch_width, num_patch_height = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, vision_tower_image_size)
+ except Exception as e:
+ rank0_print(f"Error: {e}")
+ num_patch_width, num_patch_height = 2, 2
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
+ else:
+ image_feature = image_feature.view(2, 2, height, width, -1)
+
+ if "maxpool2x2" in mm_patch_merge_type:
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+ image_feature = nn.functional.max_pool2d(image_feature, 2)
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+ elif "unpad" in mm_patch_merge_type and "anyres_max" in image_aspect_ratio and matched_anyres_max_num_patches:
+ unit = image_feature.shape[2]
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
+ c, h, w = image_feature.shape
+ times = math.sqrt(h * w / (max_num_patches * unit**2))
+ if times > 1.1:
+ image_feature = image_feature[None]
+ image_feature = nn.functional.interpolate(image_feature, [int(h // times), int(w // times)], mode="bilinear")[0]
+ image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+ elif "unpad" in mm_patch_merge_type:
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
+ image_feature = torch.cat((image_feature, self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)), dim=-1)
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+ else:
+ image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
+ image_feature = image_feature.flatten(0, 3)
+ if "nobase" in mm_patch_merge_type:
+ pass
+ else:
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+ new_image_features.append(image_feature)
+ else: # single image operations
+ image_feature = image_feature[0]
+ if "unpad" in mm_patch_merge_type:
+ image_feature = torch.cat((image_feature, self.model.image_newline[None]), dim=0)
+
+ new_image_features.append(image_feature)
+ image_features = new_image_features
+ else:
+ raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}")
+ # elif model_mode == 'uhd_v2':
+ # image_features = self.encode_images_uhd_v2(images, patch_images, ind_tokens)
+ elif model_mode == 'uhd_v1':
+ image_features = self.encode_images_uhd_v1(images, patch_images, ind_tokens)
+ else:
+ image_features = self.encode_images(images)
+ # [2x[3xtorch.Size([144, 3584])]]
+ # TODO: image start / end is not implemented here to support pretraining.
+ if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(self.config, "mm_use_im_start_end", False):
+ raise NotImplementedError
+ # rank_print(f"Total images : {len(image_features)}")
+
+ # Let's just add dummy tensors if they do not exist,
+ # it is a headache to deal with None all the time.
+ # But it is not ideal, and if you have a better idea,
+ # please open an issue / submit a PR, thanks.
+ _labels = labels
+ _position_ids = position_ids
+ _attention_mask = attention_mask
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
+ else:
+ attention_mask = attention_mask.bool()
+ if position_ids is None:
+ position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
+ if labels is None:
+ labels = torch.full_like(input_ids, IGNORE_INDEX)
+
+ # remove the padding using attention_mask -- FIXME
+ _input_ids = input_ids
+ input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
+ labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
+
+ new_input_embeds = []
+ new_labels = []
+ cur_image_idx = 0
+ # rank_print("Inserting Images embedding")
+ for batch_idx, cur_input_ids in enumerate(input_ids):
+ num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
+ # rank0_print(num_images)
+ if num_images == 0:
+ cur_image_features = image_features[cur_image_idx]
+ cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
+ if type(cur_image_features) is list:
+ cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0][0:0]], dim=0)
+ else:
+ cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
+ new_input_embeds.append(cur_input_embeds)
+ new_labels.append(labels[batch_idx])
+ cur_image_idx += 1
+ continue
+
+ image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
+ cur_input_ids_noim = []
+ cur_labels = labels[batch_idx]
+ cur_labels_noim = []
+ for i in range(len(image_token_indices) - 1):
+ cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1 : image_token_indices[i + 1]])
+ cur_labels_noim.append(cur_labels[image_token_indices[i] + 1 : image_token_indices[i + 1]])
+ split_sizes = [x.shape[0] for x in cur_labels_noim]
+ cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
+ cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
+ cur_new_input_embeds = []
+ cur_new_labels = []
+
+ for i in range(num_images + 1):
+ cur_new_input_embeds.append(cur_input_embeds_no_im[i])
+ cur_new_labels.append(cur_labels_noim[i])
+ if i < num_images:
+ try:
+ cur_image_features = image_features[cur_image_idx]
+ except IndexError:
+ cur_image_features = image_features[cur_image_idx - 1]
+
+ if model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
+ # slice features need 'for'
+ cur_ind_tokens = ind_tokens[cur_image_idx]
+ cur_image_idx += 1
+ cur_ind_tokens_embeds = self.get_model().embed_tokens(
+ torch.as_tensor(cur_ind_tokens, # \n , -> 13, 1919
+ dtype=torch.long,
+ device=cur_image_features[0].device))
+ else:
+ cur_image_idx += 1
+ cur_ind_tokens_embeds = []
+
+ if len(cur_ind_tokens_embeds) == 0: # 没有切片
+ if model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
+ cur_image_features = cur_image_features[-1]
+ else:
+ # whether not use the permute strategy
+ UsePermute = False
+ if not UsePermute:
+ abs_image_features = cur_image_features[-1]
+ slice_image_features = cur_image_features[:-1]
+ _cur_image_features = []
+ for image_feature_, ind_token_embeds_ in zip(slice_image_features, cur_ind_tokens_embeds):
+ _cur_image_features.append(torch.cat([image_feature_, ind_token_embeds_[None]], dim=0))
+ _cur_image_features.append(abs_image_features)
+ cur_image_features = torch.cat(_cur_image_features, dim=0)
+ elif model_mode == 'uhd_v1' or model_mode == 'uhd_v2':
+ # import pdb;pdb.set_trace()
+ abs_image_features = cur_image_features[-1]
+ slice_image_features = cur_image_features[:-1] # list
+
+ slice_image_features_with_batch = [slice_feat.unsqueeze(0) for slice_feat in slice_image_features]
+
+ slice_image_features_with_batch = torch.cat(slice_image_features_with_batch, dim=0)
+ slice_number, grid , channels = slice_image_features_with_batch.shape
+ edge = int(grid ** 0.5)
+
+ # slice_number_check = len(cur_ind_tokens)
+ assert slice_number == len(cur_ind_tokens), "slice_number != len(cur_ind_tokens)"
+
+ slice_in_row = 0
+ for i in range(slice_number):
+ if cur_ind_tokens[i] == 29892:
+ slice_in_row += 1
+ elif cur_ind_tokens[i] == 13:
+ slice_in_row += 1
+ break
+ else:
+ raise ValueError(f"Unexpected ind_token: {cur_ind_tokens[i]}")
+ assert slice_in_row >= 1, "no slices at all!"
+ slice_in_column = slice_number // slice_in_row
+ h_w_ratio = (slice_in_column*1.0) / slice_in_row
+ if h_w_ratio > 1:
+ ori_patch_size = (edge, int(edge/h_w_ratio))
+ else:
+ ori_patch_size = (int(edge*h_w_ratio), edge)
+ # import pdb;pdb.set_trace()
+ # 144, 4096
+ abs_image_features= abs_image_features.reshape(edge, edge, channels).permute(2, 0, 1).unsqueeze(0)
+ # abs_image_features = F.interpolate(abs_image_features, size=ori_patch_size, mode='bilinear', align_corners=False)
+ abs_image_features = abs_image_features.squeeze(0).permute(1, 2, 0).reshape(-1, channels)
+
+ # slice_in_row: how many slices in a row
+ # slice_in_column: how many slices in a column
+ # slice_number: how many slices in total
+ comma_notation = cur_ind_tokens_embeds[0] # what does a comma say in embed
+ enter_notation = cur_ind_tokens_embeds[slice_in_row-1] # what does a enter say in embed
+
+ slice_stack = slice_image_features_with_batch.reshape(slice_in_column, slice_in_row, edge, edge, channels)
+ slice_stack = slice_stack.permute(0, 2, 1, 3, 4).reshape(slice_in_column * edge, slice_in_row * edge, channels)
+ # import pdb;pdb.set_trace()
+ enter_notation = enter_notation.unsqueeze(0).unsqueeze(0).expand(slice_in_column * edge, -1, -1)
+ slice_stack = torch.cat([slice_stack, enter_notation], dim=1)
+ slice_stack = slice_stack.reshape(-1, channels)
+
+
+ cur_image_features = torch.cat([slice_stack, comma_notation[None], abs_image_features], dim=0)
+
+ cur_new_input_embeds.append(cur_image_features)
+ cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
+
+ cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds]
+
+ # import pdb; pdb.set_trace()
+ cur_new_input_embeds = torch.cat(cur_new_input_embeds)
+ cur_new_labels = torch.cat(cur_new_labels)
+
+ new_input_embeds.append(cur_new_input_embeds)
+ new_labels.append(cur_new_labels)
+
+ # Truncate sequences to max length as image embeddings can make the sequence longer
+ tokenizer_model_max_length = getattr(self.config, "tokenizer_model_max_length", None)
+ # rank_print("Finishing Inserting")
+
+ new_input_embeds = [x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
+ new_labels = [x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]
+ # TODO: Hard code for control loss spike
+ # if tokenizer_model_max_length is not None:
+ # new_input_embeds = [x[:4096] if modality != "video" else x[:tokenizer_model_max_length] for x, modality in zip(new_input_embeds, modalities)]
+ # new_labels = [x[:4096] if modality != "video" else x[:tokenizer_model_max_length] for x, modality in zip(new_labels, modalities)]
+
+ # Combine them
+ max_len = max(x.shape[0] for x in new_input_embeds)
+ batch_size = len(new_input_embeds)
+
+ new_input_embeds_padded = []
+ new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
+ attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
+ position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
+ # rank0_print("Prepare pos id")
+
+ for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
+ cur_len = cur_new_embed.shape[0]
+ if getattr(self.config, "tokenizer_padding_side", "right") == "left":
+ new_input_embeds_padded.append(torch.cat((torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device), cur_new_embed), dim=0))
+ if cur_len > 0:
+ new_labels_padded[i, -cur_len:] = cur_new_labels
+ attention_mask[i, -cur_len:] = True
+ position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
+ else:
+ new_input_embeds_padded.append(torch.cat((cur_new_embed, torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0))
+ if cur_len > 0:
+ new_labels_padded[i, :cur_len] = cur_new_labels
+ attention_mask[i, :cur_len] = True
+ position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
+
+ new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
+ # rank0_print("tokenizer padding")
+
+ if _labels is None:
+ new_labels = None
+ else:
+ new_labels = new_labels_padded
+
+ if _attention_mask is None:
+ attention_mask = None
+ else:
+ attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
+
+ if _position_ids is None:
+ position_ids = None
+ if getattr(self.config, "use_pos_skipping", False) and self.training:
+ position_ids = torch.arange(new_input_embeds.size(1), device=new_input_embeds.device).unsqueeze(0).to(new_input_embeds.device)
+ split_position = random.randint(0, new_input_embeds.size(1))
+ left_add = random.randint(0, self.config.pos_skipping_range)
+ right_add = random.randint(left_add, self.config.pos_skipping_range)
+ position_ids[:, :split_position] += left_add
+ position_ids[:, split_position:] += right_add
+ # import pdb; pdb.set_trace()
+ # rank0_print("Finish preparing")
+ return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
+
+ def initialize_vision_tokenizer(self, model_args, tokenizer):
+ if model_args.mm_use_im_patch_token:
+ tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+ self.resize_token_embeddings(len(tokenizer))
+
+ if model_args.mm_use_im_start_end:
+ num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+ self.resize_token_embeddings(len(tokenizer))
+
+ if num_new_tokens > 0:
+ input_embeddings = self.get_input_embeddings().weight.data
+ output_embeddings = self.get_output_embeddings().weight.data
+
+ input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
+ output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
+
+ input_embeddings[-num_new_tokens:] = input_embeddings_avg
+ output_embeddings[-num_new_tokens:] = output_embeddings_avg
+
+ if model_args.tune_mm_mlp_adapter:
+ for p in self.get_input_embeddings().parameters():
+ p.requires_grad = True
+ for p in self.get_output_embeddings().parameters():
+ p.requires_grad = False
+
+ if model_args.pretrain_mm_mlp_adapter:
+ mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location="cpu")
+ embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
+ assert num_new_tokens == 2
+ if input_embeddings.shape == embed_tokens_weight.shape:
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
+ elif embed_tokens_weight.shape[0] == num_new_tokens:
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight
+ else:
+ raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.")
+ elif model_args.mm_use_im_patch_token:
+ if model_args.tune_mm_mlp_adapter:
+ for p in self.get_input_embeddings().parameters():
+ p.requires_grad = False
+ for p in self.get_output_embeddings().parameters():
+ p.requires_grad = False
diff --git a/VLMEvalKit-sudoku/llava/model/make_delta.py b/VLMEvalKit-sudoku/llava/model/make_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b3fbabe19506d5710dbc194db4000fee62c712d
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/make_delta.py
@@ -0,0 +1,52 @@
+"""
+Usage:
+python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
+"""
+
+import argparse
+
+import torch
+from tqdm import tqdm
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from llava.model.utils import auto_upgrade
+
+
def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
    """Compute (target - base) weight deltas and save them to *delta_path*.

    Args:
        base_model_path: path of the base checkpoint (e.g. plain LLaMA).
        target_model_path: path of the fine-tuned (LLaVA) checkpoint.
        delta_path: output directory for the delta weights and tokenizer.
        hub_repo_id: if truthy, also push the result to this HF Hub repo.
    """
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Loading target model")
    auto_upgrade(target_model_path)
    target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Calculating delta")
    # Hoist the state dict: Module.state_dict() rebuilds its mapping on every
    # call, and the original code called it up to three times per parameter.
    base_state_dict = base.state_dict()
    for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
        if name not in base_state_dict:
            # Only the mm projector is allowed to be target-only.
            assert name in ["model.mm_projector.weight", "model.mm_projector.bias"], f"{name} not in base model"
            continue
        bparam = base_state_dict[name]
        if param.data.shape == bparam.shape:
            param.data -= bparam
        else:
            # The target vocabulary was extended; subtract only the overlap.
            assert name in ["model.embed_tokens.weight", "lm_head.weight"], f"{name} dimension mismatch: {param.data.shape} vs {bparam.shape}"
            param.data[: bparam.shape[0], : bparam.shape[1]] -= bparam

    print("Saving delta")
    if hub_repo_id:
        kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
    else:
        kwargs = {}
    target.save_pretrained(delta_path, **kwargs)
    target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
    target_tokenizer.save_pretrained(delta_path, **kwargs)
+
+
if __name__ == "__main__":
    # CLI entry point -- see the module docstring for example usage.
    parser = argparse.ArgumentParser()
    for required_flag in ("--base-model-path", "--target-model-path", "--delta-path"):
        parser.add_argument(required_flag, type=str, required=True)
    parser.add_argument("--hub-repo-id", type=str, default=None)
    cli_args = parser.parse_args()

    make_delta(
        cli_args.base_model_path,
        cli_args.target_model_path,
        cli_args.delta_path,
        cli_args.hub_repo_id,
    )
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9899cf7596fd43f9a447991c15ea6a463451e04
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/builder.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_qwen2_5vl.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_qwen2_5vl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6f02362b5e0ccb8328da3679c14980d979a644f
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_qwen2_5vl.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d5a330ef3d7fc143a0fe86463d907f82d8a4398
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/siglip_encoder.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/siglip_encoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18d2e1c73ef47b4b721733d045e741e763af9e65
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/siglip_encoder.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/adapt_clip_vision_model.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/adapt_clip_vision_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aea627bd35cdb210383f6fc3f1d3f2f93cbacdb
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/adapt_clip_vision_model.py
@@ -0,0 +1,236 @@
+import torch
+import torch.nn as nn
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
+from transformers.models.clip.modeling_clip import CLIPVisionTransformer, CLIPEncoder, CLIPVisionEmbeddings, CLIPConfig, BaseModelOutput
+from transformers.activations import ACT2FN
+from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
+from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from transformers.modeling_utils import PreTrainedModel
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ logging,
+ replace_return_docstrings,
+)
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+CLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
class AdaptCLIPVisionEmbeddings(CLIPVisionEmbeddings):
    """CLIP vision embeddings that tolerate non-square / variable-size inputs.

    Unlike the stock ``CLIPVisionEmbeddings``, :meth:`forward` accepts inputs
    whose patch grid differs from the pretraining grid and bicubically resizes
    the learned position embeddings to match.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Learnable [CLS] token embedding.
        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        # Non-overlapping patchify convolution (stride == kernel == patch size).
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1  # +1 for the [CLS] slot
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def resize_pos_embedding(self, position_embedding, dst_size=(24, 24), square_size=24):
        """Bicubically resize a (1, n+1, c) position table to a new patch grid.

        The [CLS] position (index 0) is kept as-is; only the patch positions
        are reshaped to a (square_size, square_size) grid and interpolated to
        ``dst_size`` = (patch_height, patch_width).
        """
        _dtype = position_embedding.dtype
        patch_height, patch_width = dst_size
        class_position_embedding = position_embedding[:, :1]  # 1, 1, c
        patch_position_embedding = position_embedding[:, 1:]  # 1, 576, c

        patch_position_embedding = patch_position_embedding.permute(0, 2, 1).unflatten(-1, [square_size, square_size])
        # Interpolation may upcast; restore the original dtype afterwards.
        patch_position_embedding = torch.nn.functional.interpolate(
            patch_position_embedding, size=(patch_height, patch_width), mode='bicubic'
        ).to(dtype=_dtype)  # 1, c, ph, pw
        patch_position_embedding = patch_position_embedding.flatten(-2).permute(0, 2, 1)  # 1, n, c
        position_embedding = torch.cat([class_position_embedding, patch_position_embedding], dim=1)  # 1, n+1, c
        return position_embedding


    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid] torch.Size([1024, 19, 29])

        # Actual patch grid of this input (may differ from the pretraining grid).
        patch_height, patch_width = patch_embeds.shape[-2:]

        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)  # torch.Size([3, 1, 1024])
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        # Fast path: the grid matches pretraining, so the position table can be
        # used directly; otherwise interpolate it to the actual grid.
        square_size = self.config.image_size // self.config.patch_size
        if patch_height == square_size and patch_width == square_size:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        else:
            position_embedding = self.position_embedding(self.position_ids)
            position_embedding = self.resize_pos_embedding(position_embedding, dst_size=(patch_height, patch_width), square_size=square_size)
            embeddings = embeddings + position_embedding
        return embeddings
+
class AdaptCLIPVisionTransformer(CLIPVisionTransformer):
    """CLIP vision transformer that batches variable-length patch sequences.

    Each image is embedded individually (grids may differ per image),
    right-padded with zeros to the longest sequence in the batch, and run
    through the encoder with an attention mask that hides the padding.
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = AdaptCLIPVisionEmbeddings(config)
        # NOTE: "pre_layrnorm" (sic) matches the upstream CLIP attribute name,
        # so pretrained checkpoints load without key remapping.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[list] = None,  # list of per-image tensors, sizes may differ
        tgt_sizes: Optional[torch.IntTensor] = None,  # per-image (grid_h, grid_w) -- presumably shape (batch, 2); TODO confirm against caller
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        batch_size = len(pixel_values)

        # Longest patch sequence in the batch; "+ 1" below accounts for [CLS].
        max_patches = max(tgt_sizes[:, 0] * tgt_sizes[:, 1])

        hidden_states = []
        for i in range(batch_size):
            # Embed each image separately since the patch grids can differ.
            hidden_state = self.embeddings(pixel_values=pixel_values[i].unsqueeze(0))  # torch.Size([1, 552, 1024])
            hidden_state = self.pre_layrnorm(hidden_state)
            # Right-pad with zeros up to the longest sequence in the batch.
            padding_size = max_patches + 1 - hidden_state.shape[1]
            padding = torch.zeros((1, padding_size, hidden_state.shape[2]), dtype=hidden_state.dtype, device=hidden_state.device)  # torch.Size([1, 25, 1024])
            state = torch.cat([hidden_state, padding], dim=1)
            hidden_states.append(state)

        hidden_states = torch.cat(hidden_states, dim=0)

        # True for real ([CLS] + patch) positions, False for padding.
        patch_attention_mask = torch.zeros((batch_size, 1, max_patches + 1), dtype=torch.bool, device=hidden_states.device)  # torch.Size([10, 577])
        for i in range(batch_size):
            patch_attention_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1] + 1] = True

        patch_attention_mask = patch_attention_mask.view(batch_size, -1)
        # The call to `_upad_input` in `_flash_attention_forward` is expensive
        # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
        # avoiding passing the attention_mask, which is equivalent to attending to the full sequence
        if not torch.any(~patch_attention_mask):
            attention_mask=None
        else:
            attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)  # torch.Size([10, 1, 577, 577])

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # FIXME the pooled_output here is incorrect for post_layernorm on padded features
        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]  # [CLS] position of each sequence
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
+
class AdaptCLIPVisionModel(CLIPVisionModel):
    """`CLIPVisionModel` whose vision tower handles variable-size inputs.

    Thin wrapper that swaps in :class:`AdaptCLIPVisionTransformer` and
    forwards the extra ``tgt_sizes`` argument (per-image patch-grid sizes).
    """

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        # Replace the stock tower with the variable-size-aware variant.
        self.vision_model = AdaptCLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[list] = None,
        tgt_sizes: Optional[torch.IntTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output # pooled CLS states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # All heavy lifting happens inside the adapted vision transformer.
        return self.vision_model(
            pixel_values=pixel_values,
            tgt_sizes=tgt_sizes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/bpe_simple_vocab_16e6.txt.gz b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/bpe_simple_vocab_16e6.txt.gz
new file mode 100644
index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/bpe_simple_vocab_16e6.txt.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+size 1356917
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_model.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..a156624bad999775be6dc2741be648d3d2c15c67
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_model.py
@@ -0,0 +1,240 @@
+""" huggingface model adapter
+
+Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
+"""
+
+import re
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from torch import TensorType
+
+try:
+ import transformers
+ from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
+except ImportError as e:
+ transformers = None
+
+ class BaseModelOutput:
+ pass
+
+ class PretrainedConfig:
+ pass
+
+
+from .hf_configs import arch_dict
+
+
+# utils
+def _camel2snake(s):
+ return re.sub(r"(? TensorType:
+ # image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device)
+ # attn_mask = (x != self.config.pad_token_id).long()
+ # out = self.transformer(
+ # input_ids=x,
+ # attention_mask=attn_mask,
+ # encoder_hidden_states = image_embeds,
+ # encoder_attention_mask = image_atts,
+ # )
+ # pooled_out = self.pooler(out, attn_mask)
+
+ # return self.itm_proj(pooled_out)
+
    def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
        """BERT-style MLM masking applied in place to *input_ids*.

        Masked positions come from ``masked_indices`` when given, otherwise
        they are sampled from ``probability_matrix``. Pad and CLS tokens are
        never masked. Of the masked positions: 80% become the mask token, 10%
        a random token, 10% stay unchanged. When *targets* is given, its
        non-masked positions are set to -100 (ignored by the loss) and
        ``(input_ids, targets)`` is returned; otherwise just ``input_ids``.
        """
        if masked_indices is None:
            masked_indices = torch.bernoulli(probability_matrix).bool()

        # Never mask padding or the [CLS] token.
        masked_indices[input_ids == self.tokenizer.pad_token_id] = False
        masked_indices[input_ids == self.tokenizer.cls_token_id] = False

        if targets is not None:
            targets[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
        input_ids[indices_replaced] = self.tokenizer.mask_token_id

        # 10% of the time, we replace masked input tokens with random word
        # (0.5 of the remaining 20% == 10% overall)
        indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
        input_ids[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged

        if targets is not None:
            return input_ids, targets
        else:
            return input_ids
+
    def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
        """Masked-language-modeling loss conditioned on image embeddings.

        Randomly masks ``mlm_probability`` of the tokens (see :meth:`mask`),
        then runs the text transformer with cross-attention over
        *image_embeds* and returns the model's built-in MLM loss.
        """
        labels = input_ids.clone()
        # 1 for real tokens, 0 for padding.
        attn_mask = (input_ids != self.config.pad_token_id).long()
        # Attend to every image-embedding position.
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(input_ids.device)
        # Vocab size is looked up through the per-architecture config map.
        vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
        probability_matrix = torch.full(labels.shape, mlm_probability)
        input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels, probability_matrix=probability_matrix)
        mlm_output = self.transformer(
            input_ids,
            attention_mask=attn_mask,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_atts,
            return_dict=True,
            labels=labels,
        )
        return mlm_output.loss
        # (kept for reference) manual MLM-head variant of the same loss:
        # mlm_output = self.transformer(input_ids,
        #                             attention_mask = attn_mask,
        #                             encoder_hidden_states = image_embeds,
        #                             encoder_attention_mask = image_atts,
        #                             return_dict = True,
        #                             ).last_hidden_state
        # logits = self.mlm_proj(mlm_output)

        # # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
        # logits = logits[:, 1:, :].contiguous().view(-1, vocab_size)
        # labels = labels[:, 1:].contiguous().view(-1)

        # mlm_loss = F.cross_entropy(
        #     logits,
        #     labels,
        #     # label_smoothing=0.1,
        # )
        # return mlm_loss
+
    def forward(self, x: TensorType) -> TensorType:
        """Encode token ids *x* and project the pooled output to the CLIP space."""
        # 1 for real tokens, 0 for padding.
        attn_mask = (x != self.config.pad_token_id).long()
        out = self.transformer(input_ids=x, attention_mask=attn_mask)
        pooled_out = self.pooler(out, attn_mask)

        return self.proj(pooled_out)
+
    def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
        """Freeze the HF text tower, optionally leaving trailing layers trainable.

        Args:
            unlocked_layers: number of trailing encoder layers to keep
                trainable; 0 freezes the whole tower.
            freeze_layer_norm: when False, LayerNorm parameters remain
                trainable even inside frozen modules.
        """
        if not unlocked_layers:  # full freezing
            for n, p in self.transformer.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
            return

        # Some HF models nest the layer stack under `.encoder`, others don't.
        encoder = self.transformer.encoder if hasattr(self.transformer, "encoder") else self.transformer
        layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
        print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
        embeddings = getattr(self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
        # Everything except the last `unlocked_layers` layers gets frozen
        # (embeddings count as the first "layer").
        modules = [embeddings, *layer_list][:-unlocked_layers]
        # freeze layers
        for module in modules:
            for n, p in module.named_parameters():
                p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
+
    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # `enable` is accepted for API symmetry with other towers, but HF's
        # helper only switches checkpointing on.
        self.transformer.gradient_checkpointing_enable()
+
    def get_num_layers(self):
        """Return the number of encoder layers in the wrapped HF text model."""
        # Some HF models nest the layer stack under `.encoder`, others don't.
        encoder = self.transformer.encoder if hasattr(self.transformer, "encoder") else self.transformer
        layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
        return len(layer_list)
+
    def init_parameters(self):
        # No-op: weights come fully initialized from the pretrained HF checkpoint.
        pass
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B.json b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B.json
new file mode 100644
index 0000000000000000000000000000000000000000..689492a25d365436fd85ed432e6fb7295ca1c7bd
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B.json
@@ -0,0 +1,27 @@
+{
+ "embed_dim": 1280,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 32,
+ "width": 4096,
+ "head_width": 128,
+ "mlp_ratio": 5,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-8b-14-x",
+ "drop_path_rate": 0,
+ "qkv_bias": false,
+ "xattn": true,
+ "postnorm": false,
+ "fusedLN": false,
+ "use_rms_norm": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 1280,
+ "heads": 20,
+ "layers": 32,
+ "xattn": false,
+ "fusedLN": false
+ }
+}
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/tokenizer.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f753e69bc8e24b607b0fa1378ebe236b3d47c27
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/tokenizer.py
@@ -0,0 +1,205 @@
+""" CLIP tokenizer
+
+Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+
+import gzip
+import html
+import os
+from functools import lru_cache
+from typing import Union, List
+
+import ftfy
+import regex as re
+import torch
+
+# https://stackoverflow.com/q/62691279
+import os
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
@lru_cache()
def default_bpe():
    """Return the path of the bundled BPE merges file (next to this module)."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    # Printable bytes map to themselves; the rest map to code points >= 256.
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
+
+
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (symbols being variable-length strings).
    """
    return set(zip(word, word[1:]))
+
+
def basic_clean(text):
    """Fix mojibake with ftfy, unescape HTML entities, and strip outer whitespace."""
    text = ftfy.fix_text(text)
    # Unescape twice to also handle double-encoded entities (e.g. "&amp;amp;").
    text = html.unescape(html.unescape(text))
    return text.strip()
+
+
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
+
+
class SimpleTokenizer(object):
    """Byte-pair-encoding tokenizer from OpenAI CLIP.

    BUG FIX: the special-token strings and the BPE end-of-word marker had been
    stripped to empty strings (markup-stripping artifact); restored the
    canonical CLIP values `<start_of_text>`, `<end_of_text>` and `</w>`.
    """

    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        # Skip the header line and keep exactly the merges that fit the 49152-token vocab.
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        # Each base symbol also exists in an end-of-word variant.
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        if not special_tokens:
            special_tokens = ["<start_of_text>", "<end_of_text>"]
        else:
            special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
        vocab.extend(special_tokens)
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass BPE entirely.
        self.cache = {t: t for t in special_tokens}
        special = "|".join(special_tokens)
        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

        self.vocab_size = len(self.encoder)
        self.all_special_ids = [self.encoder[t] for t in special_tokens]

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized word; returns space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        # The final character carries the end-of-word marker.
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:  # BUG FIX: was a bare `except:` (hid unrelated errors)
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Tokenize *text* into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of token ids back into text (end-of-word markers become spaces)."""
        text = "".join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors="replace").replace("</w>", " ")
        return text
+
+
+_tokenizer = SimpleTokenizer()
+
+
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    # BUG FIX: the encoder keys were empty strings (markup-stripping artifact);
    # restored the canonical CLIP special-token names.
    sot_token = _tokenizer.encoder["<start_of_text>"]
    eot_token = _tokenizer.encoder["<end_of_text>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            tokens = tokens[:context_length]  # Truncate, keeping EOT as the final token.
            tokens[-1] = eot_token
        result[i, : len(tokens)] = torch.tensor(tokens)

    return result
+
+
class HFTokenizer:
    "HuggingFace tokenizer wrapper"

    def __init__(self, tokenizer_name: str):
        # Lazy import so transformers is only required when this wrapper is used.
        from transformers import AutoTokenizer

        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:
        """Tokenize *texts* to a (batch, context_length) LongTensor, padded/truncated to length."""
        # same cleaning as for default tokenizer, except lowercasing
        # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
        if isinstance(texts, str):
            texts = [texts]
        texts = [whitespace_clean(basic_clean(text)) for text in texts]
        input_ids = self.tokenizer(texts, return_tensors="pt", max_length=context_length, padding="max_length", truncation=True).input_ids
        return input_ids
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_processors.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_processors.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee1273155ddf1c32f1ac4085f8141582d9259cd
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_processors.py
@@ -0,0 +1,72 @@
+"""
+# Adapted from https://github.com/baaivision/EVA/tree/master/EVA-CLIP
+"""
+
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+from transformers.image_processing_utils import BatchFeature
+from PIL import Image
+from transformers.image_transforms import convert_to_rgb
+
+
class BaseProcessor:
    """Identity processor base class; subclasses replace `self.transform`."""

    def __init__(self):
        def _identity(item):
            return item

        self.transform = _identity

    def __call__(self, item):
        """Apply the configured transform to *item*."""
        return self.transform(item)
+
+
class EvaClipImageBaseProcessor(BaseProcessor):
    """Base image processor holding the normalization stats (defaults to CLIP's)."""

    def __init__(self, mean=None, std=None):
        # Defaults are the standard OpenAI CLIP image mean/std.
        self.mean = (0.48145466, 0.4578275, 0.40821073) if mean is None else mean
        self.std = (0.26862954, 0.26130258, 0.27577711) if std is None else std

        self.normalize = transforms.Normalize(self.mean, self.std)

    @property
    def image_mean(self):
        """Per-channel mean used for normalization."""
        return self.mean
+
+
class EvaClipImageTrainProcessor(EvaClipImageBaseProcessor):
    """EVA-CLIP image preprocessing: resize (bicubic), center-crop, to-tensor, normalize.

    NOTE(review): despite the "Train" name there is no random augmentation, and the
    `min_scale`/`max_scale` parameters are never used — confirm whether random
    resized cropping was intended here.
    """

    def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0):
        super().__init__(mean=mean, std=std)

        self.transform = transforms.Compose(
            [
                convert_to_rgb,
                transforms.Resize(
                    image_size,
                    interpolation=InterpolationMode.BICUBIC,
                ),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                self.normalize,
            ]
        )

        self.image_size = image_size

    def preprocess(self, images, return_tensors):
        """HF-processor-style entry point: returns a BatchFeature with `pixel_values`."""
        if isinstance(images, Image.Image):
            images = [images]
        else:
            assert isinstance(images, list)

        transformed_images = [self.transform(image).numpy() for image in images]
        data = {"pixel_values": transformed_images}

        return BatchFeature(data=data, tensor_type=return_tensors)

    def __call__(self, item):
        return self.transform(item)

    @property
    def crop_size(self):
        """Output spatial size, HF-processor style."""
        return {"height": self.image_size, "width": self.image_size}

    @property
    def size(self):
        """Resize target, HF-processor style."""
        return {"shortest_edge": self.image_size}
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14-336.json b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
new file mode 100644
index 0000000000000000000000000000000000000000..3e1d124e1118911c5ad7b1ce85df195aca363ac4
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
@@ -0,0 +1,29 @@
+{
+ "embed_dim": 768,
+ "vision_cfg": {
+ "image_size": 336,
+ "layers": 24,
+ "width": 1024,
+ "drop_path_rate": 0,
+ "head_width": 64,
+ "mlp_ratio": 2.6667,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-l-14-336",
+ "xattn": true,
+ "fusedLN": true,
+ "rope": true,
+ "pt_hw_seq_len": 16,
+ "intp_freq": true,
+ "naiveswiglu": true,
+ "subln": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 768,
+ "heads": 12,
+ "layers": 12,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/imagebind.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/imagebind.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bbe71c7b42e25ff7e5a8912b403498002a26348
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/imagebind.py
@@ -0,0 +1,73 @@
+import torch
+import torch.nn as nn
+
+from transformers import CLIPImageProcessor
+
+try:
+ from imagebind.models import imagebind_model
+ from imagebind.models.imagebind_model import ModalityType
+ from imagebind.data import load_and_transform_audio_data
+except ImportError:
+ pass
+
+
class ImageBindWrapper(nn.Module):
    """Frozen ImageBind tower that can embed either images or audio.

    Returns (batch, tokens, 1024) features; audio inputs yield a single token.
    """

    def __init__(self, vision_tower, select_layer, select_feature="patch", delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.vision_tower_name = vision_tower
        self.select_layer = select_layer
        self.select_feature = select_feature

        if not delay_load:
            self.load_model()

    def load_model(self):
        """Load ImageBind-huge, freeze it, and set up a CLIP image processor."""
        # NOTE(review): reuses the CLIP-L/14 preprocessing config — presumably
        # compatible with ImageBind's expected input; confirm.
        self.image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
        self.vision_tower = imagebind_model.imagebind_huge(pretrained=True)
        for p in self.vision_tower.parameters():
            p.requires_grad = False
        self.vision_tower.eval()
        self.is_loaded = True

    def train(self, mode=True):
        # Keep the frozen tower in eval mode even when the parent model trains.
        self.training = mode

        if self.is_loaded:
            self.vision_tower.eval()

    @torch.no_grad()
    def forward(self, x):
        """Embed input: dict with "audios" -> audio embedding; tensor -> vision tokens."""
        if type(x) == dict:
            if x["audios"] is not None:
                inputs = {ModalityType.AUDIO: load_and_transform_audio_data(x["audios"], device=self.device).half()}
                embeddings = self.vision_tower(inputs)
                audio_embedding = embeddings[ModalityType.AUDIO]
                # (batch, dim) -> (batch, 1, dim): one token per audio clip.
                return audio_embedding.unsqueeze(1)
        else:
            inputs = {ModalityType.VISION: x.to(dtype=self.dtype)}
            embeddings = self.vision_tower(inputs)
            vision_embedding = embeddings[ModalityType.VISION]
            if vision_embedding.ndim == 2:
                return vision_embedding.unsqueeze(1)
            if vision_embedding.shape[1] == 257:
                # Drop the CLS token, keep the 256 patch tokens.
                return vision_embedding[:, 1:]
            raise ValueError(f"Unexpected shape: {vision_embedding.shape}")

    @property
    def dummy_feature(self):
        """Zero feature placeholder matching the hidden size."""
        return torch.zeros(1, 1024, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.modality_preprocessors.vision.cls_token.dtype

    @property
    def device(self):
        return self.vision_tower.modality_preprocessors.vision.cls_token.device

    @property
    def hidden_size(self):
        return 1024
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_encoder/open_clip_encoder.py b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/open_clip_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..17a3277f99d1a36e443217d0ace0fb17bf5997cf
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_encoder/open_clip_encoder.py
@@ -0,0 +1,163 @@
+import torch
+import torch.nn as nn
+from transformers import CLIPImageProcessor
+from llava.utils import rank0_print
+
+try:
+ import open_clip
+ import torchvision
+ from open_clip.transformer import _expand_token
+except ImportError:
+ print("OpenCLIP not installed")
+ open_clip = None
+
+HIDDEN_SIZE_DICT = {
+ "ViT-H-14-378-quickgelu": 1280,
+}
+
+
class OpenCLIPVisionTower(nn.Module):
    """Vision tower backed by an OpenCLIP visual encoder.

    Exposes the same interface as the CLIP vision towers in this package
    (forward -> patch features, plus processor/size/dtype properties).
    """

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False
        self.model_name = vision_tower.replace("open_clip_hub:", "")
        self.pretrained = args.vision_tower_pretrained
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")

        if not delay_load:
            rank0_print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(args, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()

    def load_model(self, device_map="auto"):
        """Create the OpenCLIP model and derive an HF-style image processor from its transforms."""
        rank0_print(f"Loading OpenCLIP model: {self.model_name}")
        rank0_print(f"Pretrained: {self.pretrained}")
        vision_tower, _, image_processor = open_clip.create_model_and_transforms(model_name=self.model_name, pretrained=self.pretrained, precision="fp32", device="cuda")

        # Mirror OpenCLIP's eval transforms into a CLIPImageProcessor config.
        resize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Resize)][0]
        normalize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Normalize)][0]
        self.resize_transform_size = resize_transform.size  # 224 or 384
        self.patch_size = vision_tower.visual.conv1.kernel_size[0]  # 14 or 16

        self.image_processor = CLIPImageProcessor.from_pretrained(
            "openai/clip-vit-large-patch14",
            crop_size=resize_transform.size,
            size={"shortest_edge": resize_transform.size},
            image_mean=list(normalize_transform.mean),
            image_std=list(normalize_transform.std),
        )
        rank0_print(f"Loaded image processor: {self.image_processor}")
        self.vision_tower = vision_tower.visual
        self.vision_tower.requires_grad_(False)

        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        """Select features from the chosen layer: drop CLS ("patch"), keep all ("cls_patch"), or flatten conv maps."""
        image_features = image_forward_outs[self.select_layer]
        if self.select_feature == "patch":
            image_features = image_features[:, 1:]
        elif self.select_feature == "cls_patch":
            image_features = image_features
        elif self.select_feature == "conv_flatten":
            image_features = image_features.flatten(2).transpose(1, 2)
        else:
            raise ValueError(f"Unexpected select feature: {self.select_feature}")
        return image_features

    def forward_visual(self, x, output_hidden_states=False):
        """Run the visual trunk and return per-layer features.

        Uses timm-style `_intermediate_layers` when available; otherwise replays
        the OpenCLIP ViT forward manually to collect every resblock output.
        """
        if hasattr(self.vision_tower, "trunk") and hasattr(self.vision_tower.trunk, "_intermediate_layers"):
            return self.vision_tower.trunk._intermediate_layers(x, abs(self.select_layer))
        else:

            def forward_openclip(self, x: torch.Tensor):
                features = []
                x = self.conv1(x)  # shape = [*, width, grid, grid]
                x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
                x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

                # class embeddings and positional embeddings
                x = torch.cat(
                    [_expand_token(self.class_embedding, x.shape[0]).to(x.dtype), x],
                    dim=1,
                )
                # shape = [*, grid ** 2 + 1, width]
                x = x + self.positional_embedding.to(x.dtype)

                x = self.patch_dropout(x)
                x = self.ln_pre(x)

                x = x.permute(1, 0, 2)  # NLD -> LND
                for r in self.transformer.resblocks:
                    x = r(x, attn_mask=None)
                    features.append(x)
                return features

            return forward_openclip(self.vision_tower, x)

    def forward(self, images):
        """Encode a batch tensor, or a list of single images one at a time."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.forward_visual(image.to(self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.forward_visual(images.to(self.dtype), output_hidden_states=True)
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # ViT-style towers expose conv1; timm trunks expose patch_embed.
        if hasattr(self.vision_tower, "conv1"):
            return self.vision_tower.conv1.weight.dtype
        if hasattr(self.vision_tower, "trunk"):
            return self.vision_tower.trunk.patch_embed.proj.weight.dtype
        raise NotImplementedError

    @property
    def device(self):
        if hasattr(self.vision_tower, "conv1"):
            return self.vision_tower.conv1.weight.device
        if hasattr(self.vision_tower, "trunk"):
            return self.vision_tower.trunk.patch_embed.proj.weight.device
        raise NotImplementedError

    @property
    def config(self):
        # No HF config object for OpenCLIP models.
        return None

    @property
    def hidden_size(self):
        if self.model_name in HIDDEN_SIZE_DICT:
            return HIDDEN_SIZE_DICT[self.model_name]
        else:
            raise NotImplementedError

    @property
    def num_patches(self):
        image_size = self.resize_transform_size if isinstance(self.resize_transform_size, int) else self.resize_transform_size[0]
        _num_patches = (image_size // self.patch_size) ** 2
        if "cls_patch" in self.select_feature:
            _num_patches += 1
        return _num_patches

    @property
    def image_size(self):
        return self.resize_transform_size

    @property
    def num_patches_per_side(self):
        return self.resize_transform_size // self.patch_size
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/adapt_spatial_resampler.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/adapt_spatial_resampler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4aba6fa10c7fd721aa41a884102475dbdaf29f3e
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/adapt_spatial_resampler.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/llava_mlp.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/llava_mlp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8573eb05f5d089693ea35e186ed80c079feac9f1
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/llava_mlp.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/mlp.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/mlp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f039be6e8d58fd35f7c8b1c0791b19035bf7f77f
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/mlp.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/percive_sampler.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/percive_sampler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f5ac136a915cdcd6fc81dd8de581722657bc8a4
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/percive_sampler.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/adapt_spatial_resampler.py b/VLMEvalKit-sudoku/llava/model/multimodal_projector/adapt_spatial_resampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..31733eb500ffb8734ebbc1b06c91286cfb9a785b
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_projector/adapt_spatial_resampler.py
@@ -0,0 +1,515 @@
+# Copyright (c) Alibaba Cloud.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from collections import OrderedDict
+import math
+import requests
+from io import BytesIO
+from functools import partial
+from PIL import Image
+from typing import Callable, Optional, Sequence, Tuple, List, Union
+import numpy as np
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.init import trunc_normal_
+from torchvision import transforms
+from torchvision.transforms import InterpolationMode
+
+from llava.slice_process import slice_image_feature_minicpm
+import torchvision.ops.roi_align as RoIAlign
+from einops import rearrange
+import time
+# from llava.model.multimodal_encoder.hubconf import featup
+
def get_abs_pos(abs_pos, tgt_size):
    """Bicubically resample a square positional-embedding table to a target grid.

    abs_pos: (L, C) where L is a perfect square (src_size**2)
    tgt_size: (H, W) target grid
    returns: (H*W, C), same dtype as the input
    """
    # abs_pos: L, C
    # tgt_size: (H, W)
    # return: M, C
    src_size = int(math.sqrt(abs_pos.size(0)))
    dtype = abs_pos.dtype

    # Interpolate in float32, then cast back to the original dtype.
    return F.interpolate(
        abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
        size=(tgt_size[0], tgt_size[1]),
        mode="bicubic",
        align_corners=False,
    ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
+
+
+# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # Prepend an all-zero embedding for the CLS position.
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
+
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Combine per-axis 1-D sincos embeddings into a 2-D embedding of shape (H*W, embed_dim)."""
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb
+
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    Build 1-D sine/cosine positional embeddings.

    embed_dim: output dimension for each position (must be even)
    pos: array of positions to be encoded (flattened internally), size (M,)
    returns: (M, embed_dim) array laid out as [sin | cos] halves
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    # Geometric frequency ladder: 1/10000^(i / half) for i in [0, half).
    freqs = 1.0 / 10000 ** (np.arange(half, dtype=np.float32) / half)

    angles = np.outer(pos.reshape(-1), freqs)  # (M, half)

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, embed_dim)
+
+
+class AdaptSpatialResampler(nn.Module):
+ """
+ A 2D perceiver-resampler network with one cross attention layers by
+ (grid_size**2) learnable queries and 2d sincos pos_emb
+ Outputs:
+ A tensor with the shape of (grid_size**2, embed_dim)
+ """
+
    def __init__(
        self,
        config,
        grid_size,
        embed_dim,
        num_heads,
        kv_dim=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6)
    ):
        """Build the resampler: grid_size**2 learnable queries, fixed 2-D sincos
        positional table, multi-scale feature projectors/embeddings, and one
        cross-attention layer.

        config: model config; reads `mm_hidden_size`, `feature_scale_mask`,
                and `mm_vision_tower`.
        kv_dim: input feature dim; projected to embed_dim when they differ.
        """
        super().__init__()
        self.config = config
        self.grid_size = grid_size
        self.num_queries = grid_size ** 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.mm_hidden_size = self.config.mm_hidden_size
        # Bitmask selecting which feature scales (1x/2x/4x/8x) are in play.
        self.feature_scale_mask = getattr(self.config, 'feature_scale_mask', 7)
        vision_tower = getattr(self.config, 'mm_vision_tower', '')
        self.vision_tower_name = 'clip-large'
        if 'clip' in vision_tower:
            self.vision_tower_name = 'clip-large'
        elif 'siglip' in vision_tower:
            self.vision_tower_name = 'siglip'

        # Fixed (non-trainable) 2-D sincos positional table over the query grid.
        self.pos_embed = nn.Parameter(
            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
        ).requires_grad_(False)

        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)

        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        # NOTE(review): `featup` is undefined here — its import is commented out
        # at the top of this file, so any feature_scale_mask with bit 2/4/8 set
        # will raise NameError at construction time. Confirm intended behavior.
        if self.feature_scale_mask & 8:
            self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='8x')
        elif self.feature_scale_mask & 4:
            self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='4x')
        elif self.feature_scale_mask & 2:
            self.upsampler = featup(self.vision_tower_name, pretrained=False, use_norm=True, scale='2x')

        # four learnable expert embeddings (one per feature scale)
        self.feature_1x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
        self.feature_2x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
        self.feature_4x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))
        self.feature_8x_embedding = nn.Parameter(torch.zeros(1,1, self.embed_dim))

        # Four independent query sets of num_queries embeddings each,
        # one per attention pass in forward_with_muti_res.
        self.query_1 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query_1, std=.02)

        self.query_2 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query_2, std=.02)

        self.query_3 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query_3, std=.02)

        self.query_4 = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query_4, std=.02)

        # Per-scale input projections from the vision hidden size to embed_dim.
        self.features_1x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
        self.features_2x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
        self.features_4x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)
        self.features_8x_projector = nn.Linear(in_features=self.mm_hidden_size, out_features=self.embed_dim)

        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)
        self.ln_proj = norm_layer(embed_dim)
        self.ln_post = norm_layer(embed_dim)
        # Fuses the four per-query attention outputs back to embed_dim.
        self.cat_proj = nn.Linear(4*embed_dim, embed_dim)
        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))

        self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ def cal_best_pooling_size(self, feature_wh_ratio=1.0):
+ candidate_pooling_sizes = [
+ (4, 2), (3, 2), (4, 3), (3, 3),
+ (2, 4), (2, 3), (3, 4)
+ ] # w, h
+ log_feature_wh_ratio = math.log(feature_wh_ratio)
+ best_pooling_size = (3, 3) # w, h
+ min_error = float("inf")
+ for candidate_pooling_size in candidate_pooling_sizes:
+ w, h = candidate_pooling_size
+ error = abs(log_feature_wh_ratio - math.log(w/h))
+ if error < min_error:
+ best_pooling_size = (h, w)
+ min_error = error
+ return best_pooling_size
+
    def adapt_unfold(self, input_embeds, spatial_size=(24, 24), best_grid=(1, 1), sampler_bins=1):
        """Slice a feature map into num_queries regions and RoI-align each to an
        aspect-ratio-matched pooling size.

        input_embeds: (bs, h, w, c) feature map (permuted to NCHW internally).
        Returns (bs * num_queries, pool_h * pool_w, c).

        NOTE(review): `spatial_size` and `sampler_bins` are unused, and the
        `best_grid` argument is overwritten by slice_image_feature_minicpm —
        confirm whether these parameters are vestigial.
        """
        # input_embeds: bs, n, c
        # spatial_size: feature map height, width
        # larger sampler_bins -> more sampling points -> more detail preserved
        input_embeds = input_embeds.permute(0, 3,1,2)

        resample_regions, best_grid, wh_ratio = slice_image_feature_minicpm(input_embeds, self.num_queries)

        output_size = self.cal_best_pooling_size(wh_ratio)
        # NOTE(review): `RoIAlign` is `torchvision.ops.roi_align` imported at module
        # top; it resolves to the roi_align function via the package attribute — verify.
        aligned_feature = RoIAlign(input_embeds.float(), resample_regions.float(), output_size,
                                   spatial_scale=1.0).to(dtype=input_embeds.dtype)
        unfold_input_embeds = aligned_feature.flatten(-2).permute(0, 2, 1)
        # bs*N, c, h, w -> bs*N,c,h*w -> bs*N, h*w, c
        return unfold_input_embeds
+
    def unfold(self, input_embeds, spatial_size=(24, 24), kernel_size=2, stride=2):
        """Cut a (bs, n, c) token sequence into kernel_size**2-token patches.

        Returns (bs * num_patches, kernel_size**2, c).
        """
        # input_embeds: bs, n, c
        # spatial_size: feature map height, width
        input_embeds = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)
        unfold_func = nn.Unfold(kernel_size=kernel_size, stride=stride)
        unfold_input_embeds = unfold_func(input_embeds)  # bs, c* k**2, l
        unfold_input_embeds = unfold_input_embeds.unflatten(1, [-1, kernel_size ** 2]).permute(0, 3, 2, 1).flatten(0, 1)
        # bs, c*k**2, l -> bs, c, k**2, l -> bs, l, k**2, c -> bs*l, k**2, c
        return unfold_input_embeds
+
+ def forward(self, x, tgt_size=(24, 24), attn_mask=None):
+ dtype = x.dtype
+ bs = x.shape[0]
+ key_height, key_width = tgt_size
+ key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))
+
+
+ x = self.ln_kv(self.kv_proj(x))
+
+ q = self.ln_q(self.query) #[:num_valid_query]
+
+
+ query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
+ key = x + key_pos_embed[None].to(dtype=dtype)
+ value = x
+
+ query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)
+ key = self.adapt_unflod(key, spatial_size=(key_height, key_width))
+ value = self.adapt_unflod(value, spatial_size=(key_height, key_width))
+
+ out, attn_weights = self.attn(
+ query.permute(1, 0, 2),
+ key.permute(1, 0, 2),
+ value.permute(1, 0, 2),
+ attn_mask=attn_mask
+ )
+ # out->1, bs*l, c
+ x = out[0].unflatten(0, [bs, -1]) # bs, l, c
+ x = self.ln_post(x)
+ x = x @ self.proj
+ return x
+
    def forward_with_muti_res(self, feature_1x, feature_2x, feature_4x, feature_8x, tgt_size=(24, 24), attn_mask=None, dtype=torch.bfloat16):
        """Cross-attend four query sets over multi-scale features and fuse the results.

        Each feature_*x is an NCHW map (or None to skip that scale).
        Returns (1, num_queries, embed_dim).

        NOTE(review): batch size is hard-coded to 1 and features are cast to
        bfloat16 unconditionally — confirm callers always satisfy both.
        """
        """Prepare KV in a 4*9 manner"""
        muti_res_feat_keys = []
        muti_res_feat_values = []
        bs = 1

        feature_list = [feature_1x, feature_2x, feature_4x, feature_8x]
        embedding_list = [self.feature_1x_embedding, self.feature_2x_embedding, self.feature_4x_embedding, self.feature_8x_embedding]
        projector_list = [self.features_1x_projector, self.features_2x_projector, self.features_4x_projector, self.features_8x_projector]

        for feature, embedding, projector in zip(feature_list, embedding_list, projector_list):
            if feature is None:
                continue

            feature = feature.to(torch.bfloat16)
            # NCHW -> NHWC, then project channel dim to embed_dim.
            feature = projector(feature.permute(0,2,3,1))

            key_height = feature.shape[1]
            key_width = feature.shape[2]
            key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))  # (h*w, embed_dim)
            feature = rearrange(feature,'b h w c -> b (h w) c')  # flatten the spatial grid
            feature = self.ln_kv(feature)
            # Keys carry positional info plus a per-scale "expert" embedding.
            key = feature + key_pos_embed[None].to(dtype=dtype) + embedding.to(dtype=dtype)
            value = feature
            key = key.reshape(bs, key_height, key_width, self.embed_dim)
            key = self.adapt_unfold(key)  # (num_queries, pool, embed_dim)
            value = value.reshape(bs, key_height, key_width, self.embed_dim)
            value = self.adapt_unfold(value)
            muti_res_feat_keys.append(key)
            muti_res_feat_values.append(value)

        # Concatenate the per-scale pools along the key length dimension.
        muti_res_feat_keys = torch.cat(muti_res_feat_keys, dim=1)
        muti_res_feat_values = torch.cat(muti_res_feat_values, dim=1)

        # achor2 = time.time() - start #0.38
        # print(f'kv: {achor2}')

        """Prepare Q and do attn"""
        attn_results = []
        for query_now in [self.query_1, self.query_2, self.query_3, self.query_4]:
            q = self.ln_q(query_now)
            query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
            query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)

            out, attn_weights = self.attn(
                query.permute(1, 0, 2),  # Q * B * D
                muti_res_feat_keys.permute(1, 0, 2),  # L * B * D
                muti_res_feat_values.permute(1, 0, 2),
                attn_mask=attn_mask
            )
            # out->1, bs*l, c
            get = out[0].unflatten(0, [bs, -1])  # bs, l, c
            get = self.ln_proj(get)
            attn_results.append(get)

        # Fuse the four passes: concat along channels, project back to embed_dim.
        x = torch.cat(attn_results, dim=2)
        x = self.cat_proj(x)
        x = self.ln_post(x)
        x = x @ self.proj

        # achor3 = time.time() - start #0.38
        # print(f'query: {achor3 - achor2}')

        return x
+
+    def prepare_single_key_value(self, feature_1x, feature_2x, feature_4x, feature_8x, tgt_size=(24, 24), attn_mask=None, dtype=torch.bfloat16):
+        """Prepare KV in a 4*9 manner"""
+        # Same KV construction as forward_with_muti_res but batched
+        # (bs taken from feature_1x) and without the attention pass:
+        # returns the concatenated (keys, values) for later use by
+        # query_with_parallel_attn.
+        muti_res_feat_keys = []
+        muti_res_feat_values = []
+        bs = feature_1x.shape[0]
+
+        feature_list = [feature_1x, feature_2x, feature_4x, feature_8x]
+        embedding_list = [self.feature_1x_embedding, self.feature_2x_embedding, self.feature_4x_embedding, self.feature_8x_embedding]
+        projector_list = [self.features_1x_projector, self.features_2x_projector, self.features_4x_projector, self.features_8x_projector]
+
+        for feature, embedding, projector in zip(feature_list, embedding_list, projector_list):
+            if feature is None:
+                continue
+
+            # (b, c, h, w) -> (b, h, w, c); cast to bf16 before projecting.
+            feature = feature.to(torch.bfloat16)
+            feature = projector(feature.permute(0,2,3,1))
+
+            key_height = feature.shape[1]
+            key_width = feature.shape[2]
+            key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width)) #torch.Size([550, 4096])
+            feature = rearrange(feature,'b h w c -> b (h w) c') #torch.Size([1, 50, 44, 4096]) to torch.Size([1, 2200, 4096])
+            feature = self.ln_kv(feature) #torch.Size([1, 2304, 4096]) #torch.Size([1, 9216, 4096])
+            # Keys carry position + per-scale embeddings; values stay raw.
+            key = feature + key_pos_embed[None].to(dtype=dtype) + embedding.to(dtype=dtype)
+            value = feature
+            key = key.reshape(bs, key_height, key_width, self.embed_dim) #torch.Size([1, 48, 48, 4096]) #torch.Size([1, 96, 96, 4096])
+            key = self.adapt_unfold(key) #torch.Size([144, 9, 4096]) #torch.Size([144, 9, 4096])
+            value = value.reshape(bs, key_height, key_width, self.embed_dim)
+            value = self.adapt_unfold(value)# torch.Size([144, 9, 4096]) #torch.Size([144, 9, 4096])
+            muti_res_feat_keys.append(key) #torch.Size([144, 9, 4096]) #torch.Size([144, 9, 4096])
+            muti_res_feat_values.append(value)
+
+        # Concatenate per-scale window tokens along the token axis.
+        muti_res_feat_keys = torch.cat(muti_res_feat_keys, dim=1) # (144, 36, 5120)
+        muti_res_feat_values = torch.cat(muti_res_feat_values, dim=1) # (144, 36, 5120)
+
+        return muti_res_feat_keys, muti_res_feat_values
+
+    def query_with_parallel_attn(self, bs, key_list, value_list, dtype=torch.bfloat16):
+        """Prepare Q and do attn"""
+        # Pads every per-group key/value tensor to the longest token length,
+        # concatenates all groups into one batch, and masks the padding with
+        # key_padding_mask (True = position is ignored by nn.MultiheadAttention).
+        # Returns a list of bs per-image projected feature tensors.
+        max_len = max([key.shape[1] for key in key_list]) #36
+        tgt_lengths = []
+
+        # One entry per image: key_list[i].shape[0] == images_in_group * num_queries,
+        # and key_list[i][0].shape[0] is that group's real (unpadded) token count.
+        # NOTE(review): the mask loop below indexes tgt_lengths[i] for i in
+        # range(bs) — this assumes bs equals the total number of images; confirm.
+        for i in range(len(key_list)):
+            for _ in range(key_list[i].shape[0] // self.num_queries):
+                tgt_lengths.append(key_list[i][0].shape[0])
+
+        # Zero-pad keys along the token axis up to max_len.
+        padded_key_list = []
+        for key in key_list:
+            padding_size = max_len - key.shape[1]
+            padding = torch.zeros((key.shape[0], padding_size, key.shape[2]), dtype=key.dtype, device=key.device) #torch.Size([144, 36, 1024])
+            padded_key = torch.cat([key, padding], dim=1)
+            padded_key_list.append(padded_key)
+
+        # Same padding for values.
+        padded_value_list = []
+        for value in value_list:
+            padding_size = max_len - value.shape[1]
+            padding = torch.zeros((value.shape[0], padding_size, value.shape[2]), dtype=value.dtype, device=value.device) #torch.Size([144, 36, 1024])
+            padded_value = torch.cat([value, padding], dim=1)
+            padded_value_list.append(padded_value)
+
+        padded_keys = torch.cat(padded_key_list, dim=0) # torch.Size([1440, 36, 4096])
+        padded_values = torch.cat(padded_value_list, dim=0) # torch.Size([1440, 36, 4096])
+
+        # Mark the real (unpadded) key positions per image as attendable (False).
+        token_length = int(padded_keys.shape[0] / bs) #144
+        key_padding_mask = torch.ones((padded_keys.shape[0], max_len), dtype=torch.bool, device=key_list[0].device) #torch.Size([1440, 36])
+        for i in range(bs):
+            key_padding_mask[i*token_length : (i+1)*token_length, :tgt_lengths[i]] = False
+
+        attn_results = []
+        for query_now in [self.query_1, self.query_2, self.query_3, self.query_4]:
+            q = self.ln_q(query_now)
+            query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
+            query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1) #torch.Size([1440, 1, 4096])
+
+            out, attn_weights = self.attn( #[1, 1008, 4096]
+                query.permute(1, 0, 2), #torch.Size([1, 1008, 4096])
+                padded_keys.permute(1, 0, 2), #torch.Size([36, 1008, 4096])
+                padded_values.permute(1, 0, 2), #torch.Size([36, 1008, 4096])
+                key_padding_mask=key_padding_mask
+            )
+            # out->1, bs*l, c
+            get = out[0].unflatten(0, [bs, -1]) # bs, l, c #torch.Size([7, 144, 4096])
+            get = self.ln_proj(get)
+            attn_results.append(get)
+
+        # Fuse the four query-set outputs along the channel axis.
+        x = torch.cat(attn_results, dim=2) #torch.Size([7, 144, 16384])
+        x = self.cat_proj(x) #torch.Size([7, 144, 4096])
+        x = self.ln_post(x) #torch.Size([7, 144, 4096])
+        x = x @ self.proj #torch.Size([7, 144, 4096])
+
+        # Split the batch back into one tensor per image.
+        projected_image_features = [x[i] for i in range(bs)]
+
+        return projected_image_features
+
+    def _repeat(self, query, N: int):
+        # Tile a (L, C) query tensor into a batch of N copies: (N, L, C).
+        return query.unsqueeze(0).repeat(N, 1, 1)
+
+    def partition_list(self, input_list, lengths):
+        """
+        Partition a list into consecutive sublists of the given lengths.
+
+        Args:
+            input_list (list): the original list to partition.
+            lengths (list): integers giving the length of each sublist.
+
+        Returns:
+            list: a list of sublists whose sizes match ``lengths``.
+
+        Raises:
+            ValueError: if ``lengths`` does not sum exactly to
+                ``len(input_list)``.
+        """
+        result = []
+        current_index = 0
+        for length in lengths:
+            if current_index + length > len(input_list):
+                raise ValueError("划分长度超过了列表的总长度")
+            sublist = input_list[current_index:current_index + length]
+            result.append(sublist)
+            current_index += length
+        if current_index != len(input_list):
+            raise ValueError("划分长度和列表总长度不一致")
+        return result
+
+
+    def forward_with_featup(self, features, patch_sizes, images, num_images):
+        """Project per-image features via FeatUp-style multi-resolution KV.
+
+        Reshapes each flat feature sequence to (1, C, h, w) using its patch
+        size, groups consecutive images of identical shape so they can be
+        upsampled/keyed together, then attends and re-partitions the outputs
+        by num_images.
+        """
+        # achor2 = time.time() - start #0.38
+        # print(f'achor2: {achor2 - achor1}')
+
+        bs = len(images)
+
+        features_1x = [] #list torch.Size([1, 1024, 25, 22])
+
+        for i in range(len(features)):
+            h, w = patch_sizes[i]
+
+            # List inputs are already (n, c) per image; tensors need a batch dim.
+            if type(features) is list:
+                feature = features[i][:h * w, :]
+            else:
+                feature = features[i][:h * w, :].unsqueeze(0)
+            feature = feature.permute(0, 2, 1) #torch.Size([1, 1024, 25*22])
+            feature = feature.unflatten(2, [h, w]) #torch.Size([1, 1024, 25, 22])
+            features_1x.append(feature)
+
+        # Group consecutive entries of features_1x: runs with the same shape
+        # form one group so they can be stacked and processed together.
+        keys = []
+        values = []
+        feat_group = []
+        image_group = []
+        for i in range(len(features_1x)):
+            if i == 0:
+                feat_group.append(features_1x[i])
+                image_group.append(images[i])
+            elif(features_1x[i].shape != features_1x[i-1].shape):
+                # Shape changed: flush the current group, start a new one.
+                key, value = self.get_group_keys(feat_group, image_group)
+                keys.append(key)
+                values.append(value)
+
+                feat_group = []
+                image_group = []
+                feat_group.append(features_1x[i])
+                image_group.append(images[i])
+            else:
+                feat_group.append(features_1x[i])
+                image_group.append(images[i])
+
+        # Flush the final group.
+        key, value = self.get_group_keys(feat_group, image_group)
+        keys.append(key)
+        values.append(value)
+
+        return self.compute_atten(bs, keys, values, num_images)
+
+    def get_group_keys(self, features_1x, image_group):
+        """Build KV tensors for one group of same-shape features/images.
+
+        Runs the upsampler to obtain 2x/4x/8x features, then drops any scale
+        whose bit is cleared in feature_scale_mask (bit0=1x, bit1=2x,
+        bit2=4x, bit3=8x) before delegating to prepare_single_key_value.
+        """
+        features_1x = torch.cat(features_1x, dim=0)
+        image_group = torch.stack(image_group, dim=0)
+
+        features_2x, features_4x, features_8x = self.upsampler.forward_with_internal_features(image_group, features_1x)
+
+        if self.feature_scale_mask & 1 == 0:
+            features_1x = None
+        if self.feature_scale_mask & 2 == 0:
+            features_2x = None
+        if self.feature_scale_mask & 4 == 0:
+            features_4x = None
+        if self.feature_scale_mask & 8 == 0:
+            features_8x = None
+
+        return self.prepare_single_key_value(features_1x, features_2x, features_4x, features_8x)
+
+    def compute_atten(self, bs, key_list, value_list, num_images):
+        # Attend all groups at once, then regroup the per-image outputs
+        # back into per-sample sublists according to num_images.
+        projected_image_features = self.query_with_parallel_attn(bs, key_list, value_list)
+        projected_image_features = self.partition_list(projected_image_features, num_images)
+        return projected_image_features
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/llava_mlp.py b/VLMEvalKit-sudoku/llava/model/multimodal_projector/llava_mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..456fe7df5e6e2a5df1c80d64ff612709092f2092
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_projector/llava_mlp.py
@@ -0,0 +1,113 @@
+# Copyright (c) Alibaba Cloud.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from collections import OrderedDict
+import math
+import requests
+from io import BytesIO
+from functools import partial
+from PIL import Image
+from typing import Callable, Optional, Sequence, Tuple, List, Union
+import numpy as np
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.init import trunc_normal_
+from torchvision import transforms
+from torchvision.transforms import InterpolationMode
+from einops import rearrange
+
+
+
+
+class LLaVA_MLP(nn.Module):
+    """
+    A two-layer MLP projector (Linear -> GELU -> Linear) mapping kv_dim
+    visual tokens to embed_dim. The input sequence is truncated to the
+    first tgt_size[0] * tgt_size[1] tokens before projection.
+    Outputs:
+        A tensor with the shape of (bs, tgt_size[0]*tgt_size[1], embed_dim)
+    """
+
+    def __init__(
+        self,
+        config,
+        embed_dim,
+        kv_dim=None,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        self.config = config
+
+        self.proj = nn.Sequential(
+            nn.Linear(kv_dim, embed_dim),
+            nn.GELU(),
+            nn.Linear(embed_dim, embed_dim),
+        )
+
+    def forward(self, x, tgt_size=(24, 24)):
+        # Keep only the first tgt_h * tgt_w tokens, then project.
+        x = x[:, :tgt_size[0] * tgt_size[1], :]
+        return self.proj(x)
+
+class LLaVA_MLP_norm(nn.Module):
+    """
+    Same as LLaVA_MLP (Linear -> GELU -> Linear on the first
+    tgt_size[0]*tgt_size[1] tokens) with a trailing LayerNorm.
+    Outputs:
+        A tensor with the shape of (bs, tgt_size[0]*tgt_size[1], embed_dim)
+    """
+
+    def __init__(
+        self,
+        config,
+        embed_dim,
+        kv_dim=None,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        self.config = config
+
+        self.proj = nn.Sequential(
+            nn.Linear(kv_dim, embed_dim),
+            nn.GELU(),
+            nn.Linear(embed_dim, embed_dim),
+        )
+        self.norm = nn.LayerNorm(embed_dim, eps=1e-6)
+
+    def forward(self, x, tgt_size=(24, 24)):
+        # Truncate, project, then normalize.
+        x = x[:, :tgt_size[0] * tgt_size[1], :]
+        x = self.proj(x)
+        x = self.norm(x)
+        return x
+
+class LLaVA_MLP_Fused(nn.Module):
+    """
+    MLP projector over channel-fused features: the truncated image tokens
+    are stacked with the fused features (3 streams total, given the
+    3*kv_dim input width of the first Linear) and flattened channel-wise
+    before a Linear -> GELU -> Linear projection.
+    Outputs:
+        A tensor with the shape of (1, n_tokens, embed_dim)
+    """
+
+    def __init__(
+        self,
+        config,
+        embed_dim,
+        kv_dim=None,
+    ):
+        super().__init__()
+        self.embed_dim = embed_dim
+        self.config = config
+        self.proj = nn.Sequential(
+            nn.Linear(3 * kv_dim, embed_dim),
+            nn.GELU(),
+            nn.Linear(embed_dim, embed_dim),
+        )
+
+    def forward(self, x, tgt_size=(24, 24)):
+        # x is a pair: (image_features, fused_features).
+        image_features, fused_features = x
+        image_features = image_features[:, :tgt_size[0] * tgt_size[1], :]
+        # Stack streams along dim 0, then fold them into the channel dim
+        # so each token sees all m streams: m n d -> n (m d).
+        image_features = torch.cat((image_features, fused_features), dim=0)
+        image_features = rearrange(image_features, 'm n d -> n (m d)')
+        return self.proj(image_features).unsqueeze(0)
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/pooler_projector.py b/VLMEvalKit-sudoku/llava/model/multimodal_projector/pooler_projector.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce5a2e05fa44ad2978272aea6dcf0aa9ca135e55
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_projector/pooler_projector.py
@@ -0,0 +1,33 @@
+import torch
+import torch.nn as nn
+
+import math
+
+from transformers.models.clip.modeling_clip import CLIPVisionModel
+
+
+class PoolerProjector(nn.Module):
+    """Projector that 2x-downsamples the vision token grid with a strided
+    conv, then applies GELU + Linear. Output has (hw/2)**2 tokens of
+    hidden_size channels."""
+
+    def __init__(self, config, vision_cfg):
+        super().__init__()
+        self._config = config
+        # Side length of the square vision token grid.
+        self.hw = vision_cfg.image_size // vision_cfg.patch_size
+
+        # kernel=stride=2 halves each spatial dimension while projecting
+        # mm_hidden_size -> hidden_size.
+        self.conv_pool = nn.Conv2d(config.mm_hidden_size, config.hidden_size, kernel_size=2, stride=2)
+
+        self.proj = nn.Sequential(
+            nn.GELU(),
+            nn.Linear(config.hidden_size, config.hidden_size),
+        )
+
+    def forward(self, x, *args, **kwargs):
+        # x: (bs, hw*hw, mm_hidden_size) -> NCHW grid -> pooled -> tokens.
+        height = width = self.hw
+        assert height * width == x.shape[1]
+        x = x.view(x.shape[0], height, width, -1).permute(0, 3, 1, 2)
+        x = self.conv_pool(x)
+        x = x.flatten(2).transpose(1, 2)
+        x = self.proj(x)
+        return x
+
+    @property
+    def config(self):
+        return {"mm_projector_type": "pooler"}
diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_projector/uhd_v1_resampler.py b/VLMEvalKit-sudoku/llava/model/multimodal_projector/uhd_v1_resampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1ca311edc3187030b7bab8d954870cfa6ae57e9
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/multimodal_projector/uhd_v1_resampler.py
@@ -0,0 +1,218 @@
+# Copyright (c) Alibaba Cloud.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from collections import OrderedDict
+import math
+import requests
+from io import BytesIO
+from functools import partial
+from PIL import Image
+from typing import Callable, Optional, Sequence, Tuple, List, Union
+import numpy as np
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn.init import trunc_normal_
+from torchvision import transforms
+from torchvision.transforms import InterpolationMode
+
+from llava.slice_process import slice_image_feature_minicpm
+import torchvision.ops.roi_align as RoIAlign
+
+def get_abs_pos(abs_pos, tgt_size):
+    # abs_pos: L, C
+    # tgt_size: (H, W)
+    # return: M, C
+    # Bicubically resamples a square position-embedding grid to (H, W).
+    # Assumes abs_pos came from a square grid: src_size = sqrt(L).
+    src_size = int(math.sqrt(abs_pos.size(0)))
+    dtype = abs_pos.dtype
+
+    # Interpolate in float32 for precision, then cast back to the input dtype.
+    return F.interpolate(
+        abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
+        size=(tgt_size[0], tgt_size[1]),
+        mode="bicubic",
+        align_corners=False,
+    ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
+
+
+# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
+def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+    """
+    grid_size: int of the grid height and width
+    return:
+    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+    """
+    grid_h = np.arange(grid_size, dtype=np.float32)
+    grid_w = np.arange(grid_size, dtype=np.float32)
+    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
+    grid = np.stack(grid, axis=0)
+
+    grid = grid.reshape([2, 1, grid_size, grid_size])
+
+    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+    if cls_token:
+        # Prepend an all-zero embedding for the [CLS] position.
+        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+    return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+    """Build a 2D sin-cos embedding from a (2, ...) coordinate grid:
+    half of embed_dim encodes the h coordinate, half the w coordinate."""
+    assert embed_dim % 2 == 0
+
+    # use half of dimensions to encode grid_h
+    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+    return emb
+
+
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+    """
+    embed_dim: output dimension for each position
+    pos: a list of positions to be encoded: size (M,)
+    out: (M, D)
+    """
+    assert embed_dim % 2 == 0
+    # Transformer-style inverse-frequency spectrum: 1 / 10000^(2i/D).
+    omega = np.arange(embed_dim // 2, dtype=np.float32)
+    omega /= embed_dim / 2.
+    omega = 1. / 10000 ** omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
+
+    emb_sin = np.sin(out)  # (M, D/2)
+    emb_cos = np.cos(out)  # (M, D/2)
+
+    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+    return emb
+
+
+class AdaptSpatialResampler_v1(nn.Module):
+    """
+    A 2D perceiver-resampler network with one cross attention layers by
+    (grid_size**2) learnable queries and 2d sincos pos_emb
+    Outputs:
+        A tensor with the shape of (grid_size**2, embed_dim)
+
+    NOTE(review): this file imports ``torchvision.ops.roi_align`` as
+    ``RoIAlign`` via ``import ... as`` — that binds the *module*, yet
+    ``adapt_unflod`` calls ``RoIAlign(...)`` like a function. Confirm the
+    import should be ``from torchvision.ops import roi_align as RoIAlign``.
+    """
+
+    def __init__(
+        self,
+        grid_size,
+        embed_dim,
+        num_heads,
+        kv_dim=None,
+        norm_layer=partial(nn.LayerNorm, eps=1e-6)
+    ):
+        super().__init__()
+        self.grid_size = grid_size
+        self.num_queries = grid_size ** 2
+        self.embed_dim = embed_dim
+        self.num_heads = num_heads
+
+        # Fixed (non-trainable) 2D sin-cos position embedding on the query grid.
+        self.pos_embed = nn.Parameter(
+            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
+        ).requires_grad_(False)
+
+        # Learnable queries, one per output token.
+        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
+        trunc_normal_(self.query, std=.02)
+
+        # Project K/V only when the incoming dim differs from embed_dim.
+        if kv_dim is not None and kv_dim != embed_dim:
+            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
+        else:
+            self.kv_proj = nn.Identity()
+
+        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
+        self.ln_q = norm_layer(embed_dim)
+        self.ln_kv = norm_layer(embed_dim)
+
+        self.ln_post = norm_layer(embed_dim)
+        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        # Truncated-normal init for Linear weights, zeros/ones for LayerNorm.
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+        if isinstance(m, nn.Linear) and m.bias is not None:
+            nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def cal_best_pooling_size(self, feature_wh_ratio=1.0):
+        """Choose the RoIAlign output size (h, w) whose aspect ratio is
+        closest to feature_wh_ratio, comparing ratios in log space."""
+        candidate_pooling_sizes = [
+            (4, 2), (3, 2), (4, 3), (3, 3),
+            (2, 4), (2, 3), (3, 4)
+        ]  # w, h
+        log_feature_wh_ratio = math.log(feature_wh_ratio)
+        best_pooling_size = (3, 3)  # w, h
+        min_error = float("inf")
+        for candidate_pooling_size in candidate_pooling_sizes:
+            w, h = candidate_pooling_size
+            error = abs(log_feature_wh_ratio - math.log(w/h))
+            if error < min_error:
+                # Note: stored as (h, w), matching RoIAlign's output_size order.
+                best_pooling_size = (h, w)
+                min_error = error
+        return best_pooling_size
+
+    def adapt_unflod(self, input_embeds, spatial_size=(24, 24), best_grid=(1, 1), sampler_bins=1):
+        # input_embeds: bs, n, c
+        # spatial_size: feature map height, width
+        # The larger sampler_bins is, the more sample points and the more detail.
+        # (Name "unflod" is kept as-is; callers use this spelling.)
+        input_embeds = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)
+        # Slice the feature map into num_queries regions, then RoIAlign each
+        # region to the aspect-ratio-matched pooling size.
+        resample_regions, best_grid, wh_ratio = slice_image_feature_minicpm(input_embeds, self.num_queries)
+
+        output_size = self.cal_best_pooling_size(wh_ratio)
+        aligned_feature = RoIAlign(input_embeds.float(), resample_regions.float(), output_size,
+                                   spatial_scale=1.0).to(dtype=input_embeds.dtype)
+        unfold_input_embeds = aligned_feature.flatten(-2).permute(0, 2, 1)
+        # bs*N, c, h, w -> bs*N,c,h*w -> bs*N, h*w, c
+        return unfold_input_embeds
+
+    def unfold(self, input_embeds, spatial_size=(24, 24), kernel_size=2, stride=2):
+        # input_embeds: bs, n, c
+        # spatial_size: feature map height, width
+        # Sliding-window unfold: each k*k patch becomes one token group.
+        input_embeds = input_embeds.permute(0, 2, 1).unflatten(-1, spatial_size)
+        unfold_func = nn.Unfold(kernel_size=kernel_size, stride=stride)
+        unfold_input_embeds = unfold_func(input_embeds)  # bs, c* k**2, l
+        unfold_input_embeds = unfold_input_embeds.unflatten(1, [-1, kernel_size ** 2]).permute(0, 3, 2, 1).flatten(0, 1)
+        # bs, c*k**2, l -> bs, c, k**2, l -> bs, l, k**2, c -> bs*l, k**2, c
+        return unfold_input_embeds
+
+    def forward(self, x, tgt_size=(24, 24), attn_mask=None):
+        """Cross-attend the learnable query grid against x.
+
+        x: (bs, n, kv_dim) visual tokens whose spatial layout is tgt_size.
+        Returns (bs, num_queries, embed_dim) after ln_post and the output
+        projection.
+        """
+        x = x.to(torch.bfloat16)
+        dtype = x.dtype
+        bs = x.shape[0]
+        key_height, key_width = tgt_size
+        # Resample the square pos_embed grid to the key's spatial size.
+        key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))
+
+
+        x = self.ln_kv(self.kv_proj(x))
+
+        q = self.ln_q(self.query)  #[:num_valid_query]
+
+
+        # Queries get the grid pos_embed; keys get the resampled pos_embed.
+        query = self._repeat(q, bs) + self.pos_embed[None].to(dtype=dtype)
+        key = x + key_pos_embed[None].to(dtype=dtype)
+        value = x
+
+        query = self.unfold(query, spatial_size=(self.grid_size, self.grid_size), kernel_size=1, stride=1)
+        key = self.adapt_unflod(key, spatial_size=(key_height, key_width))
+        value = self.adapt_unflod(value, spatial_size=(key_height, key_width))
+
+        out, attn_weights = self.attn(
+            query.permute(1, 0, 2),
+            key.permute(1, 0, 2),
+            value.permute(1, 0, 2),
+            attn_mask=attn_mask
+        )
+        # out->1, bs*l, c
+        x = out[0].unflatten(0, [bs, -1])  # bs, l, c
+        x = self.ln_post(x)
+        x = x @ self.proj
+        return x
+
+    def _repeat(self, query, N: int):
+        # Tile a (L, C) query tensor into a batch of N copies: (N, L, C).
+        return query.unsqueeze(0).repeat(N, 1, 1)
diff --git a/VLMEvalKit-sudoku/llava/model/utils.py b/VLMEvalKit-sudoku/llava/model/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..10652a5f9aaa2e0cddaef0b1a7bc39013a0d957b
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/model/utils.py
@@ -0,0 +1,20 @@
+from transformers import AutoConfig
+
+
+def auto_upgrade(config):
+    """Interactively upgrade an old (llama-typed) LLaVA v0 checkpoint config.
+
+    If the path name contains "llava" but the saved config's model_type does
+    not, prompts the user; on confirmation rewrites model_type/architectures
+    in place via save_pretrained, otherwise exits the process with code 1.
+    """
+    cfg = AutoConfig.from_pretrained(config)
+    if "llava" in config and "llava" not in cfg.model_type:
+        assert cfg.model_type == "llama"
+        print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.")
+        print("You must upgrade the checkpoint to the new code base (this can be done automatically).")
+        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
+        if confirm.lower() in ["y", "yes"]:
+            print("Upgrading checkpoint...")
+            assert len(cfg.architectures) == 1
+            # Patch the class attribute so save_pretrained serializes the new type.
+            setattr(cfg.__class__, "model_type", "llava")
+            cfg.architectures[0] = "LlavaLlamaForCausalLM"
+            cfg.save_pretrained(config)
+            print("Checkpoint upgraded.")
+        else:
+            print("Checkpoint upgrade aborted.")
+            exit(1)
diff --git a/VLMEvalKit-sudoku/llava/serve/examples/waterview.jpg b/VLMEvalKit-sudoku/llava/serve/examples/waterview.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5ea03ee6fa60f4025999012b817e674984c706cd
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/examples/waterview.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d092764cc9f21b9bc535ff5284b5add4d8256148bab1bc2f5b5ab3fd32759a36
+size 95499
diff --git a/VLMEvalKit-sudoku/llava/serve/register_worker.py b/VLMEvalKit-sudoku/llava/serve/register_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2c40295e0351f25709ba25554c9329f15bf0d2
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/register_worker.py
@@ -0,0 +1,26 @@
+"""
+Manually register workers.
+
+Usage:
+python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002
+"""
+
+import argparse
+
+import requests
+
+if __name__ == "__main__":
+    # CLI entry point: POSTs a manual worker registration to the controller.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--controller-address", type=str)
+    parser.add_argument("--worker-name", type=str)
+    parser.add_argument("--check-heart-beat", action="store_true")
+    args = parser.parse_args()
+
+    url = args.controller_address + "/register_worker"
+    data = {
+        "worker_name": args.worker_name,
+        "check_heart_beat": args.check_heart_beat,
+        # None lets the controller fetch the worker's status itself.
+        "worker_status": None,
+    }
+    r = requests.post(url, json=data)
+    assert r.status_code == 200
diff --git a/VLMEvalKit-sudoku/llava/serve/sglang_worker.py b/VLMEvalKit-sudoku/llava/serve/sglang_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c047f48ea6d8774b30d1c5df159d877edfd742
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/sglang_worker.py
@@ -0,0 +1,237 @@
+"""
+A model worker executes the model.
+"""
+
+import argparse
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+import json
+import time
+import threading
+import uuid
+
+from fastapi import FastAPI, Request, BackgroundTasks
+from fastapi.responses import StreamingResponse
+import requests
+import re
+import uvicorn
+from functools import partial
+
+from llava.constants import WORKER_HEART_BEAT_INTERVAL
+from llava.utils import build_logger, server_error_msg, pretty_print_semaphore
+from llava.model.builder import load_pretrained_model
+from llava.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, expand2square
+from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from transformers import AutoTokenizer
+
+import sglang as sgl
+from sglang.test.test_utils import add_common_sglang_args_and_parse, select_sglang_backend
+from sglang.backend.runtime_endpoint import RuntimeEndpoint
+from sglang.utils import read_jsonl, dump_state_text
+from sglang.lang.interpreter import ProgramState
+
+
+GB = 1 << 30
+
+worker_id = str(uuid.uuid4())[:6]
+logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
+global_counter = 0
+
+model_semaphore = None
+
+
+def heart_beat_worker(controller):
+    """Daemon loop: send a heart beat to the controller every interval."""
+    while True:
+        time.sleep(WORKER_HEART_BEAT_INTERVAL)
+        controller.send_heart_beat()
+
+
+@sgl.function
+def pipeline(s, prompt, max_tokens):
+    """SGLang program: append interleaved text/image parts, then generate.
+
+    prompt is a list whose str items are text and whose non-str items are
+    images; the final sgl.gen produces the "response" variable.
+    """
+    for p in prompt:
+        if type(p) is str:
+            s += p
+        else:
+            s += sgl.image(p)
+    s += sgl.gen("response", max_tokens=max_tokens)
+
+
+class ModelWorker:
+    """Serving worker that proxies generation requests to an SGLang runtime
+    endpoint and keeps itself registered with the controller."""
+
+    def __init__(self, controller_addr, worker_addr, sgl_endpoint, worker_id, no_register, model_name):
+        self.controller_addr = controller_addr
+        self.worker_addr = worker_addr
+        self.worker_id = worker_id
+
+        # Select backend
+        backend = RuntimeEndpoint(sgl_endpoint)
+        sgl.set_default_backend(backend)
+        model_path = backend.model_info["model_path"]
+
+        if model_path.endswith("/"):
+            model_path = model_path[:-1]
+        if model_name is None:
+            # Derive a display name from the path; include the parent dir
+            # for checkpoint-* folders so the name stays unique.
+            model_paths = model_path.split("/")
+            if model_paths[-1].startswith("checkpoint-"):
+                self.model_name = model_paths[-2] + "_" + model_paths[-1]
+            else:
+                self.model_name = model_paths[-1]
+        else:
+            self.model_name = model_name
+
+        logger.info(f"Loading the SGLANG model {self.model_name} on worker {worker_id} ...")
+
+        if not no_register:
+            self.register_to_controller()
+            self.heart_beat_thread = threading.Thread(target=heart_beat_worker, args=(self,))
+            self.heart_beat_thread.start()
+
+    def register_to_controller(self):
+        """Announce this worker (and its current status) to the controller."""
+        logger.info("Register to controller")
+
+        url = self.controller_addr + "/register_worker"
+        data = {"worker_name": self.worker_addr, "check_heart_beat": True, "worker_status": self.get_status()}
+        r = requests.post(url, json=data)
+        assert r.status_code == 200
+
+    def send_heart_beat(self):
+        """Report queue length to the controller; re-register if forgotten.
+
+        Retries forever on network errors (5 s backoff) so a controller
+        restart does not kill the worker.
+        """
+        logger.info(f"Send heart beat. Models: {[self.model_name]}. " f"Semaphore: {pretty_print_semaphore(model_semaphore)}. " f"global_counter: {global_counter}")
+
+        url = self.controller_addr + "/receive_heart_beat"
+
+        while True:
+            try:
+                ret = requests.post(url, json={"worker_name": self.worker_addr, "queue_length": self.get_queue_length()}, timeout=5)
+                exist = ret.json()["exist"]
+                break
+            except requests.exceptions.RequestException as e:
+                logger.error(f"heart beat error: {e}")
+            time.sleep(5)
+
+        if not exist:
+            self.register_to_controller()
+
+    def get_queue_length(self):
+        """Number of requests running or waiting on the model semaphore."""
+        if model_semaphore is None:
+            return 0
+        else:
+            return args.limit_model_concurrency - model_semaphore._value + (len(model_semaphore._waiters) if model_semaphore._waiters is not None else 0)
+
+    def get_status(self):
+        """Status dict in the controller's expected schema."""
+        return {
+            "model_names": [self.model_name],
+            "speed": 1,
+            "queue_length": self.get_queue_length(),
+        }
+
+    async def generate_stream(self, params):
+        """Async generator yielding NUL-delimited JSON chunks of the
+        growing response text.
+
+        Raises ValueError when the image count does not match the number of
+        image tokens in the prompt.
+        """
+        ori_prompt = prompt = params["prompt"]
+        images = params.get("images", None)
+        if images is not None and len(images) > 0:
+            # NOTE(review): this inner check repeats the outer len(images) > 0.
+            if len(images) > 0:
+                if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
+                    raise ValueError("Number of images does not match number of tokens in prompt")
+
+                images = [load_image_from_base64(image) for image in images]
+                # FIXME: hacky padding
+                images = [expand2square(image, tuple(int(x * 255) for x in [0.48145466, 0.4578275, 0.40821073])) for image in images]
+
+                # FIXME: for image-start/end token
+                # replace_token = DEFAULT_IMAGE_TOKEN
+                # if getattr(self.model.config, 'mm_use_im_start_end', False):
+                #     replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
+                # prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+                # Interleave text segments with their images for the sgl pipeline.
+                prompt = prompt.replace(" " + DEFAULT_IMAGE_TOKEN + "\n", DEFAULT_IMAGE_TOKEN)
+                prompt_split = prompt.split(DEFAULT_IMAGE_TOKEN)
+                prompt = []
+                for i in range(len(prompt_split)):
+                    prompt.append(prompt_split[i])
+                    if i < len(images):
+                        prompt.append(images[i])
+        else:
+            prompt = [prompt]
+
+        temperature = float(params.get("temperature", 1.0))
+        top_p = float(params.get("top_p", 1.0))
+        # max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
+        max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
+        stop_str = params.get("stop", None)
+        stop_str = [stop_str] if stop_str is not None else None
+
+        if max_new_tokens < 1:
+            yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
+            return
+
+        # print(prompt)
+        state = pipeline.run(prompt, max_new_tokens, temperature=temperature, top_p=top_p, stream=True)
+
+        generated_text = ori_prompt
+        async for text_outputs in state.text_async_iter(var_name="response"):
+            generated_text += text_outputs
+            yield json.dumps({"text": generated_text, "error_code": 0}).encode() + b"\0"
+
+    async def generate_stream_gate(self, params):
+        """Wrap generate_stream, converting exceptions into error payloads."""
+        try:
+            async for x in self.generate_stream(params):
+                yield x
+        except ValueError as e:
+            print("Caught ValueError:", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+        except Exception as e:
+            print("Caught Unknown Error", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+
+
+app = FastAPI()
+
+
+def release_model_semaphore(fn=None):
+    # Release the concurrency slot, then run an optional follow-up
+    # callback (used to push a heart beat after each request).
+    model_semaphore.release()
+    if fn is not None:
+        fn()
+
+
+@app.post("/worker_generate_stream")
+async def generate_stream(request: Request):
+    """Acquire a concurrency slot, stream generation results, and release
+    the slot via a background task when the response completes."""
+    global model_semaphore, global_counter
+    global_counter += 1
+    params = await request.json()
+
+    # Lazily create the semaphore so it binds to the running event loop.
+    if model_semaphore is None:
+        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+    await model_semaphore.acquire()
+    worker.send_heart_beat()
+    generator = worker.generate_stream_gate(params)
+    background_tasks = BackgroundTasks()
+    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
+    return StreamingResponse(generator, background=background_tasks)
+
+
+@app.post("/worker_get_status")
+async def get_status(request: Request):
+    """Expose the worker's status dict to the controller."""
+    return worker.get_status()
+
+
+if __name__ == "__main__":
+    # CLI entry point: parse serving options, build the worker, run uvicorn.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--port", type=int, default=21002)
+    parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
+    parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
+    parser.add_argument("--model-name", type=str)
+    parser.add_argument("--sgl-endpoint", type=str)
+    parser.add_argument("--limit-model-concurrency", type=int, default=5)
+    parser.add_argument("--stream-interval", type=int, default=1)
+    parser.add_argument("--no-register", action="store_true")
+    args = parser.parse_args()
+    logger.info(f"args: {args}")
+
+    worker = ModelWorker(args.controller_address, args.worker_address, args.sgl_endpoint, worker_id, args.no_register, args.model_name)
+    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/VLMEvalKit-sudoku/scripts/AI2D_preproc.ipynb b/VLMEvalKit-sudoku/scripts/AI2D_preproc.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..f93b8a880e5fc636a40abc76ab6a59b9c3c7eeda
--- /dev/null
+++ b/VLMEvalKit-sudoku/scripts/AI2D_preproc.ipynb
@@ -0,0 +1,261 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os, cv2\n",
+ "import string\n",
+ "import os.path as osp\n",
+ "import numpy as np\n",
+ "from collections import defaultdict\n",
+ "from vlmeval.smp import ls, load, dump, download_file, encode_image_file_to_base64, md5, mrlines\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import multiprocessing as mp\n",
+ "from PIL import Image, ImageFont, ImageDraw\n",
+ "\n",
+ "font_URL = 'http://opencompass.openxlab.space/utils/Fonts/timesb.ttf'\n",
+ "font_file = 'timesb.ttf'\n",
+ "if not osp.exists(font_file):\n",
+ " download_file(font_URL)\n",
+ " \n",
+ "test_split_URL = 'https://s3-us-east-2.amazonaws.com/prior-datasets/ai2d_test_ids.csv'\n",
+ "test_split_file = 'ai2d_test_ids.csv'\n",
+ "if not osp.exists(test_split_file):\n",
+ " download_file(test_split_URL)\n",
+ " \n",
+ "test_ids = set(mrlines(test_split_file))\n",
+ " \n",
+ "def proper_font_size(font_file, wh, text, ratio=1):\n",
+ " font_size = 2\n",
+ " while True:\n",
+ " font = ImageFont.truetype(font_file, font_size)\n",
+ " real_box = font.getbbox(text)\n",
+ " real_wh = (real_box[2] - real_box[0], real_box[3] - real_box[1])\n",
+ " if real_wh[0] > wh[0] * ratio or real_wh[1] > wh[1] * ratio:\n",
+ " break\n",
+ " font_size += 1\n",
+ " return font_size\n",
+ "\n",
+ "def cover_image(ann_path):\n",
+ " data = load(ann_path)\n",
+ " texts = list(data['text'].values())\n",
+ " raw_img = ann_path.replace('annotations', 'images').replace('.json', '')\n",
+ " tgt_img = raw_img.replace('images', 'images_abc')\n",
+ " img = Image.open(raw_img)\n",
+ " draw = ImageDraw.Draw(img)\n",
+ " for text in texts:\n",
+ " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n",
+ " T = text['replacementText']\n",
+ " draw.rectangle((st, ed), fill='white')\n",
+ " font_size = proper_font_size(font_file, (ed[0] - st[0], ed[1] - st[1]), T, ratio=1)\n",
+ " font = ImageFont.truetype(font_file, font_size)\n",
+ " text_box = font.getbbox(T)\n",
+ " text_wh = (text_box[2] - text_box[0], text_box[3] - text_box[1])\n",
+ " cx, cy = (st[0] + ed[0]) // 2, st[1]\n",
+ " stx = cx - text_wh[0] // 2\n",
+ " sty = cy - text_wh[1] // 2\n",
+ " draw.text((stx, sty), T, font=font, fill='black')\n",
+ " img.save(tgt_img) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Process for no mask images\n",
+ "test_ids = set(mrlines(test_split_file))\n",
+ "\n",
+ "def detect_image_color(image):\n",
+ " gray_image = image.convert('L')\n",
+ " mean_brightness = np.mean(np.array(gray_image))\n",
+ " if mean_brightness < 127:\n",
+ " return 'white'\n",
+ " else:\n",
+ " return 'black'\n",
+ "\n",
+ "def cover_image(ann_path):\n",
+ " data = load(ann_path)\n",
+ " texts = list(data['text'].values())\n",
+ " raw_img = ann_path.replace('annotations', 'images').replace('.json', '')\n",
+ " tgt_img = raw_img.replace('images', 'images_abc')\n",
+ " img = Image.open(raw_img)\n",
+ " draw = ImageDraw.Draw(img)\n",
+ " color = detect_image_color(img)\n",
+ " font_size = 0\n",
+ " for text in texts:\n",
+ " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n",
+ " font_size += (ed[1] - st[1])\n",
+ " if len(texts) != 0:\n",
+ " font_size /= len(texts)\n",
+ " else:\n",
+ " font_size = 2\n",
+ " for text in texts:\n",
+ " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n",
+ " T = text['replacementText']\n",
+ " for i in range(2):\n",
+ " draw.rectangle(\n",
+ " [(st[0] - i, st[1] - i), (ed[0] + i, ed[1] + i)],\n",
+ " outline=color\n",
+ " )\n",
+ " font = ImageFont.truetype(font_file, font_size)\n",
+ " text_box = font.getbbox(T)\n",
+ " text_wh = (text_box[2] - text_box[0], text_box[3] - text_box[1])\n",
+ " cx, cy = (st[0] + ed[0]) // 2, st[1]\n",
+ " stx = cx - text_wh[0] // 2\n",
+ " sty = cy - text_wh[1] * 1.5\n",
+ " if sty < 0:\n",
+ " sty = cy + text_wh[1] * 1.3\n",
+ " draw.text((stx, sty), T, font=font, fill=color)\n",
+ " img.save(tgt_img) "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "download_file('https://ai2-public-datasets.s3.amazonaws.com/diagrams/ai2d-all.zip')\n",
+ "os.system('unzip -o ai2d-all.zip')\n",
+ "\n",
+ "images = ls('ai2d/images/')\n",
+ "questions = ls('ai2d/questions/')\n",
+ "annotations = ls('ai2d/annotations/')\n",
+ "cates = load('ai2d/categories.json')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pool = mp.Pool(32)\n",
+ "pool.map(cover_image, annotations)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def puncproc(inText):\n",
+ " import re\n",
+ " outText = inText\n",
+ " punct = [\n",
+ " ';', r'/', '[', ']', '\"', '{', '}', '(', ')', '=', '+', '\\\\', '_', '-',\n",
+ " '>', '<', '@', '`', ',', '?', '!'\n",
+ " ]\n",
+ " commaStrip = re.compile('(\\d)(,)(\\d)') # noqa: W605\n",
+ " periodStrip = re.compile('(?!<=\\d)(\\.)(?!\\d)') # noqa: W605\n",
+ " for p in punct:\n",
+ " if (p + ' ' in inText or ' ' + p in inText) or (re.search(commaStrip, inText) is not None):\n",
+ " outText = outText.replace(p, '')\n",
+ " else:\n",
+ " outText = outText.replace(p, ' ')\n",
+ " outText = periodStrip.sub('', outText, re.UNICODE)\n",
+ " return outText\n",
+ "\n",
+ "def check_choices(line):\n",
+ " def ischar(s):\n",
+ " s = str(s)\n",
+ " if s in ['{}', 'Both', 'None of above']:\n",
+ " return True\n",
+ " elif s.startswith('Stage ') and ischar(s[6:]):\n",
+ " return True\n",
+ " elif ' and ' in s and np.all([ischar(x) for x in s.split(' and ')]):\n",
+ " return True\n",
+ " elif len(s) <= 2:\n",
+ " return True\n",
+ " elif len(puncproc(s).split()) > 1:\n",
+ " return np.all([ischar(x) for x in puncproc(s).split()])\n",
+ " return False\n",
+ " n_char = sum([ischar(line[x]) for x in 'ABCD'])\n",
+ " return n_char >= 3\n",
+ "\n",
+ "def check_question(question):\n",
+ " words = puncproc(question).split()\n",
+ " for ch in string.ascii_lowercase + string.ascii_uppercase:\n",
+ " if ch in words:\n",
+ " return True\n",
+ " return False\n",
+ "\n",
+ "def is_abc(abc, choices, question):\n",
+ " if abc == 0:\n",
+ " return False\n",
+ " if check_choices(choices):\n",
+ " return True\n",
+ " if check_question(question):\n",
+ " return True\n",
+ " return False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data_all = defaultdict(list)\n",
+ "for qfile in questions:\n",
+ " data = load(qfile)\n",
+ " idx = data['imageName'].split('.')[0]\n",
+ " if idx not in test_ids:\n",
+ " continue\n",
+ " image_pth = qfile.replace('questions', 'images').replace('.json', '')\n",
+ " cate = cates[image_pth.split('/')[-1]]\n",
+ " for q, qmeta in data['questions'].items():\n",
+ " assert '.png-' in qmeta['questionId']\n",
+ " main, sub = qmeta['questionId'].split('.png-')\n",
+ " idx = int(main) * 100 + int(sub)\n",
+ " \n",
+ " answers = qmeta['answerTexts']\n",
+ " correct = qmeta['correctAnswer']\n",
+ " \n",
+ " data_all['index'].append(idx)\n",
+ " data_all['question'].append(q)\n",
+ " assert len(answers) == 4\n",
+ " for c, a in zip('ABCD', answers):\n",
+ " data_all[c].append(a)\n",
+ " data_all['answer'].append('ABCD'[qmeta['correctAnswer']])\n",
+ " data_all['category'].append(cate)\n",
+ " data_all['abcLabel'].append(qmeta['abcLabel'])\n",
+ " abc = is_abc(qmeta['abcLabel'], {x: data_all[x][-1] for x in 'ABCD'}, q)\n",
+ " # if qmeta['abcLabel'] and not abc:\n",
+ " # print(qmeta['abcLabel'], {x: data_all[x][-1] for x in 'ABCD'}, q)\n",
+ " data_all['image_path'].append(image_pth.replace('images', 'images_abc') if abc else image_pth)\n",
+ "data = pd.DataFrame(data_all)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "images = []\n",
+ "image_seen = {}\n",
+ "for idx, pth in zip(data['index'], data['image_path']):\n",
+ " images.append(encode_image_file_to_base64(pth))\n",
+ "\n",
+ "data['image'] = images\n",
+ "dump(data, 'AI2D_TEST.tsv')\n",
+ "print(md5('AI2D_TEST.tsv'))"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/VLMEvalKit-sudoku/scripts/apires_scan.py b/VLMEvalKit-sudoku/scripts/apires_scan.py
new file mode 100644
index 0000000000000000000000000000000000000000..890aea3da773a8a7071cf5784bceb9177e996390
--- /dev/null
+++ b/VLMEvalKit-sudoku/scripts/apires_scan.py
@@ -0,0 +1,57 @@
"""Scan prediction/evaluation files under a result root and report API failures."""
import sys
from vlmeval import *
from vlmeval.dataset import SUPPORTED_DATASETS
from vlmeval.smp import get_pred_file_format

FAIL_MSG = 'Failed to obtain answer via API.'

root = sys.argv[1]
# Strip a trailing path separator so `root.split('/')[-1]` yields the model name.
if root[-1] in '/\\':
    root = root[:-1]

model_name = root.split('/')[-1]

# Global setting; resolve once instead of re-importing inside the loop.
pred_format = get_pred_file_format()

for d in SUPPORTED_DATASETS:
    fname = f'{model_name}_{d}.{pred_format}'
    pth = osp.join(root, fname)
    if osp.exists(pth):
        data = load(pth)
        # Detect Failure
        assert 'prediction' in data
        data['prediction'] = [str(x) for x in data['prediction']]
        fail = [FAIL_MSG in x for x in data['prediction']]
        if sum(fail):
            nfail = sum(fail)
            ntot = len(fail)
            print(f'Model {model_name} x Dataset {d}: {nfail} out of {ntot} failed. {nfail / ntot * 100: .2f}%. ')

        eval_files = ls(root, match=f'{model_name}_{d}_')
        eval_files = [x for x in eval_files if listinstr([f'{d}_openai', f'{d}_gpt'], x) and x.endswith('.xlsx')]

        if len(eval_files) == 0:
            print(f'Model {model_name} x Dataset {d} openai missing')
            continue

        assert len(eval_files) == 1
        eval_file = eval_files[0]
        data = load(eval_file)

        if 'MMVet' in d:
            bad = [x for x in data['log'] if 'All 5 retries failed.' in str(x)]
            if len(bad):
                print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.')
        elif 'MathVista' in d:
            bad = [x for x in data['res'] if FAIL_MSG in str(x)]
            if len(bad):
                print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.')

        elif d == 'LLaVABench':
            # BUGFIX: the second filter repeated the identical `gpt4_score` condition,
            # making it a no-op. A failed judge call marks both scores as -1.
            # NOTE(review): column name `vlm_score` taken from the LLaVABench eval
            # output schema — confirm against the evaluator if it changes.
            sub = data[data['gpt4_score'] == -1]
            sub = sub[sub['vlm_score'] == -1]
            if len(sub):
                print(f'Model {model_name} x Dataset {d} Evaluation: {len(sub)} out of {len(data)} failed.')
        else:
            bad = [x for x in data['log'] if FAIL_MSG in str(x)]
            if len(bad):
                print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.')
+
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/scripts/auto_run.py b/VLMEvalKit-sudoku/scripts/auto_run.py
new file mode 100644
index 0000000000000000000000000000000000000000..381c3432f36bc9a1f510661d2c230530410e14fa
--- /dev/null
+++ b/VLMEvalKit-sudoku/scripts/auto_run.py
@@ -0,0 +1,40 @@
"""Launch evaluation runs for every supported local VLM on the requested datasets."""
import argparse
from vlmeval.smp import *
from vlmeval.config import supported_VLM
from vlmeval.smp import get_pred_file_format


def is_api(x):
    """True if the registered model entry is an API wrapper (excluded from local runs)."""
    return getattr(supported_VLM[x].func, 'is_api', False)


models = list(supported_VLM)
models = [x for x in models if 'fs' not in x]
models = [x for x in models if not is_api(x)]
exclude_list = ['cogvlm-grounding-generalist', 'emu2']
models = [x for x in models if x not in exclude_list]


def is_large(x):
    """Heuristic size check based on the parameter-count tag in the model name."""
    return '80b' in x or 'emu2' in x or '34B' in x


# Run the cheap models first, the large ones last.
small_models = [x for x in models if not is_large(x)]
large_models = [x for x in models if is_large(x)]
models = small_models + large_models

parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, nargs='+', required=True)
args = parser.parse_args()

# Skip some models
models = [x for x in models if not listinstr(['MiniGPT', 'grounding-generalist'], x)]

# Global setting; hoisted out of the loop instead of re-importing per iteration.
pred_format = get_pred_file_format()

for m in models:
    # Only launch for datasets whose prediction file does not exist yet.
    unknown_datasets = [x for x in args.data if not osp.exists(f'{m}/{m}_{x}.{pred_format}')]
    if len(unknown_datasets) == 0:
        continue
    dataset_str = ' '.join(unknown_datasets)
    if '80b' in m:
        cmd = f'python run.py --data {dataset_str} --model {m}'
    else:
        cmd = f'bash run.sh --data {dataset_str} --model {m}'
    print(cmd)
    os.system(cmd)
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/scripts/cover.sh b/VLMEvalKit-sudoku/scripts/cover.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0a35c5086d3e22bfc447cb5e3eff0054ee6a254c
--- /dev/null
+++ b/VLMEvalKit-sudoku/scripts/cover.sh
@@ -0,0 +1,4 @@
#!/bin/bash
# Sync the repo-level config and misc assets into the vlmeval package tree.
DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# Quote $DIR so paths containing spaces do not word-split; the glob must stay
# outside the quotes so it still expands.
cp "$DIR/../config.py" "$DIR/../vlmeval/"
cp "$DIR"/../misc/* "$DIR/../vlmeval/vlm/misc/"
\ No newline at end of file
diff --git a/VLMEvalKit-sudoku/scripts/data_browser.py b/VLMEvalKit-sudoku/scripts/data_browser.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d6ac06a03414be7f5bd8965f810253a5b08296d
--- /dev/null
+++ b/VLMEvalKit-sudoku/scripts/data_browser.py
@@ -0,0 +1,174 @@
+"""
+pip install gradio # proxy_on first
+python vis_geochat_data.py
+# browse data in http://127.0.0.1:10064
+"""
+
+import os
+import io
+import json
+import copy
+import time
+import gradio as gr
+import base64
+from PIL import Image
+from io import BytesIO
+from argparse import Namespace
+# from llava import conversation as conversation_lib
+from typing import Sequence
+from vlmeval import *
+from vlmeval.dataset import SUPPORTED_DATASETS, build_dataset
+
+SYS = "You are a helpful assistant. Your job is to faithfully translate all provided text into Chinese faithfully. "
+
+# Translator = SiliconFlowAPI(model='Qwen/Qwen2.5-7B-Instruct', system_prompt=SYS)
+Translator = OpenAIWrapper(model='gpt-4o-mini', system_prompt=SYS)
+
+
def image_to_mdstring(image):
    """Wrap a base64-encoded image string into an inline markdown image tag.

    The previous body returned an empty f-string, so every image rendered as
    nothing in the browser; emit a data-URI markdown image instead.
    """
    return f'![image](data:image/jpeg;base64,{image})'
+
+
def images_to_md(images):
    """Render a list of base64 images as one markdown string, blank-line separated."""
    rendered = []
    for img in images:
        rendered.append(image_to_mdstring(img))
    return '\n\n'.join(rendered)
+
+
def mmqa_display(question, target_size=2048):
    """Render one dataset row as a (markdown text, markdown images) pair.

    `question` is a pandas row / dict. Keys longer than one character are
    lower-cased so the lookups below are case-insensitive; single-letter keys
    (choice labels A-Z) keep their case.

    Note: `target_size` is currently unused — TODO confirm whether image
    resizing was intended here.
    """
    question = {k.lower() if len(k) > 1 else k: v for k, v in question.items()}
    # (removed a dead `keys = ...` computation that was never read)

    idx = question.pop('index', 'XXX')
    text = f'\n- INDEX: {idx}\n'

    # 'image' holds either a single base64 string or a stringified python list
    # of them; 'image_path' rows are read from disk and encoded below.
    # NOTE(review): eval() on the stored string assumes the TSV content is
    # trusted — do not point this tool at untrusted files.
    if 'image' in question:
        images = question.pop('image')
        if images[0] == '[' and images[-1] == ']':
            images = eval(images)
        else:
            images = [images]
    else:
        images = question.pop('image_path')
        if images[0] == '[' and images[-1] == ']':
            images = eval(images)
        else:
            images = [images]
        images = [encode_image_file_to_base64(x) for x in images]

    qtext = question.pop('question', None)
    if qtext is not None:
        text += f'- QUESTION: {qtext}\n'

    # Multiple-choice options live under single uppercase-letter columns.
    if 'A' in question:
        text += f'- Choices: \n'
        for k in string.ascii_uppercase:
            if k in question:
                text += f'\t-{k}: {question.pop(k)}\n'
    answer = question.pop('answer', None)

    # Dump any remaining non-NaN columns verbatim.
    for k in question:
        if not pd.isna(question[k]):
            text += f'- {k.upper()}. {question[k]}\n'

    if answer is not None:
        text += f'- ANSWER: {answer}\n'

    image_md = images_to_md(images)

    return text, image_md
+
+
def parse_args():
    """Parse CLI arguments: only the gradio server port is configurable."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=7860)
    return parser.parse_args()
+
+
def gradio_app_vis_dataset(port=7860):
    """Launch a gradio UI for browsing VLMEvalKit datasets sample by sample."""
    # `data` is the currently displayed DataFrame; `loaded_obj` caches
    # filename -> DataFrame so revisiting a file is instant.
    data, loaded_obj = None, {}

    def btn_submit_click(filename, ann_id):
        # First visit to a file: delegate to filename_change, which loads it.
        if filename not in loaded_obj:
            return filename_change(filename, ann_id)
        nonlocal data
        data_desc = gr.Markdown(f'Visualizing (unknown), {len(data)} samples in total. ')
        if ann_id < 0 or ann_id >= len(data):
            return filename, ann_id, data_desc, gr.Markdown('Invalid Index'), gr.Markdown(f'Index out of range [0, {len(data) - 1}]')
        item = data.iloc[ann_id]
        text, image_md = mmqa_display(item)
        return filename, ann_id, data_desc, image_md, text

    def btn_next_click(filename, ann_id):
        # Advance to the next sample; bounds are checked in btn_submit_click.
        return btn_submit_click(filename, ann_id + 1)

    # def translate_click(anno_en):
    #     return gr.Markdown(Translator.generate(anno_en))

    def filename_change(filename, ann_id):
        nonlocal data, loaded_obj

        def legal_filename(filename):
            # Resolution order: known dataset name -> existing path ->
            # path relative to the LMUData root -> None (not found).
            LMURoot = LMUDataRoot()
            if filename in SUPPORTED_DATASETS:
                return build_dataset(filename).data
            elif osp.exists(filename):
                data = load(filename)
                assert 'index' in data and 'image' in data
                image_map = {i: image for i, image in zip(data['index'], data['image'])}
                # Short 'image' entries are cross-references to another row's
                # index; dereference so every row carries its own payload.
                for k, v in image_map.items():
                    if (not isinstance(v, str) or len(v) < 64) and v in image_map:
                        image_map[k] = image_map[v]
                data['image'] = [image_map[k] for k in data['index']]
                return data
            elif osp.exists(osp.join(LMURoot, filename)):
                filename = osp.join(LMURoot, filename)
                return legal_filename(filename)
            else:
                return None

        data = legal_filename(filename)
        if data is None:
            return filename, 0, gr.Markdown(''), gr.Markdown("File not found"), gr.Markdown("File not found")

        loaded_obj[filename] = data
        return btn_submit_click(filename, 0)

    with gr.Blocks() as app:

        filename = gr.Textbox(
            value='Dataset Name (supported by VLMEvalKit) or TSV FileName (Relative under `LMURoot` or Real Path)',
            label='Dataset',
            interactive=True,
            visible=True)

        with gr.Row():
            ann_id = gr.Number(0, label='Sample Index (Press Enter)', interactive=True, visible=True)
            btn_next = gr.Button("Next")
            # btn_translate = gr.Button('CN Translate')

        with gr.Row():
            data_desc = gr.Markdown('Dataset Description', label='Dataset Description')

        with gr.Row():
            image_output = gr.Markdown('Image PlaceHolder', label='Image Visualization')
            anno_en = gr.Markdown('Image Annotation', label='Image Annotation')
            # anno_cn = gr.Markdown('Image Annotation (Chinese)', label='Image Annotation (Chinese)')

        # Every callback returns values for all five components, in this order.
        input_components = [filename, ann_id]
        all_components = [filename, ann_id, data_desc, image_output, anno_en]

        filename.submit(filename_change, input_components, all_components)
        ann_id.submit(btn_submit_click, input_components, all_components)
        btn_next.click(btn_next_click, input_components, all_components)
        # btn_translate.click(translate_click, anno_en, anno_cn)

    # app.launch()
    app.launch(server_name='0.0.0.0', debug=True, show_error=True, server_port=port)
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ gradio_app_vis_dataset(port=args.port)
+
diff --git a/VLMEvalKit-sudoku/vlmeval/api/__init__.py b/VLMEvalKit-sudoku/vlmeval/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c42e319e46afe733cec5949565aa9645da7714de
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/__init__.py
@@ -0,0 +1,30 @@
+from .gpt import OpenAIWrapper, GPT4V
+from .hf_chat_model import HFChatModel
+from .gemini import GeminiWrapper, Gemini
+from .qwen_vl_api import QwenVLWrapper, QwenVLAPI, Qwen2VLAPI
+from .qwen_api import QwenAPI
+from .claude import Claude_Wrapper, Claude3V
+from .reka import Reka
+from .glm_vision import GLMVisionAPI
+from .cloudwalk import CWWrapper
+from .sensechat_vision import SenseChatVisionAPI
+from .siliconflow import SiliconFlowAPI, TeleMMAPI
+from .hunyuan import HunyuanVision
+from .bailingmm import bailingMMAPI
+from .bluelm_api import BlueLMWrapper, BlueLM_API
+from .jt_vl_chat import JTVLChatAPI
+from .taiyi import TaiyiAPI
+from .lmdeploy import LMDeployAPI
+from .taichu import TaichuVLAPI, TaichuVLRAPI
+from .doubao_vl_api import DoubaoVL
+from .mug_u import MUGUAPI
+from .kimivl_api import KimiVLAPIWrapper, KimiVLAPI
+
+__all__ = [
+ 'OpenAIWrapper', 'HFChatModel', 'GeminiWrapper', 'GPT4V', 'Gemini',
+ 'QwenVLWrapper', 'QwenVLAPI', 'QwenAPI', 'Claude3V', 'Claude_Wrapper',
+ 'Reka', 'GLMVisionAPI', 'CWWrapper', 'SenseChatVisionAPI', 'HunyuanVision',
+ 'Qwen2VLAPI', 'BlueLMWrapper', 'BlueLM_API', 'JTVLChatAPI',
+ 'bailingMMAPI', 'TaiyiAPI', 'TeleMMAPI', 'SiliconFlowAPI', 'LMDeployAPI',
+ 'TaichuVLAPI', 'TaichuVLRAPI', 'DoubaoVL', "MUGUAPI", 'KimiVLAPIWrapper', 'KimiVLAPI'
+]
diff --git a/VLMEvalKit-sudoku/vlmeval/api/glm_vision.py b/VLMEvalKit-sudoku/vlmeval/api/glm_vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b009ef968881851458d5cfb1a1b56757d2da058
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/glm_vision.py
@@ -0,0 +1,77 @@
+import re
+import requests
+requests.packages.urllib3.disable_warnings()
+
+from vlmeval.smp import *
+from vlmeval.api.base import BaseAPI
+from vlmeval.dataset import DATASET_TYPE
+from vlmeval.smp.vlm import encode_image_file_to_base64
+
+
class GLMVisionWrapper(BaseAPI):
    """API wrapper for ZhipuAI (GLM) vision-language chat models."""

    is_api: bool = True

    def __init__(self,
                 model: str,
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = True,
                 system_prompt: str = None,
                 max_tokens: int = 4096,
                 proxy: str = None,
                 **kwargs):
        """
        Args:
            model: ZhipuAI model name (e.g. a GLM-4V variant).
            retry: retry count handled by BaseAPI.
            key: API key; falls back to the GLMV_API_KEY environment variable.
            max_tokens: completion-token cap forwarded to the API.
            proxy: accepted for interface compatibility; currently unused.
        """
        from zhipuai import ZhipuAI
        self.model = model
        self.fail_msg = 'Failed to obtain answer via API. '
        # BUGFIX: `max_tokens` used to be silently ignored (a literal 2048 was
        # sent with every request); store it so generate_inner can honor it.
        self.max_tokens = max_tokens
        if key is None:
            key = os.environ.get('GLMV_API_KEY', None)
        assert key is not None, (
            'Please set the API Key (obtain it here: '
            'https://bigmodel.cn)'
        )
        self.client = ZhipuAI(api_key=key)
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

    def build_msgs(self, msgs_raw, system_prompt=None, dataset=None):
        """Convert VLMEvalKit message dicts into the ZhipuAI chat format."""
        msgs = cp.deepcopy(msgs_raw)
        content = []
        for i, msg in enumerate(msgs):
            if msg['type'] == 'text':
                content.append(dict(type='text', text=msg['value']))
            elif msg['type'] == 'image':
                content.append(dict(type='image_url', image_url=dict(url=encode_image_file_to_base64(msg['value']))))
        # Yes/No benchmarks need an explicit instruction to keep answers parseable.
        if dataset in {'HallusionBench', 'POPE'}:
            content.append(dict(type="text", text="Please answer yes or no."))
        ret = [dict(role='user', content=content)]
        return ret

    def generate_inner(self, inputs, **kwargs) -> str:
        """Perform one API call; returns (status, answer, log) as BaseAPI expects."""
        assert isinstance(inputs, str) or isinstance(inputs, list)
        inputs = [inputs] if isinstance(inputs, str) else inputs

        messages = self.build_msgs(msgs_raw=inputs, dataset=kwargs.get('dataset', None))

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                do_sample=False,
                max_tokens=self.max_tokens
            )
            answer = response.choices[0].message.content.strip()
            if self.verbose:
                self.logger.info(f'inputs: {inputs}\nanswer: {answer}')
            return 0, answer, 'Succeeded!'
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(f'The input messages are {inputs}.')
            return -1, self.fail_msg, ''
+
+
class GLMVisionAPI(GLMVisionWrapper):
    """Public entry point; forwards dataset-aware generation to the wrapper."""

    def generate(self, message, dataset=None):
        return super().generate(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/gpt.py b/VLMEvalKit-sudoku/vlmeval/api/gpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f929dfd857c0d6d6a2c04a9636b6c11b7863071
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/gpt.py
@@ -0,0 +1,293 @@
+from ..smp import *
+import os
+import sys
+from .base import BaseAPI
+
# Named API endpoints; 'OFFICIAL' is the public OpenAI chat-completions URL.
APIBASES = {
    'OFFICIAL': 'https://api.openai.com/v1/chat/completions',
}
+
+
def GPT_context_window(model):
    """Return the context window, in tokens, for a known OpenAI model name.

    Unknown models fall back to 128000, the window of current GPT-4-class models.
    """
    length_map = {
        'gpt-4': 8192,
        'gpt-4-0613': 8192,
        'gpt-4-turbo-preview': 128000,
        'gpt-4-1106-preview': 128000,
        'gpt-4-0125-preview': 128000,
        'gpt-4-vision-preview': 128000,
        'gpt-4-turbo': 128000,
        'gpt-4-turbo-2024-04-09': 128000,
        'gpt-3.5-turbo': 16385,
        'gpt-3.5-turbo-0125': 16385,
        'gpt-3.5-turbo-1106': 16385,
        'gpt-3.5-turbo-instruct': 4096,
    }
    return length_map.get(model, 128000)
+
+
class OpenAIWrapper(BaseAPI):
    """REST client for OpenAI-compatible chat-completion endpoints.

    Routes API keys / base URLs for a family of OpenAI-compatible providers
    (step, yi-vision, internvl2-pro, abab/MiniMax, moonshot, grok, gemini
    preview, ernie, Azure, plain OpenAI) and converts VLMEvalKit message
    dicts into the chat-completions payload format.
    """

    is_api: bool = True

    def __init__(self,
                 model: str = 'gpt-3.5-turbo-0613',
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = False,
                 system_prompt: str = None,
                 temperature: float = 0,
                 timeout: int = 300,
                 api_base: str = None,
                 max_tokens: int = 2048,
                 img_size: int = -1,
                 img_detail: str = 'low',
                 use_azure: bool = False,
                 **kwargs):

        self.model = model
        self.cur_idx = 0
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.use_azure = use_azure

        # Resolve the API key: an explicit `key` argument always wins; otherwise
        # fall back to the provider-specific environment variable.
        if 'step' in model:
            env_key = os.environ.get('STEPAI_API_KEY', '')
            if key is None:
                key = env_key
        elif 'yi-vision' in model:
            env_key = os.environ.get('YI_API_KEY', '')
            if key is None:
                key = env_key
        elif 'internvl2-pro' in model:
            env_key = os.environ.get('InternVL2_PRO_KEY', '')
            if key is None:
                key = env_key
        elif 'abab' in model:
            env_key = os.environ.get('MiniMax_API_KEY', '')
            if key is None:
                key = env_key
        elif 'moonshot' in model:
            env_key = os.environ.get('MOONSHOT_API_KEY', '')
            if key is None:
                key = env_key
        elif 'grok' in model:
            env_key = os.environ.get('XAI_API_KEY', '')
            if key is None:
                key = env_key
        elif 'gemini' in model and 'preview' in model:
            # Will only handle preview models
            env_key = os.environ.get('GOOGLE_API_KEY', '')
            if key is None:
                key = env_key
            api_base = "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"
        elif 'ernie' in model:
            env_key = os.environ.get('BAIDU_API_KEY', '')
            if key is None:
                key = env_key
            api_base = 'https://qianfan.baidubce.com/v2/chat/completions'
            self.baidu_appid = os.environ.get('BAIDU_APP_ID', None)
        else:
            if use_azure:
                env_key = os.environ.get('AZURE_OPENAI_API_KEY', None)
                assert env_key is not None, 'Please set the environment variable AZURE_OPENAI_API_KEY. '

                if key is None:
                    key = env_key
                assert isinstance(key, str), (
                    'Please set the environment variable AZURE_OPENAI_API_KEY to your openai key. '
                )
            else:
                env_key = os.environ.get('OPENAI_API_KEY', '')
                if key is None:
                    key = env_key
                assert isinstance(key, str) and key.startswith('sk-'), (
                    f'Illegal openai_key {key}. '
                    'Please set the environment variable OPENAI_API_KEY to your openai key. '
                )

        self.key = key
        assert img_size > 0 or img_size == -1
        self.img_size = img_size
        assert img_detail in ['high', 'low']
        self.img_detail = img_detail
        self.timeout = timeout
        # Reasoning models (o1/o3/o4, gpt-5) take `max_completion_tokens` and
        # reject the `temperature` field; see generate_inner.
        self.is_max_completion_tokens = ('o1' in model) or ('o3' in model) or ('o4' in model) or ('gpt-5' in model)
        self.is_o_model = ('o1' in model) or ('o3' in model) or ('o4' in model)
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

        if use_azure:
            api_base_template = (
                '{endpoint}openai/deployments/{deployment_name}/chat/completions?api-version={api_version}'
            )
            endpoint = os.getenv('AZURE_OPENAI_ENDPOINT', None)
            assert endpoint is not None, 'Please set the environment variable AZURE_OPENAI_ENDPOINT. '
            deployment_name = os.getenv('AZURE_OPENAI_DEPLOYMENT_NAME', None)
            assert deployment_name is not None, 'Please set the environment variable AZURE_OPENAI_DEPLOYMENT_NAME. '
            api_version = os.getenv('OPENAI_API_VERSION', None)
            assert api_version is not None, 'Please set the environment variable OPENAI_API_VERSION. '

            self.api_base = api_base_template.format(
                endpoint=os.getenv('AZURE_OPENAI_ENDPOINT'),
                deployment_name=os.getenv('AZURE_OPENAI_DEPLOYMENT_NAME'),
                api_version=os.getenv('OPENAI_API_VERSION')
            )
        else:
            if api_base is None:
                if 'OPENAI_API_BASE' in os.environ and os.environ['OPENAI_API_BASE'] != '':
                    self.logger.info('Environment variable OPENAI_API_BASE is set. Will use it as api_base. ')
                    api_base = os.environ['OPENAI_API_BASE']
                else:
                    api_base = 'OFFICIAL'

            assert api_base is not None

            if api_base in APIBASES:
                self.api_base = APIBASES[api_base]
            elif api_base.startswith('http'):
                self.api_base = api_base
            else:
                self.logger.error('Unknown API Base. ')
                raise NotImplementedError
        if os.environ.get('BOYUE', None):
            self.api_base = os.environ.get('BOYUE_API_BASE')
            self.key = os.environ.get('BOYUE_API_KEY')

        # SECURITY FIX: never write the full API key to logs; show a short prefix only.
        key_repr = f'{self.key[:6]}***' if isinstance(self.key, str) and self.key else self.key
        self.logger.info(f'Using API Base: {self.api_base}; API Key: {key_repr}')

    # inputs can be a lvl-2 nested list: [content1, content2, content3, ...]
    # content can be a string or a list of image & text
    def prepare_itlist(self, inputs):
        """Turn a flat list of {'type', 'value'} dicts into chat-completions content items."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    content_list.append(dict(type='text', text=msg['value']))
                elif msg['type'] == 'image':
                    from PIL import Image
                    img = Image.open(msg['value'])
                    b64 = encode_image_to_base64(img, target_size=self.img_size)
                    img_struct = dict(url=f'data:image/jpeg;base64,{b64}', detail=self.img_detail)
                    content_list.append(dict(type='image_url', image_url=img_struct))
        else:
            # Text-only: collapse all values into a single text item.
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(type='text', text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Build the `messages` array, prepending the system prompt when set."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(role='system', content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            # Multi-turn input: the last message must come from the user.
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one chat-completions request; returns (status, answer, raw response)."""
        input_msgs = self.prepare_inputs(inputs)
        temperature = kwargs.pop('temperature', self.temperature)
        max_tokens = kwargs.pop('max_tokens', self.max_tokens)

        # Will send request if use Azure, dk how to use openai client for it
        if self.use_azure:
            headers = {'Content-Type': 'application/json', 'api-key': self.key}
        elif 'internvl2-pro' in self.model:
            headers = {'Content-Type': 'application/json', 'Authorization': self.key}
        else:
            headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self.key}'}
        if hasattr(self, 'baidu_appid'):
            headers['appid'] = self.baidu_appid

        payload = dict(
            model=self.model,
            messages=input_msgs,
            n=1,
            temperature=temperature,
            **kwargs)

        # Reasoning models use `max_completion_tokens` and reject `temperature`.
        if self.is_max_completion_tokens:
            payload['max_completion_tokens'] = max_tokens
            payload.pop('temperature')
        else:
            payload['max_tokens'] = max_tokens

        if 'gemini' in self.model:
            # BUGFIX: pop defensively — `max_tokens` may already have been
            # replaced by `max_completion_tokens` above.
            payload.pop('max_tokens', None)
            payload.pop('n')
            payload['reasoning_effort'] = 'high'

        response = requests.post(
            self.api_base,
            headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1)
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            answer = resp_struct['choices'][0]['message']['content'].strip()
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(response.text if hasattr(response, 'text') else response)

        return ret_code, answer, response

    def get_image_token_len(self, img_path, detail='low'):
        """Estimate the OpenAI vision token cost of one image."""
        import math
        if detail == 'low':
            return 85

        im = Image.open(img_path)
        # BUGFIX: PIL's Image.size is (width, height); the original unpacked it
        # reversed. The 85 + 170*h*w formula is symmetric so totals matched,
        # but the downscale branch operated on mislabeled dimensions.
        width, height = im.size
        if width > 1024 or height > 1024:
            if width > height:
                height = int(height * 1024 / width)
                width = 1024
            else:
                width = int(width * 1024 / height)
                height = 1024

        h = math.ceil(height / 512)
        w = math.ceil(width / 512)
        total = 85 + 170 * h * w
        return total

    def get_token_len(self, inputs) -> int:
        """Approximate prompt token count (text via tiktoken, images via the cost model)."""
        import tiktoken
        try:
            enc = tiktoken.encoding_for_model(self.model)
        except Exception as err:
            # Unknown gpt-* names fall back to the gpt-4 encoding; other
            # providers are not tokenized here.
            if 'gpt' in self.model.lower():
                if self.verbose:
                    self.logger.warning(f'{type(err)}: {err}')
                enc = tiktoken.encoding_for_model('gpt-4')
            else:
                return 0
        assert isinstance(inputs, list)
        tot = 0
        for item in inputs:
            if 'role' in item:
                tot += self.get_token_len(item['content'])
            elif item['type'] == 'text':
                tot += len(enc.encode(item['value']))
            elif item['type'] == 'image':
                tot += self.get_image_token_len(item['value'], detail=self.img_detail)
        return tot
+
+
class GPT4V(OpenAIWrapper):
    """Alias entry point; accepts a dataset hint for interface parity but ignores it."""

    def generate(self, message, dataset=None):
        return super().generate(message)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/hf_chat_model.py b/VLMEvalKit-sudoku/vlmeval/api/hf_chat_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..f575c07cfa71fb70eeba6c92afb602bf2fd0971f
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/hf_chat_model.py
@@ -0,0 +1,261 @@
+import os
+import sys
+import os.path as osp
+import torch
+from ..smp import *
+
+
def get_gpu_num(model_name):
    """Heuristically map a model name to the number of GPUs needed to host it.

    Matches known parameter-size keywords in the (lowercased) name, largest
    first; names with no recognized size conservatively get 8 GPUs.
    """
    name = model_name.lower()
    size_keywords = (
        (8, ('65b', '70b')),
        (4, ('30b', '33b', '35b', '40b')),
        (2, ('13b', '14b', '20b', '8b')),
        (1, ('6b', '7b', 'moss')),
    )
    for gpus, keywords in size_keywords:
        if any(kw in name for kw in keywords):
            return gpus
    return 8
+
+
# HF model paths that have been smoke-tested with HFChatModel; any other path
# triggers a warning at load time but is still attempted.
validated_llms = [
    'internlm/internlm-chat-7b', 'internlm/internlm-chat-7b-8k', 'internlm/internlm-chat-20b',
    'Qwen/Qwen-7B-Chat', 'Qwen/Qwen-14B-Chat',
    'THUDM/chatglm2-6b', 'THUDM/chatglm2-6b-32k', 'THUDM/chatglm3-6b', 'THUDM/chatglm3-6b-32k',
    'baichuan-inc/Baichuan2-7B-Chat', 'baichuan-inc/Baichuan2-13B-Chat',
    'lmsys/vicuna-7b-v1.5', 'lmsys/vicuna-13b-v1.5',
    'meta-llama/Llama-2-7b-chat-hf',
    'meta-llama/Llama-3.1-8B-Instruct'
]
# Model families that must be loaded via AutoModel rather than
# AutoModelForCausalLM (matched by substring through listinstr).
Auto_model = ['chatglm']
+
+
class HFChatModel:
    """Chat wrapper around a locally hosted HuggingFace (or lmdeploy) LLM.

    Hides the per-family prompt formats (baichuan / vicuna / llama / chatglm /
    qwen / internlm) behind a single ``generate()`` entry point.
    """

    def _get_context_length(self, model, model_path):
        """Read the model's context window from its (generation) config."""
        # By default, we use model.config.seq_length
        model_path = model_path.lower()
        if 'baichuan' in model_path:
            context_window = model.config.model_max_length
        elif 'internlm' in model_path or 'llama' in model_path:
            context_window = model.config.max_position_embeddings
        elif 'vicuna' in model_path:
            context_window = model.generation_config.max_length
        else:
            # chatglm & qwen
            context_window = model.config.seq_length
        return context_window

    def _get_context_length_robust(self, model, model_path):
        """Like _get_context_length, but logs loudly and raises on failure."""
        try:
            return self._get_context_length(model, model_path)
        except Exception as err:
            self.logger.critical(f'{type(err)}: {err}')
            self.logger.critical(
                'Failed to extract context_window information from config / generation_config. '
                'Please read the above code and check if the logic works for you model path'
            )
            raise NotImplementedError

    def __init__(self,
                 model_path,
                 system_prompt: str = None,
                 **kwargs):
        """Load the model/tokenizer; leftover kwargs become generation defaults.

        Args:
            model_path (str): Local path or HF hub id (``org/name``).
            system_prompt (str): Optional system prompt prepended to chats.
            **kwargs: ``device`` / ``gpu_offset`` are consumed here; the rest
                are forwarded to generation calls.
        """
        self.logger = get_logger('HFChatModel')
        if 'vicuna' in model_path.lower() or 'llama' in model_path.lower():
            try:
                from fastchat.model import get_conversation_template
            except Exception as err:
                self.logger.critical('Please install fastchat first to use vicuna. ')
                raise err

        # FIX: compute num_gpu unconditionally. The original only defined it
        # inside the CUDA_VISIBLE_DEVICES branch, so the lmdeploy path below
        # hit a NameError whenever the env var was already set.
        num_gpu = get_gpu_num(model_path)
        # FIX: pop gpu_offset unconditionally so it cannot leak into the
        # generation kwargs stored in self.kwargs.
        gpu_offset = kwargs.pop('gpu_offset', 0)
        self.explicit_device = kwargs.pop('device', None)
        if self.explicit_device is None:
            # If CUDA_VISIBLE_DEVICES is not properly set
            if 'CUDA_VISIBLE_DEVICES' not in os.environ or os.environ['CUDA_VISIBLE_DEVICES'] == '0,1,2,3,4,5,6,7':
                os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
                    str(i) for i in range(gpu_offset, gpu_offset + num_gpu))

        from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel

        if model_path not in validated_llms:
            self.logger.warning(f'{model_path} not in validated LLMs, may have inference troubles. ')

        self.model_path = model_path
        LoadModel = AutoModel if listinstr(Auto_model, model_path) else AutoModelForCausalLM
        assert osp.exists(model_path) or len(model_path.split('/')) == 2

        device = self.explicit_device if self.explicit_device else 'auto'

        # Family-specific dtype overrides.
        precision = {}
        if 'internlm-chat-7b' in model_path:
            precision = {'torch_dtype': torch.float16}
        elif 'internlm-chat-20b' in model_path:
            precision = {'torch_dtype': torch.bfloat16}

        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

        if 'llama' in self.model_path.lower():
            # Llama models are served through an lmdeploy pipeline, which
            # manages its own device placement and generation config.
            from lmdeploy import pipeline, GenerationConfig, TurbomindEngineConfig
            self.logger.info(f'Loading model {model_path} with {num_gpu} GPUs')
            backend_config = TurbomindEngineConfig(tp=num_gpu)
            self.gen_config = GenerationConfig(max_new_tokens=256)
            model = pipeline(model_path, backend_config=backend_config)
        else:
            model = LoadModel.from_pretrained(model_path, trust_remote_code=True, device_map='cpu', **precision)
            model = model.eval()

            # FIX: only HF models are moved manually; calling .to() or
            # attaching a generation_config on the lmdeploy pipeline would fail.
            if device != 'cpu':
                model = model.to(f'cuda:{device}' if isinstance(device, int) else 'cuda')
            try:
                from transformers.generation import GenerationConfig
                model.generation_config = GenerationConfig.from_pretrained(
                    model_path, trust_remote_code=True, device_map=device)
            except Exception as err:
                self.logger.warning(f'{type(err)}: {err}')

        # NOTE(review): on the lmdeploy path this reads config attributes off
        # the pipeline object -- confirm the pipeline exposes them.
        self.context_length = self._get_context_length_robust(model=model, model_path=model_path)

        torch.cuda.empty_cache()
        self.model = model
        # Token budget reserved for the reply when checking prompt length.
        self.answer_buffer = 192
        self.system_prompt = system_prompt
        for k, v in kwargs.items():
            self.logger.info(f'Following args will be used for generation (If not set specifically), {k}: {v}. ')
        self.kwargs = kwargs

    def generate_str(self, input, **kwargs):
        """Single-turn generation from a plain string prompt."""
        if 'baichuan' in self.model_path.lower():
            messages = [{'role': 'user', 'content': input}]
            resp = self.model.chat(self.tokenizer, messages, **kwargs)
        elif 'vicuna' in self.model_path.lower():
            from fastchat.model import get_conversation_template
            conv = get_conversation_template('vicuna')
            conv.append_message(conv.roles[0], input)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()
            inputs = self.tokenizer([prompt], return_tensors='pt')
            if torch.cuda.is_available():
                for k in inputs:
                    inputs[k] = inputs[k].cuda()

            params = dict(do_sample=True, temperature=0.7, repetition_penalty=1.0, max_new_tokens=512)
            params.update(self.kwargs)
            params.update(kwargs)
            outputs = self.model.generate(**inputs, **params)
            # Decode only the newly generated suffix.
            resp = self.tokenizer.decode(
                outputs[0][len(inputs['input_ids'][0]):],
                skip_special_tokens=True,
                spaces_between_special_tokens=False)
        elif 'llama' in self.model_path.lower():
            prompt = [{'role': 'system', 'content': self.system_prompt}, {'role': 'user', 'content': input}]
            resp = self.model(prompt, gen_config=self.gen_config).text
        else:
            # internlm / chatglm / qwen style chat API.
            # FIX: copy self.kwargs before updating -- the original mutated
            # the shared dict, so per-call kwargs accumulated across calls.
            params = dict(self.kwargs)
            params.update(kwargs)
            resp, _ = self.model.chat(self.tokenizer, input, history=[], **params)

        return resp

    def length_ok(self, inputs):
        """True if system prompt + inputs + answer buffer fit the context."""
        tot = len(self.tokenizer.encode(self.system_prompt)) if self.system_prompt is not None else 0
        for s in inputs:
            tot += len(self.tokenizer.encode(s))
        return tot + self.answer_buffer < self.context_length

    def generate_list(self, full_inputs, offset=0, **kwargs):
        """Multi-turn generation from an alternating list of utterances.

        Returns (response, offset); ``offset`` marks how many leading turns
        were dropped to fit the context window.
        """
        assert isinstance(full_inputs, list)
        inputs = full_inputs[offset:]
        if not self.length_ok(inputs):
            # FIX: the original recursed via the nonexistent self.chat and
            # dropped kwargs; retry with one more leading turn trimmed.
            return self.generate_list(full_inputs, offset + 1, **kwargs)

        model_path = self.model_path.lower()

        if sum([x in model_path for x in ['baichuan']]):
            input_msgs = []
            if self.system_prompt is not None:
                input_msgs.append(dict(role='user', content=self.system_prompt))
            if len(inputs):
                assert isinstance(inputs, list) and isinstance(inputs[0], str)
                # Odd-length histories end on a user turn, even ones start
                # mid-conversation with an assistant turn.
                roles = ['user', 'assistant'] if len(inputs) % 2 == 1 else ['assistant', 'user']
                roles = roles * len(inputs)
                for role, msg in zip(roles, inputs):
                    input_msgs.append(dict(role=role, content=msg))
            response = self.model.chat(self.tokenizer, input_msgs)
        elif sum([x in model_path for x in ['vicuna']]):
            from fastchat.model import get_conversation_template
            conv = get_conversation_template('vicuna')
            assert isinstance(inputs, list) and isinstance(inputs[0], str)
            if len(inputs) % 2 == 1:
                if self.system_prompt is not None:
                    conv.append_message(conv.roles[0], self.system_prompt)
                for i in range(len(inputs) // 2):
                    conv.append_message(conv.roles[0], inputs[2 * i])
                    conv.append_message(conv.roles[1], inputs[2 * i + 1])
            else:
                assert self.system_prompt is not None
                conv.append_message(conv.roles[0], self.system_prompt)
                conv.append_message(conv.roles[1], inputs[0])
                for i in range(len(inputs) // 2 - 1):
                    conv.append_message(conv.roles[0], inputs[2 * i + 1])
                    conv.append_message(conv.roles[1], inputs[2 * i + 2])
            conv.append_message(conv.roles[0], inputs[-1])
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()
            inputs = self.tokenizer([prompt], return_tensors='pt')
            if torch.cuda.is_available():
                for k in inputs:
                    inputs[k] = inputs[k].cuda()

            params = dict(do_sample=True, temperature=0.7, repetition_penalty=1.0, max_new_tokens=512)
            params.update(self.kwargs)
            params.update(kwargs)

            outputs = self.model.generate(**inputs, **params)
            response = self.tokenizer.decode(
                outputs[0][len(inputs['input_ids'][0]):],
                skip_special_tokens=True,
                spaces_between_special_tokens=False)
            response = response.lstrip('\n')
        else:
            # The default option, support internlm, chatglm, qwen
            history, msg = [], None
            if len(inputs) % 2 == 1:
                if self.system_prompt is not None:
                    history = [(self.system_prompt, '')]
                for i in range(len(inputs) // 2):
                    history.append((inputs[2 * i], inputs[2 * i + 1]))
            else:
                assert self.system_prompt is not None
                history = [(self.system_prompt, inputs[0])]
                for i in range(len(inputs) // 2 - 1):
                    history.append((inputs[2 * i + 1], inputs[2 * i + 2]))
            msg = inputs[-1]

            # FIX: copy before updating to avoid mutating self.kwargs.
            params = dict(self.kwargs)
            params.update(kwargs)
            response, _ = self.model.chat(self.tokenizer, msg, history=history, **params)

        return response, offset

    def generate(self, inputs, **kwargs):
        """Dispatch to single-turn (str) or multi-turn (list) generation."""
        if isinstance(inputs, str):
            return self.generate_str(inputs, **kwargs)
        elif isinstance(inputs, list):
            return self.generate_list(inputs, **kwargs)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/jt_vl_chat.py b/VLMEvalKit-sudoku/vlmeval/api/jt_vl_chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce0ef88d07780d1a0db461390283d9f224554c6c
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/jt_vl_chat.py
@@ -0,0 +1,275 @@
+import pandas as pd
+import requests
+import json
+import os
+import base64
+from vlmeval.smp import *
+from vlmeval.api.base import BaseAPI
+from vlmeval.dataset import DATASET_TYPE
+from vlmeval.dataset import img_root_map
+
+API_ENDPOINT = "https://hl.jiutian.10086.cn/kunlun/ingress/api/hl-4a9c15/7b11a3451e1a4612a6661c3e22235df6/ai-b6f55f2068a546498ebcfe4fc893ade8/service-97bc0f7b638041d18be4c5c7df31c359/v1/chat/completions" # noqa: E501
+APP_CODE = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI2ZTNiMmQ5OWNiNTE0ZmQ0Yjk1M2M3YTg2NjQzNTFmOCIsImlzcyI6ImFwaS1hdXRoLWtleSIsImV4cCI6NDg5MDY4ODM2Nn0.GX61EKQ0hlQO4CisPwEwsAkmi7fvmc2Kl47EOq2IFpGWk9y4K1ocwM7aMbn7hJ-a4GkDoy3vyndTwPOFDn4y4t4J26tgwPziNS1-fUaQi6e1r7Dt372ZJEJgxxb99SkEulXrkOxOdwltJ87jnia7ZAyOzcfbQc6B4RdpCZERXn7Q-gED62emJbZ_8fuAu86lxtFUZ55lp8Jzmbu0QxNMR4c4Xy4tioxyfv5ZsFjo09GunDD875i__WFPEOl_I15NzhhOOGi3RKFVvZdTF4v3BCYNZoYF02pbM78XPkzcNxSpRHfjBKIjENBMEEygiZseGrcF6x-ThoTnjYsklu9HwA' # noqa: E501
+
+
class JTVLChatWrapper(BaseAPI):
    """API wrapper for the JiuTian (jt-vl-chat) vision-language service."""

    is_api: bool = True
    # The endpoint accepts at most one image per request.
    INTERLEAVE = False

    def __init__(self,
                 model: str = 'jt-vl-chat',
                 retry: int = 5,
                 wait: int = 5,
                 api_base: str = '',
                 app_code: str = '',
                 verbose: bool = True,
                 system_prompt: str = None,
                 temperature: float = 0.7,
                 max_tokens: int = 2048,
                 proxy: str = None,
                 **kwargs):
        """Store generation settings and resolve endpoint / credentials.

        FIX: the original ignored the `api_base` / `app_code` arguments and
        always used the module-level constants; they are now honored when
        non-empty, which is backward compatible with the '' defaults.
        NOTE(review): the module-level APP_CODE embeds a bearer token in
        source control -- it should live in an environment variable.
        """
        self.model = model

        self.temperature = temperature
        self.max_tokens = max_tokens
        self.api_base = api_base if api_base else API_ENDPOINT
        self.app_code = app_code if app_code else APP_CODE

        super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

    def dump_image(self, line, dataset):
        """Dump the image(s) of the input line to the corresponding dataset folder.

        Args:
            line (line of pd.DataFrame): The raw input line.
            dataset (str): The name of the dataset.

        Returns:
            str | list[str]: The paths of the dumped images.
        """
        ROOT = LMUDataRoot()
        assert isinstance(dataset, str)

        # NOTE(review): `dataset in img_root_map(dataset)` is a substring test
        # against the mapped name -- this looks like it was meant to be a
        # mapping-membership check; confirm against other dump_image impls.
        img_root = os.path.join(ROOT, 'images', img_root_map(dataset) if dataset in img_root_map(dataset) else dataset)
        os.makedirs(img_root, exist_ok=True)
        if 'image' in line:
            if isinstance(line['image'], list):
                # Multiple base64 images: decode each to its recorded filename.
                tgt_path = []
                assert 'image_path' in line
                for img, im_name in zip(line['image'], line['image_path']):
                    path = osp.join(img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line['image'], tgt_path)
                tgt_path = [tgt_path]
        else:
            assert 'image_path' in line
            tgt_path = toliststr(line['image_path'])

        return tgt_path

    def use_custom_prompt(self, dataset):
        """MMMU keeps its official prompt; everything else uses ours."""
        assert dataset is not None
        if listinstr(['MMMU_DEV_VAL', 'MMMU_TEST'], dataset):
            return False
        else:
            return True

    def build_multi_choice_prompt(self, line, dataset=None):
        """Compose question + hint + lettered options into one text prompt."""
        question = line['question']
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        if hint is not None:
            question = hint + '\n' + question

        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        for key, item in options.items():
            question += f'\n{key}. {item}'
        prompt = question

        # Localized answer instruction, matched to the prompt language.
        if len(options):
            prompt += '\n请直接回答选项字母。' if cn_string(
                prompt) else "\nAnswer with the option's letter from the given choices directly."
        else:
            prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.'

        return prompt

    def build_prompt(self, line, dataset=None):
        """Build the (text + image) message list for one benchmark record."""
        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)

        tgt_path = self.dump_image(line, dataset)

        if dataset is not None and listinstr(['MME'], dataset):
            question = line['question']
            prompt = question + ' Answer the question using a single word or phrase.'
        elif dataset is not None and listinstr(['HallusionBench'], dataset):
            question = line['question']
            prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.'
        elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
            prompt = self.build_multi_choice_prompt(line, dataset)
        elif dataset is not None and DATASET_TYPE(dataset) == 'VQA':
            if listinstr(['MathVista', 'MathVision'], dataset):
                prompt = line['question']
            elif listinstr(['LLaVABench'], dataset):
                question = line['question']
                prompt = question + '\nAnswer this question in detail.'
            elif listinstr(['MMVet'], dataset):
                prompt = line['question']
            else:
                question = line['question']
                prompt = question + '\nAnswer the question using a single word or phrase.'
        else:
            prompt = line['question']
        message = [dict(type='text', value=prompt)]
        message.extend([dict(type='image', value=s) for s in tgt_path])
        return message

    def message_to_promptimg(self, message, dataset=None):
        """Collapse an interleaved message into (prompt text, single image)."""
        assert not self.INTERLEAVE
        model_name = self.__class__.__name__
        import warnings
        warnings.warn(
            f'Model {model_name} does not support interleaved input. '
            'Will use the first image and aggregated texts as prompt. ')
        num_images = len([x for x in message if x['type'] == 'image'])
        if num_images == 0:
            prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text'])
            image = None
        else:
            prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text'])
            if dataset == 'BLINK':
                # BLINK needs all images, so tile them into one canvas.
                image = concat_images_vlmeval(
                    [x['value'] for x in message if x['type'] == 'image'],
                    target_size=512)
            else:
                image = [x['value'] for x in message if x['type'] == 'image'][0]
        return prompt, image

    def get_send_data(self, prompt, image_path, temperature, max_tokens, stream=False, understanding_plus=False):
        """Build the request payload for a prompt with one base64 image."""
        image = ''
        with open(image_path, 'rb') as f:
            image = str(base64.b64encode(f.read()), 'utf-8')
        send_data = {
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "image_base64": image,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "do_sample": False,
            "understanding_plus": understanding_plus,
            "stream": stream
        }
        return send_data

    def get_send_data_no_image(self, prompt, temperature, max_tokens, stream=False, understanding_plus=False):
        """Build the request payload for a text-only prompt."""
        send_data = {
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": stream,
            "understanding_plus": understanding_plus
        }
        return send_data

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one request and return (ret_code, answer, log) per BaseAPI."""
        assert isinstance(inputs, str) or isinstance(inputs, list)
        inputs = [inputs] if isinstance(inputs, str) else inputs
        dataset = kwargs.get('dataset', None)
        prompt, image_path = self.message_to_promptimg(message=inputs, dataset=dataset)
        if image_path:
            send_data = self.get_send_data(
                prompt=prompt,
                image_path=image_path,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                stream=True)
        else:
            send_data = self.get_send_data_no_image(
                prompt=prompt,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                stream=True)

        json_data = json.dumps(send_data)

        header_dict = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.app_code}

        r = requests.post(self.api_base, headers=header_dict, data=json_data, timeout=3000, stream=True)
        try:
            if send_data.get('stream', False):
                # Streaming: accumulate SSE deltas until [DONE].
                chunks = []
                full_content = ""

                try:
                    for line in r.iter_lines():
                        if line:
                            decoded_line = line.decode('utf-8')
                            if decoded_line.startswith('data: '):
                                event_data = decoded_line[6:]
                                if event_data == '[DONE]':
                                    break
                                try:
                                    chunk = json.loads(event_data)
                                    chunks.append(chunk)

                                    # Keep only the last valid usage (not summed).
                                    if 'usage' in chunk:
                                        _ = chunk['usage']

                                    # Echo content live to stdout.
                                    if 'choices' in chunk:
                                        for choice in chunk['choices']:
                                            if 'delta' in choice and 'content' in choice['delta']:
                                                content = choice['delta']['content']
                                                print(content, end='', flush=True)
                                                full_content += content
                                except json.JSONDecodeError:
                                    # Skip malformed SSE chunks.
                                    continue
                    print("\n")

                    return 0, full_content, 'Succeeded! '

                except Exception as e:
                    return -1, f'Error: {str(e)}', ''
            else:
                # Non-streaming: parse the whole JSON body at once.
                try:
                    r_json = r.json()
                    output = r_json['choices'][0]['message']['content']
                    return 0, output, 'Succeeded! '
                # FIX: narrowed the bare `except:` so KeyboardInterrupt /
                # SystemExit are not swallowed.
                except Exception:
                    error_msg = f'Error! code {r.status_code} content: {r.content}'
                    error_con = r.content.decode('utf-8')
                    if self.verbose:
                        self.logger.error(error_msg)
                        self.logger.error(error_con)
                        self.logger.error(f'The input messages are {inputs}.')
                    return -1, error_msg, ''
        except Exception as e:
            return -1, f'Error: {str(e)}', ''
+
+
class JTVLChatAPI(JTVLChatWrapper):
    """Public entry point; forwards generate() to the wrapper."""

    def generate(self, message, dataset=None):
        return super().generate(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/kimivl_api.py b/VLMEvalKit-sudoku/vlmeval/api/kimivl_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..da33bf313f1db63f8cd64feeaa630e3b8ad4699d
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/kimivl_api.py
@@ -0,0 +1,159 @@
+from ..smp import *
+import os
+import sys
+from .base import BaseAPI
+
# Known endpoint aliases; 'OFFICIAL' points at a locally served
# OpenAI-compatible chat-completions server.
APIBASES = {
    'OFFICIAL': 'http://localhost:8000/v1/chat/completions',
}
+
+
def extract_summary(text: str, bot: str = "◁think▷", eot: str = "◁/think▷") -> str:
    """Strip a Kimi "thinking" section, returning only the final summary.

    A thinking block that was opened but never closed (truncated output)
    yields an empty string; text without markers passes through unchanged.
    """
    has_open, has_close = bot in text, eot in text
    if has_open and not has_close:
        return ""
    if has_close:
        _, _, summary = text.partition(eot)
        return summary.strip()
    return text
+
+
class KimiVLAPIWrapper(BaseAPI):
    """OpenAI-compatible client for locally served Kimi-VL "thinking" models."""

    is_api: bool = True

    def __init__(self,
                 model: str = 'api-kimi-vl-thinking-2506',
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = True,
                 system_prompt: str = None,
                 temperature: float = 0.8,
                 timeout: int = 360,
                 api_base: str = 'OFFICIAL',
                 max_tokens: int = 32768,
                 **kwargs):

        self.model = model
        self.cur_idx = 0
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.temperature = temperature

        # For kimi models the key may come from the environment.
        if 'kimi' in model:
            env_key = os.environ.get('KIMI_VL_API_KEY', '')
            if key is None:
                key = env_key

        self.key = key
        self.timeout = timeout

        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

        if 'KIMI_VL_API_BASE' in os.environ and os.environ['KIMI_VL_API_BASE'] != '':
            self.logger.info('Environment variable KIMI_VL_API_BASE is set. Will use it as api_base. ')
            api_base = os.environ['KIMI_VL_API_BASE']
        # FIX: the original reset api_base to 'OFFICIAL' here, silently
        # discarding the constructor argument; it is now honored when the
        # env var is unset. Also dropped a stray debug print of api_base.

        assert api_base is not None

        # Resolve an alias from APIBASES, or accept a literal http(s) URL.
        if api_base in APIBASES:
            self.api_base = APIBASES[api_base]
        elif api_base.startswith('http'):
            self.api_base = api_base
        else:
            self.logger.error('Unknown API Base. ')
            raise NotImplementedError

        self.logger.info(f'Using API Base: {self.api_base}; API Key: {self.key}')

    # inputs can be a lvl-2 nested list: [content1, content2, content3, ...]
    # content can be a string or a list of image & text
    def prepare_itlist(self, inputs):
        """Convert one message's items to OpenAI content-list format."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    # Skip empty text fragments.
                    if msg["value"] == "":
                        continue
                    content_list.append(dict(type='text', text=msg['value']))

                elif msg['type'] == 'image':
                    from PIL import Image
                    img = Image.open(msg['value'])
                    b64 = encode_image_to_base64(img)
                    img_struct = dict(url=f'data:image/jpeg;base64,{b64}')
                    content_list.append(dict(type='image_url', image_url=img_struct))
        else:
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(type='text', text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Build the full chat message list, including the system prompt."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(role='system', content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        # FIX: os.environ.get returns a string, so even "0"/"false" used to
        # count as enabled; treat the common false-y spellings as disabled.
        if os.environ.get("THINKING_SKIPPED", "0").lower() not in ("", "0", "false"):
            # Pre-fill an empty thinking block so the model skips reasoning.
            input_msgs.append({
                "role": "assistant",
                "content": "◁think▷\n\n◁/think▷",
                "partial": True
            })
            self.logger.info("Add skip thinking pattern")
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one chat-completions request; returns (ret_code, answer, response)."""
        input_msgs = self.prepare_inputs(inputs)
        temperature = kwargs.pop('temperature', self.temperature)
        max_tokens = kwargs.pop('max_tokens', self.max_tokens)

        headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self.key}'}
        payload = dict(
            model=self.model,
            messages=input_msgs,
            n=1,
            temperature=temperature,
            **kwargs)

        payload['max_tokens'] = max_tokens
        response = requests.post(
            self.api_base,
            headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1)
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            answer = resp_struct['choices'][0]['message']['content'].strip()
            # FIX: removed debug print() calls of the model name and answer;
            # also fixed the 'befofe' typo in the local names below.
            length_before_es = len(answer.split())
            answer = extract_summary(answer)
            length_after_es = len(answer.split())
            if length_before_es != length_after_es:
                # Word-count (not token) size of the stripped thinking block.
                self.logger.info("Thinking length: {}".format(length_before_es - length_after_es))
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(response.text if hasattr(response, 'text') else response)

        return ret_code, answer, response
+
+
class KimiVLAPI(KimiVLAPIWrapper):
    """Public alias; generate() drops the unused dataset hint."""

    def generate(self, message, dataset=None):
        return super().generate(message)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/lmdeploy.py b/VLMEvalKit-sudoku/vlmeval/api/lmdeploy.py
new file mode 100644
index 0000000000000000000000000000000000000000..32fd969c13c2c466993fe0ddd0ab533644ac2e78
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/lmdeploy.py
@@ -0,0 +1,332 @@
+# from http import HTTPStatus
+import os
+import requests
+from ..dataset import DATASET_TYPE, DATASET_MODALITY
+from vlmeval.api.base import BaseAPI
+from vlmeval.smp import *
+
+
class InternVL2_PromptUtil:
    """Custom prompt builder for InternVL2-family models served via lmdeploy."""

    def __init__(self, use_mpo_prompt=False):
        # When True, wrap prompts with the MPO CoT template for select datasets.
        self.use_mpo_prompt = use_mpo_prompt

    def dump_image(self, line, dataset):
        # dump_image_func is injected later by LMDeployWrapper.set_dump_image.
        return self.dump_image_func(line)

    def use_custom_prompt(self, dataset):
        """Return True when this util should build the prompt for `dataset`."""
        assert dataset is not None
        assert DATASET_MODALITY(dataset) != 'VIDEO', 'not supported'
        if dataset in [
            'atomic_dataset', 'electro_dataset', 'mechanics_dataset',
            'optics_dataset', 'quantum_dataset', 'statistics_dataset'
        ]:
            return False
        if listinstr(['MMDU', 'MME-RealWorld', 'MME-RealWorld-CN', 'WeMath_COT', 'MMAlignBench'], dataset):
            # For Multi-Turn we don't have custom prompt
            return False
        if DATASET_MODALITY(dataset) == 'VIDEO':
            # For Video benchmarks we don't have custom prompt at here
            # (unreachable: the assert above already excludes VIDEO).
            return False
        else:
            return True

    def build_prompt(self, line, dataset=None):
        """Build the (text + image) message list for one benchmark record."""
        use_cot = (os.getenv('USE_COT') == '1')
        use_mpo_prompt = self.use_mpo_prompt and (use_cot or dataset in ['MMStar', 'HallusionBench', 'OCRBench'])

        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)
        from ..vlm.internvl.utils import (build_multi_choice_prompt,
                                          build_mcq_cot_prompt,
                                          build_qa_cot_prompt,
                                          build_mpo_prompt,
                                          reorganize_prompt)

        tgt_path = self.dump_image(line, dataset)
        max_num = self.get_max_num(dataset)
        if dataset is not None and DATASET_TYPE(dataset) == 'Y/N':
            question = line['question']
            if listinstr(['MME'], dataset):
                prompt = question + ' Answer the question using a single word or phrase.'
            elif listinstr(['HallusionBench', 'AMBER'], dataset):
                prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.'
            else:
                prompt = question
        elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
            prompt = build_multi_choice_prompt(line, dataset)
            if os.getenv('USE_COT') == '1':
                prompt = build_mcq_cot_prompt(line, prompt)
        elif dataset is not None and DATASET_TYPE(dataset) == 'VQA':
            question = line['question']
            if listinstr(['LLaVABench', 'WildVision'], dataset):
                prompt = question + '\nAnswer this question in detail.'
            elif listinstr(['OCRVQA', 'TextVQA', 'ChartQA', 'DocVQA', 'InfoVQA', 'OCRBench',
                            'DUDE', 'SLIDEVQA', 'GQA', 'MMLongBench_DOC'], dataset):
                prompt = question + '\nAnswer the question using a single word or phrase.'
            elif listinstr(['MathVista', 'MathVision', 'VCR', 'MTVQA', 'MMVet', 'MathVerse',
                            'MMDU', 'CRPE', 'MIA-Bench', 'MM-Math', 'DynaMath',
                            'QSpatial', 'WeMath', 'LogicVista'], dataset):
                prompt = question
                if os.getenv('USE_COT') == '1':
                    prompt = build_qa_cot_prompt(line, prompt)
            else:
                prompt = question + '\nAnswer the question using a single word or phrase.'
        else:
            # VQA_ex_prompt: OlympiadBench, VizWiz
            prompt = line['question']
            if os.getenv('USE_COT') == '1':
                prompt = build_qa_cot_prompt(line, prompt)

        message = [dict(type='text', value=prompt)]
        image_num = len(tgt_path)
        # Cap per-image dynamic patches so the total across images stays <= 64.
        max_num = max(1, min(max_num, 64 // image_num))
        # TODO:support upscale_flag
        message.extend([dict(type='image', value=s, max_dynamic_patch=max_num) for s in tgt_path])

        if use_mpo_prompt:
            message = build_mpo_prompt(message, line, dataset)

        # Rebuild the final prompt with image placeholders positioned for the model.
        prompt = reorganize_prompt(message, image_num, dataset=dataset)
        # FIX: dropped the original `prompt.replace('', '')` -- str.replace
        # returns a new string and its result was discarded (a no-op).
        message[0] = dict(type='text', value=prompt)
        return message

    def get_max_num(self, dataset):
        """Max dynamic patch count per image for `dataset` (resolution tier)."""
        self.total_max_num = 64
        if dataset is None:
            self.max_num = 6
            # FIX: previously returned None, which would crash min() in callers.
            return 6
        res_1_datasets = ['MMBench-Video', 'Video-MME', 'MVBench', 'Video', 'WorldSense']  # noqa: F841
        res_12_datasets = ['ChartQA_TEST', 'MMMU_DEV_VAL', 'MMMU_TEST', 'MME-RealWorld',
                           'VCR_EN', 'VCR_ZH', 'OCRVQA', 'BMMR']
        res_18_datasets = ['DocVQA_VAL', 'DocVQA_TEST', 'DUDE', 'MMLongBench_DOC', 'SLIDEVQA']
        res_24_datasets = ['InfoVQA_VAL', 'InfoVQA_TEST', 'OCRBench', 'HRBench4K', 'HRBench8K']
        if DATASET_MODALITY(dataset) == 'VIDEO':
            self.max_num = 1
            # FIX: previously fell through and returned None.
            return 1
        elif listinstr(res_12_datasets, dataset):
            return 12
        elif listinstr(res_18_datasets, dataset):
            return 18
        elif listinstr(res_24_datasets, dataset):
            return 24
        else:
            return 6
+
+
class CogVLM2_PromptUtil:
    """Custom MCQ prompt builder for CogVLM2 models served via lmdeploy."""

    def dump_image(self, line, dataset):
        # dump_image_func is injected by LMDeployWrapper.set_dump_image.
        return self.dump_image_func(line)

    def use_custom_prompt(self, dataset):
        """Only multiple-choice datasets get a custom prompt."""
        assert dataset is not None
        # FIX: the original used `DATASET_TYPE(dataset) in 'MCQ'` -- a
        # substring test that is also True for '' and single letters like
        # 'M'; equality is the intended check.
        return DATASET_TYPE(dataset) == 'MCQ'

    def build_prompt(self, line, dataset=None):
        """Compose question + hint + lettered options into one text prompt."""
        assert dataset is None or isinstance(dataset, str)
        assert self.use_custom_prompt(dataset)
        tgt_path = self.dump_image(line, dataset)

        if dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
            question = line['question']
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            if hint is not None:
                question = hint + '\n' + question

            option_candidate = string.ascii_uppercase
            options = {
                cand: line[cand]
                for cand in option_candidate
                if cand in line and not pd.isna(line[cand])
            }
            for key, item in options.items():
                question += f'\n{key}. {item}'
            prompt = question

            # Localized answer instruction, matched to the prompt language.
            if not cn_string(prompt):
                prompt = prompt + '\n' + "Answer with the option's letter from the given choices directly."
            else:
                prompt = prompt + '\n' + '请直接回答选项字母。'
        else:
            prompt = line['question']
        message = [dict(type='text', value=prompt)]
        message.extend([dict(type='image', value=p) for p in tgt_path])
        return message
+ return message
+
+
class LMDeployWrapper(BaseAPI):
    """OpenAI-compatible client for models served by an lmdeploy api_server.

    A model-specific prompt util (cogvlm2 / internvl2 variants) is selected
    from the served model name to build benchmark prompts.
    """

    is_api: bool = True

    # Name of the active prompt util; None means no custom prompting.
    custom_prompt: str = None
    # NOTE(review): shared class-level registry -- set_dump_image mutates the
    # entries, so concurrent wrappers with different dump funcs interfere.
    prompt_map = {
        'cogvlm2': CogVLM2_PromptUtil(),
        'internvl2': InternVL2_PromptUtil(),
        'internvl2-mpo-cot': InternVL2_PromptUtil(use_mpo_prompt=True),
    }

    def __init__(self,
                 model: str = None,
                 retry: int = 5,
                 key: str = 'sk-123456',
                 verbose: bool = True,
                 temperature: float = 0.0,
                 timeout: int = 60,
                 api_base: str = None,
                 system_prompt: str = None,
                 max_tokens: int = 1024,
                 **kwargs):
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.timeout = timeout

        # Environment variables take precedence over constructor arguments.
        key = os.environ.get('LMDEPLOY_API_KEY', key)
        api_base = os.environ.get('LMDEPLOY_API_BASE', api_base)
        assert key is not None, 'Please set the environment variable LMDEPLOY_API_KEY.'
        assert api_base is not None, 'Please set the environment variable LMDEPLOY_API_BASE.'
        self.key = key
        self.api_base = api_base
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

        # Ask the server which model ids it serves; fall back to the first.
        model_url = ''.join([api_base.split('v1')[0], 'v1/models'])
        resp = requests.get(model_url)
        model_id_list = [str(data['id']) for data in resp.json()['data']]
        self.model = model if model in model_id_list else model_id_list[0]
        self.logger.info(f'lmdeploy evaluate model: {self.model}')
        self.set_prompt_pattern(self.model)
        if hasattr(self, 'custom_prompt'):
            self.logger.info(f'using custom prompt {self.custom_prompt}')
        self.temperature = temperature
        self.logger.info(f'Init temperature: {self.temperature}')

    def set_dump_image(self, dump_image_func):
        """Inject the image-dumping callback into the active prompt util."""
        if self.custom_prompt in self.prompt_map:
            self.prompt_map[self.custom_prompt].dump_image_func = dump_image_func
        self.dump_image_func = dump_image_func

    def use_custom_prompt(self, dataset):
        """Delegate to the active prompt util, if any."""
        if self.custom_prompt in self.prompt_map:
            return self.prompt_map[self.custom_prompt].use_custom_prompt(dataset)
        return False

    def build_prompt(self, line, dataset=None):
        """Delegate to the active prompt util; raises when none is set."""
        if self.custom_prompt in self.prompt_map:
            return self.prompt_map[self.custom_prompt].build_prompt(line, dataset)
        raise NotImplementedError

    def set_prompt_pattern(self, model_name):
        """Pick generation limits and prompt util from the served model name."""
        if 'Phi-3.5-Vision'.lower() in model_name.lower():
            self.max_tokens = 1000
            self.temperature = 0.0
        if 'cogvlm2-llama3-chat-19B'.lower() in model_name.lower():
            self.max_tokens = 2048
            self.temperature = 0.0
            self.custom_prompt = 'cogvlm2'
        if 'internvl2' in model_name.lower() or 'internvl3' in model_name.lower():
            self.max_tokens = 1024
            self.temperature = 0.0
            if 'mpo' in model_name.lower():
                self.max_tokens = 4096
                self.logger.info('Use custom prompt internvl2-mpo-cot')
                self.custom_prompt = 'internvl2-mpo-cot'
            else:
                self.logger.info('Use custom prompt internvl2')
                self.custom_prompt = 'internvl2'
        if 'internvl2-8b-mpo-cot'.lower() in model_name.lower():
            self.use_mpo_prompt = True
            self.max_tokens = 1024
            self.temperature = 0.0
            self.logger.info('Use custom prompt internvl2-mpo-cot')
            self.custom_prompt = 'internvl2-mpo-cot'
        if 'qvq'.lower() in model_name.lower():
            self.max_tokens = 4096
            self.temperature = 0.0
            self.logger.info('QVQ model detected, do not use custom prompt')

    def prepare_itlist(self, inputs):
        """Convert one message's items to OpenAI content-list format."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    content_list.append(dict(type='text', text=msg['value']))
                elif msg['type'] == 'image':
                    from PIL import Image
                    img = Image.open(msg['value'])
                    b64 = encode_image_to_base64(img)
                    # Forward extra per-image options (e.g. max_dynamic_patch).
                    extra_args = msg.copy()
                    extra_args.pop('type')
                    extra_args.pop('value')
                    img_struct = dict(url=f'data:image/jpeg;base64,{b64}', **extra_args)
                    content_list.append(dict(type='image_url', image_url=img_struct))
        else:
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(type='text', text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Build the full chat message list, including the system prompt."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(role='system', content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """Send one chat-completions request; returns (ret_code, answer, response)."""
        input_msgs = self.prepare_inputs(inputs)

        temperature = kwargs.pop('temperature', self.temperature)
        self.logger.info(f'Generate temperature: {temperature}')
        max_tokens = kwargs.pop('max_tokens', self.max_tokens)
        dataset = kwargs.pop('dataset', None)
        if dataset is not None and listinstr(['BMMR'], dataset):
            # BMMR dataset has a very long prompt, so we need to increase max_tokens
            # NOTE(review): 8196 looks like a typo for 8192 -- confirm intent.
            max_tokens = 8196
            self.logger.info('BMMR dataset detected, set max_tokens to 8196')

        headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self.key}'}
        payload = dict(
            model=self.model,
            messages=input_msgs,
            max_tokens=max_tokens,
            n=1,
            temperature=temperature,
            **kwargs)
        response = requests.post(
            self.api_base,
            headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1)
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            answer = resp_struct['choices'][0]['message']['content'].strip()

            # for internvl2-8b-mpo-cot
            if getattr(self, 'use_mpo_prompt', False):
                from ..vlm.internvl.utils import mpo_post_processing
                # FIX: 'dataset' was popped from kwargs above, so the original
                # kwargs.get('dataset') always yielded None here.
                answer = mpo_post_processing(answer, dataset)
        # FIX: narrowed the bare `except:`; best-effort fallback to fail_msg.
        except Exception:
            pass
        return ret_code, answer, response
+
+
class LMDeployAPI(LMDeployWrapper):
    """Public alias for LMDeployWrapper with a dataset-aware generate()."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def generate(self, message, dataset=None):
        return super().generate(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/mug_u.py b/VLMEvalKit-sudoku/vlmeval/api/mug_u.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc5e8a8f871c3b118f568ea917f060ba83443bb7
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/mug_u.py
@@ -0,0 +1,210 @@
+# from http import HTTPStatus
+import os
+import requests
+from ..dataset import DATASET_TYPE, DATASET_MODALITY
+from vlmeval.api.base import BaseAPI
+from vlmeval.smp import *
+
+
class MUGUWrapper(BaseAPI):
    """OpenAI-compatible chat-completion wrapper for the MUG-U endpoint."""

    is_api: bool = True

    # Fallback endpoint used when no ``api_base`` argument is supplied.
    DEFAULT_API_BASE = 'https://shopee.sg/api/v1/compassllvm/v1/chat/completions'

    def __init__(self,
                 model: str,
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = True,
                 temperature: float = 0.0,
                 timeout: int = 60,
                 api_base: str = None,
                 system_prompt: str = None,
                 max_tokens: int = 4096,
                 use_mpo_prompt: bool = False,
                 **kwargs):
        """Configure the endpoint and generation defaults.

        Args:
            model: model name sent in the request payload.
            temperature: sampling temperature (default 0.0, i.e. greedy).
            timeout: per-request timeout in seconds.
            api_base: chat-completions URL; defaults to ``DEFAULT_API_BASE``.
            use_mpo_prompt: enable MPO CoT post-processing of answers.
        """
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.timeout = timeout

        # BUGFIX: the passed-in ``api_base`` used to be unconditionally
        # overwritten by the hard-coded URL (followed by a dead assert);
        # the hard-coded URL is now only a fallback.
        self.api_base = api_base if api_base is not None else self.DEFAULT_API_BASE
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

        # Probe the /v1/models endpoint once so connectivity problems surface early.
        model_url = ''.join([self.api_base.split('v1')[0], 'v1/models'])
        _ = requests.get(model_url)
        self.model = model
        if hasattr(self, 'custom_prompt'):
            self.logger.info(f'using custom prompt {self.custom_prompt}')
        # BUGFIX: ``temperature`` used to be clobbered to 0.0 right after being
        # stored; the constructor argument is now honored (default is still 0.0).
        self.temperature = temperature
        self.logger.info(f'Init temperature: {self.temperature}')
        self.use_mpo_prompt = use_mpo_prompt

    def use_custom_prompt(self, dataset):
        """Return True when this wrapper builds its own prompt for *dataset*.

        Multi-turn datasets (MMDU / MME-RealWorld*) and video datasets are excluded.
        """
        assert dataset is not None
        assert DATASET_MODALITY(dataset) != 'VIDEO', 'not supported'
        if listinstr(['MMDU', 'MME-RealWorld', 'MME-RealWorld-CN'], dataset):
            # For Multi-Turn we don't have custom prompt
            return False
        if DATASET_MODALITY(dataset) == 'VIDEO':
            # For Video benchmarks we don't have custom prompt at here
            return False
        else:
            return True

    def get_max_num(self, dataset):
        """Return the max number of dynamic image patches for *dataset*."""
        assert dataset is not None
        res_1_datasets = ['MMBench-Video', 'Video-MME', 'MVBench', 'Video', 'WorldSense']
        res_12_datasets = ['ChartQA_TEST', 'MMMU_DEV_VAL', 'MMMU_TEST', 'MME-RealWorld',
                           'VCR_EN', 'VCR_ZH', 'OCRVQA']
        res_18_datasets = ['DocVQA_VAL', 'DocVQA_TEST', 'DUDE', 'MMLongBench_DOC', 'SLIDEVQA']
        res_24_datasets = ['InfoVQA_VAL', 'InfoVQA_TEST', 'OCRBench', 'HRBench4K', 'HRBench8K']
        if listinstr(res_1_datasets, dataset):
            return 1
        elif listinstr(res_12_datasets, dataset):
            return 12
        elif listinstr(res_18_datasets, dataset):
            return 18
        elif listinstr(res_24_datasets, dataset):
            return 24
        else:
            return 6

    def build_prompt(self, line, dataset=None):
        """Build the per-sample message list (text first, then images)."""
        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)
        from ..vlm.internvl.utils import (build_multi_choice_prompt,
                                          build_mcq_cot_prompt,
                                          build_qa_cot_prompt,
                                          build_mpo_prompt,
                                          reorganize_prompt)

        tgt_path = self.dump_image(line, dataset)
        max_num = self.get_max_num(dataset)
        if dataset is not None and DATASET_TYPE(dataset) == 'Y/N':
            question = line['question']
            if listinstr(['MME'], dataset):
                prompt = question + ' Answer the question using a single word or phrase.'
            elif listinstr(['HallusionBench', 'AMBER'], dataset):
                prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.'
            else:
                prompt = question
        elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
            prompt = build_multi_choice_prompt(line, dataset)
            if os.getenv('USE_COT') == '1':
                prompt = build_mcq_cot_prompt(line, prompt)
        elif dataset is not None and DATASET_TYPE(dataset) == 'VQA':
            question = line['question']
            if listinstr(['LLaVABench', 'WildVision'], dataset):
                prompt = question + '\nAnswer this question in detail.'
            elif listinstr(['OCRVQA', 'TextVQA', 'ChartQA', 'DocVQA', 'InfoVQA', 'OCRBench',
                            'DUDE', 'SLIDEVQA', 'GQA', 'MMLongBench_DOC'], dataset):
                prompt = question + '\nAnswer the question using a single word or phrase.'
            elif listinstr(['MathVista', 'MathVision', 'VCR', 'MTVQA', 'MMVet', 'MathVerse',
                            'MMDU', 'CRPE', 'MIA-Bench', 'MM-Math', 'DynaMath',
                            'QSpatial', 'WeMath', 'LogicVista'], dataset):
                prompt = question
                if os.getenv('USE_COT') == '1':
                    prompt = build_qa_cot_prompt(line, prompt)
            else:
                prompt = question + '\nAnswer the question using a single word or phrase.'
        else:
            # VQA_ex_prompt: OlympiadBench, VizWiz
            prompt = line['question']
            if os.getenv('USE_COT') == '1':
                prompt = build_qa_cot_prompt(line, prompt)

        message = [dict(type='text', value=prompt)]
        image_num = len(tgt_path)
        # Cap total patches at 64 across all images, but keep at least 1 each.
        max_num = max(1, min(max_num, 64 // image_num))
        # TODO:support upscale_flag
        message.extend([dict(type='image', value=s, max_dynamic_patch=max_num) for s in tgt_path])

        if self.use_mpo_prompt:
            message = build_mpo_prompt(message, line, dataset)

        # reorganize_prompt
        prompt = reorganize_prompt(message, image_num, dataset=dataset)
        # BUGFIX: a stray ``prompt.replace('', '')`` no-op (result discarded)
        # was removed here.
        message[0] = dict(type='text', value=prompt)
        return message

    def prepare_itlist(self, inputs):
        """Convert a flat list of {'type','value'} dicts into API content items."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    content_list.append(dict(type='text', text=msg['value']))
                elif msg['type'] == 'image':
                    from PIL import Image
                    img = Image.open(msg['value'])
                    b64 = encode_image_to_base64(img)
                    # Forward any extra per-image keys (e.g. max_dynamic_patch).
                    extra_args = msg.copy()
                    extra_args.pop('type')
                    extra_args.pop('value')
                    img_struct = dict(url=f'data:image/jpeg;base64,{b64}', **extra_args)
                    content_list.append(dict(type='image_url', image_url=img_struct))
        else:
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(type='text', text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Wrap inputs into role-tagged chat messages, prepending the system prompt."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(role='system', content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """POST one request; return (ret_code, answer, response)."""
        input_msgs = self.prepare_inputs(inputs)

        temperature = kwargs.pop('temperature', self.temperature)
        self.logger.info(f'Generate temperature: {temperature}')
        max_tokens = kwargs.pop('max_tokens', self.max_tokens)
        # Consistency with the LMDeploy wrapper: 'dataset' is meta information
        # and must not leak into the request payload via **kwargs.
        dataset = kwargs.pop('dataset', None)

        headers = {'Content-Type': 'application/json'}
        payload = dict(
            model=self.model,
            messages=input_msgs,
            max_tokens=max_tokens,
            n=1,
            top_k=1,
            temperature=temperature,
            stream=False,
            **kwargs)

        response = requests.post(
            self.api_base,
            headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1)
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            answer = resp_struct['choices'][0]['message']['content'].strip()

            # for internvl2-8b-mpo-cot
            if getattr(self, 'use_mpo_prompt', False):
                from ..vlm.internvl.utils import mpo_post_processing
                answer = mpo_post_processing(answer, dataset)
        except Exception as err:
            # Tolerate parse failures (answer stays fail_msg) but log them.
            self.logger.warning(f'Failed to parse API response: {err}')
        return ret_code, answer, response
+
+
class MUGUAPI(MUGUWrapper):
    """Public entry point; forwards to ``MUGUWrapper.generate`` unchanged."""

    def generate(self, message, dataset=None):
        return super().generate(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/qwen_vl_api.py b/VLMEvalKit-sudoku/vlmeval/api/qwen_vl_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..cff453639a94c388730aa603a7beef42b9e0a559
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/qwen_vl_api.py
@@ -0,0 +1,218 @@
+from __future__ import annotations
+
+import os
+import warnings
+
+from vlmeval.smp import *
+from vlmeval.api.base import BaseAPI
+from vlmeval.vlm.qwen2_vl.prompt import Qwen2VLPromptMixin
+
+
def ensure_image_url(image: str) -> str:
    """Return *image* as a URL, prefixing existing local paths with ``file://``.

    Strings already carrying a recognized scheme are returned unchanged;
    anything else that is not an existing path raises ``ValueError``.
    """
    known_prefixes = ('http://', 'https://', 'file://', 'data:image;')
    if image.startswith(known_prefixes):
        return image
    if os.path.exists(image):
        return 'file://' + image
    raise ValueError(f'Invalid image: {image}')
+
+
class Qwen2VLAPI(Qwen2VLPromptMixin, BaseAPI):
    """Dashscope-backed API client for the Qwen2-VL model family."""

    is_api: bool = True

    def __init__(
        self,
        model: str = 'qwen-vl-max-0809',
        key: str | None = None,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        max_length=1024,
        top_p=0.001,
        top_k=1,
        temperature=0.01,
        repetition_penalty=1.0,
        presence_penalty=0.0,
        seed=3407,
        use_custom_prompt: bool = True,
        **kwargs,
    ):
        """Store generation defaults and register the dashscope API key.

        The key comes from the ``key`` argument or, failing that, the
        ``DASHSCOPE_API_KEY`` environment variable.
        """
        import dashscope

        self.model = model
        # Optional per-image resolution bounds forwarded in message content.
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        # Defaults applied to every call; generate_inner merges per-call kwargs on top.
        self.generate_kwargs = dict(
            max_length=max_length,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            presence_penalty=presence_penalty,
            seed=seed,
        )

        key = os.environ.get('DASHSCOPE_API_KEY', None) if key is None else key
        assert key is not None, (
            'Please set the API Key (obtain it here: '
            'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)'
        )
        dashscope.api_key = key
        super().__init__(use_custom_prompt=use_custom_prompt, **kwargs)

    def _prepare_content(self, inputs: list[dict[str, str]], dataset: str | None = None) -> list[dict[str, str]]:
        """
        inputs list[dict[str, str]], each dict has keys: ['type', 'value']

        Converts the generic message list into dashscope content items,
        attaching min/max pixel bounds to image entries.
        """
        content = []
        for s in inputs:
            if s['type'] == 'image':
                item = {'type': 'image', 'image': ensure_image_url(s['value'])}
                if dataset == 'OCRBench':
                    # OCRBench images tend to be small; enforce a resolution floor.
                    item['min_pixels'] = 10 * 10 * 28 * 28
                    warnings.warn(f"OCRBench dataset uses custom min_pixels={item['min_pixels']}")
                    if self.max_pixels is not None:
                        item['max_pixels'] = self.max_pixels
                else:
                    if self.min_pixels is not None:
                        item['min_pixels'] = self.min_pixels
                    if self.max_pixels is not None:
                        item['max_pixels'] = self.max_pixels
            elif s['type'] == 'text':
                item = {'type': 'text', 'text': s['value']}
            else:
                raise ValueError(f"Invalid message type: {s['type']}, {s}")
            content.append(item)
        return content

    def generate_inner(self, inputs, **kwargs) -> str:
        """Call dashscope once; return (0, answer, 'Succeeded! ') or (-1, '', '')."""
        import dashscope

        messages = []
        if self.system_prompt is not None:
            messages.append({'role': 'system', 'content': self.system_prompt})
        messages.append(
            {'role': 'user', 'content': self._prepare_content(inputs, dataset=kwargs.get('dataset', None))}
        )
        if self.verbose:
            print(f'\033[31m{messages}\033[0m')

        # generate
        generation_kwargs = self.generate_kwargs.copy()
        # 'dataset' is meta information, not a dashscope generation parameter.
        kwargs.pop('dataset', None)
        generation_kwargs.update(kwargs)
        try:
            response = dashscope.MultiModalConversation.call(
                model=self.model,
                messages=messages,
                **generation_kwargs,
            )
            if self.verbose:
                print(response)
            answer = response.output.choices[0]['message']['content'][0]['text']
            return 0, answer, 'Succeeded! '
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(f'The input messages are {inputs}.')
            return -1, '', ''
+
+
class QwenVLWrapper(BaseAPI):
    """Dashscope wrapper for the legacy qwen-vl-plus / qwen-vl-max chat API."""

    is_api: bool = True

    def __init__(self,
                 model: str = 'qwen-vl-plus',
                 retry: int = 5,
                 key: str = None,
                 verbose: bool = True,
                 temperature: float = 0.0,
                 system_prompt: str = None,
                 max_tokens: int = 2048,
                 proxy: str = None,
                 **kwargs):
        """Register the dashscope key (arg or DASHSCOPE_API_KEY) and defaults."""

        assert model in ['qwen-vl-plus', 'qwen-vl-max']
        self.model = model
        import dashscope
        self.fail_msg = 'Failed to obtain answer via API. '
        self.max_tokens = max_tokens
        self.temperature = temperature
        if key is None:
            key = os.environ.get('DASHSCOPE_API_KEY', None)
        assert key is not None, (
            'Please set the API Key (obtain it here: '
            'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)'
        )
        dashscope.api_key = key
        if proxy is not None:
            proxy_set(proxy)
        super().__init__(retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs)

    # inputs can be a lvl-2 nested list: [content1, content2, content3, ...]
    # content can be a string or a list of image & text
    def prepare_itlist(self, inputs):
        """Convert {'type','value'} dicts into dashscope content items."""
        assert np.all([isinstance(x, dict) for x in inputs])
        has_images = np.sum([x['type'] == 'image' for x in inputs])
        if has_images:
            content_list = []
            for msg in inputs:
                if msg['type'] == 'text':
                    content_list.append(dict(text=msg['value']))
                elif msg['type'] == 'image':
                    content_list.append(dict(image='file://' + msg['value']))
        else:
            assert all([x['type'] == 'text' for x in inputs])
            text = '\n'.join([x['value'] for x in inputs])
            content_list = [dict(text=text)]
        return content_list

    def prepare_inputs(self, inputs):
        """Wrap inputs into role-tagged messages, prepending the system prompt."""
        input_msgs = []
        if self.system_prompt is not None:
            input_msgs.append(dict(role='system', content=self.system_prompt))
        assert isinstance(inputs, list) and isinstance(inputs[0], dict)
        assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs
        if 'role' in inputs[0]:
            assert inputs[-1]['role'] == 'user', inputs[-1]
            for item in inputs:
                input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content'])))
        else:
            input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs)))
        return input_msgs

    def generate_inner(self, inputs, **kwargs) -> str:
        """Call dashscope once; return (0, answer, 'Succeeded! ') or (-1, '', '').

        Pure-text inputs are rejected: this wrapper is for multimodal queries only.
        """
        from dashscope import MultiModalConversation
        assert isinstance(inputs, str) or isinstance(inputs, list)

        if 'type' in inputs[0]:
            pure_text = np.all([x['type'] == 'text' for x in inputs])
        else:
            pure_text = True
            for inp in inputs:
                if not np.all([x['type'] == 'text' for x in inp['content']]):
                    pure_text = False
                    break

        assert not pure_text
        messages = self.prepare_inputs(inputs)
        gen_config = dict(max_output_tokens=self.max_tokens, temperature=self.temperature)
        gen_config.update(kwargs)
        try:
            # BUGFIX: gen_config was built and updated but never forwarded, so
            # max_output_tokens / temperature were silently ignored by the call.
            response = MultiModalConversation.call(model=self.model, messages=messages, **gen_config)
            if self.verbose:
                print(response)
            answer = response.output.choices[0]['message']['content'][0]['text']
            return 0, answer, 'Succeeded! '
        except Exception as err:
            if self.verbose:
                self.logger.error(f'{type(err)}: {err}')
                self.logger.error(f'The input messages are {inputs}.')

            return -1, '', ''
+
+
class QwenVLAPI(QwenVLWrapper):
    """Public alias over ``QwenVLWrapper``."""

    def generate(self, message, dataset=None):
        # `dataset` is accepted for interface parity with the other API
        # classes but is intentionally not forwarded: the wrapper builds no
        # dataset-specific prompts.
        return super().generate(message)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/sensechat_vision.py b/VLMEvalKit-sudoku/vlmeval/api/sensechat_vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..de33abb5472d86052ec0103f15027c49bec4b5d8
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/sensechat_vision.py
@@ -0,0 +1,307 @@
+import os
+import string
+import time
+from typing import Optional
+import pandas as pd
+import requests
+from vlmeval.smp import (
+ LMUDataRoot,
+ osp,
+ read_ok,
+ decode_base64_to_image_file,
+ toliststr,
+ listinstr,
+ cn_string,
+)
+from vlmeval.api.base import BaseAPI
+from vlmeval.dataset import img_root_map
+from vlmeval.dataset import DATASET_TYPE
+
+
class SenseChatVisionWrapper(BaseAPI):
    """API wrapper for SenseNova (SenseChat) vision chat-completion models."""

    is_api: bool = True

    def __init__(
        self,
        base_url: str = "https://api.sensenova.cn/v1/llm/chat-completions",
        api_key: str = None,
        model: str = "SenseNova-V6-5-Pro",
        retry: int = 5,
        wait: int = 5,
        verbose: bool = True,
        system_prompt: str = None,
        max_tokens: int = 16384,
        **kwargs,
    ):
        """Store endpoint settings and defer common setup to ``BaseAPI``.

        Raises:
            AssertionError: if no key is given via argument or SENSENOVA_API_KEY.
        """
        self.base_url = base_url
        self.model = model
        self.fail_msg = "Failed to obtain answer via API. "
        self.api_key = os.getenv("SENSENOVA_API_KEY", api_key)
        assert self.api_key is not None, (
            "Please set the `SENSENOVA_API_KEY` environment variable or pass `api_key` in the config.json."
        )
        self.max_new_tokens = max_tokens
        # Chain-of-thought ("thinking") switch; build_prompt flips it on for
        # MMMU / MathVista samples and generate_inner sends it with each request.
        self.thinking = False
        super().__init__(
            wait=wait,
            retry=retry,
            system_prompt=system_prompt,
            verbose=verbose,
            **kwargs,
        )

    def dump_image(self, line, dataset):
        """Dump the image(s) of the input line to the corresponding dataset folder.

        Args:
            line (line of pd.DataFrame): The raw input line.
            dataset (str): The name of the dataset.

        Returns:
            str | list[str]: The paths of the dumped images.
        """
        ROOT = LMUDataRoot()
        assert isinstance(dataset, str)
        img_root = osp.join(ROOT, "images", img_root_map(dataset))
        os.makedirs(img_root, exist_ok=True)
        if "image" in line:
            if isinstance(line["image"], list):
                # Multi-image sample: decode each base64 image to its own file.
                tgt_path = []
                assert "image_path" in line
                for img, im_name in zip(line["image"], line["image_path"]):
                    path = osp.join(img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line["image"], tgt_path)
                tgt_path = [tgt_path]
        else:
            # No inline image payload: the line must carry path(s) instead.
            assert "image_path" in line
            tgt_path = toliststr(line["image_path"])

        return tgt_path

    def image_to_base64(self, image_path):
        """Read the file at *image_path* and return its base64 string."""
        import base64

        with open(image_path, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read())
        return encoded_string.decode("utf-8")

    def use_custom_prompt(self, *args, **kwargs):
        """Check if the prompt is customized. Always True for this wrapper."""
        return True

    def build_multi_choice_prompt(self, line, dataset=None):
        """Build an MCQ prompt: optional hint, question, lettered options,
        and a direct-answer instruction (Chinese or English to match the prompt)."""
        question = line["question"]
        hint = line["hint"] if ("hint" in line and not pd.isna(line["hint"])) else None
        if hint is not None:
            question = hint + "\n" + question

        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        for key, item in options.items():
            question += f"\n{key}. {item}"
        prompt = question

        if len(options):
            prompt += (
                "\n请直接回答选项字母。"
                if cn_string(prompt)
                else "\nAnswer with the option's letter from the given choices directly."
            )
        else:
            prompt += (
                "\n请直接回答问题。"
                if cn_string(prompt)
                else "\nAnswer the question directly."
            )

        return prompt

    def build_mcq_cot_prompt(self, line, prompt):
        """Build a chain-of-thought prompt for MMMU-style exam questions.

        NOTE: the incoming ``prompt`` argument is rebuilt from scratch here
        (the passed-in value is discarded); the subject is parsed from
        ``line['id']`` and substituted into a question-type-specific template.
        """
        question = line["question"]
        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        for key, item in options.items():
            question += f'\n{key}. {item}'
        prompt = {
            'multiple-choice': "You are an expert in {}. Please solve the university-level {} examination question, which includes interleaved images and text. Answer the preceding multiple choice question. The last line of your response should follow this format: 'Answer: \\boxed LETTER', where LETTER is one of the options. If you are uncertain or the problem is too complex, make a reasoned guess based on the information provided. Avoid repeating steps indefinitely—provide your best guess even if unsure. Think step by step logically, considering all relevant information before answering.", # noqa: E501
            'open': 'You are an expert in {}. Please solve the university-level {} examination question, which includes interleaved images and text. Your output should be divided into two parts: First, reason about the correct answer. Then write the answer in the following format where X is only the answer and nothing else: "ANSWER: X"' # noqa: E501
        }
        subject = '_'.join(line['id'].split('_')[1:-1])
        prompt = prompt[line['question_type']].format(subject, subject) + '\n' + question
        return prompt

    def build_prompt(self, line, dataset=None):
        """Build the message list for one sample; may enable self.thinking
        (side effect) for MMMU and MathVista samples."""
        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)

        tgt_path = self.dump_image(line, dataset)

        if dataset is not None and listinstr(["MME"], dataset):
            question = line["question"]
            prompt = question + " Answer the question using a single word or phrase."
        elif dataset is not None and listinstr(["HallusionBench"], dataset):
            question = line["question"]
            prompt = (
                question
                + " Please answer yes or no. Answer the question using a single word or phrase."
            )
        elif dataset is not None and DATASET_TYPE(dataset) == "MCQ":
            prompt = self.build_multi_choice_prompt(line, dataset)
            if "MMMU" in dataset:
                prompt = self.build_mcq_cot_prompt(line, prompt)
                self.thinking = True
        elif dataset is not None and DATASET_TYPE(dataset) == "VQA":
            if "MathVista" in dataset:
                prompt = line["question"]
                self.thinking = True
            elif listinstr(["LLaVABench"], dataset):
                question = line["question"]
                prompt = question + "\nAnswer this question in detail."
            elif listinstr(["MMVet"], dataset):
                prompt = line["question"]
            else:
                question = line["question"]
                prompt = (
                    question
                    + "\nPlease reason step by step, and put your final answer within \\boxed{}."
                )
        else:
            prompt = line["question"]

        message = [dict(type="text", value=prompt)]
        message.extend([dict(type="image", value=s) for s in tgt_path])

        return message

    def message_to_promptimg(self, message, dataset=None):
        """Split a message list into (joined text, image path list).

        For MMMU / BLINK (or unknown datasets) only the FIRST image is kept.
        """
        if dataset is None or listinstr(["MMMU", "BLINK"], dataset):
            prompt = "\n".join([x["value"] for x in message if x["type"] == "text"])
            image = [[x["value"] for x in message if x["type"] == "image"][0]]
        else:
            prompt = "\n".join([x["value"] for x in message if x["type"] == "text"])
            image = [x["value"] for x in message if x["type"] == "image"]
        return prompt, image

    def set_max_num(self, dataset: Optional[str] = None) -> None:
        """Set the max_num based on the dataset."""
        if dataset is not None and listinstr(
            [
                "ChartQA_TEST",
                "MMMU_DEV_VAL",
                "MMMU_TEST",
                "MME-RealWorld",
                "VCR_EN",
                "VCR_ZH",
                "OCRVQA",
            ],
            dataset,
        ):
            self.max_num = 12
        elif dataset is not None and listinstr(
            ["DocVQA_VAL", "DocVQA_TEST", "DUDE", "MMLongBench_DOC", "SLIDEVQA"],
            dataset,
        ):
            self.max_num = 18
        elif dataset is not None and listinstr(
            ["InfoVQA_VAL", "InfoVQA_TEST", "OCRBench", "HRBench4K", "HRBench8K"],
            dataset,
        ):
            self.max_num = 24
        else:
            self.max_num = 6

    def generate_inner(self, inputs, **kwargs) -> str:
        """POST one request; return (0, answer, 'Succeeded! ') on success or
        (-1, error message, '') on failure."""
        assert isinstance(inputs, str) or isinstance(inputs, list)
        inputs = [inputs] if isinstance(inputs, str) else inputs
        dataset = kwargs.get("dataset", None)

        self.set_max_num(dataset=dataset)

        prompt, image = self.message_to_promptimg(message=inputs, dataset=dataset)
        # Images first, then the text item, per the SenseNova content schema.
        content = [
            {
                "image_base64": self.image_to_base64(item),
                "type": "image_base64",
            }
            for item in image
        ]

        content.append(
            {
                "text": prompt,
                "type": "text",
            }
        )

        message = [{"content": content, "role": "user"}]
        data = {
            "messages": message,
            "max_new_tokens": self.max_new_tokens,
            "model": self.model,
            "stream": False,
            "image_split_count": self.max_num,
            "thinking": {
                "enabled": self.thinking,
            }
        }

        headers = {
            "Content-type": "application/json",
            "Authorization": self.api_key,
        }

        response = requests.post(
            self.base_url,
            headers=headers,
            json=data,
        )
        request_id = response.headers.get("x-request-id", "")
        self.logger.info(f"Request-id: {request_id}")

        # Simple client-side rate limiting between consecutive requests.
        time.sleep(1)
        try:
            assert response.status_code == 200
            # NOTE(review): assumes choices[0]['message'] is a plain string in
            # this API's response schema (.strip() would fail on a dict) — confirm.
            response = response.json()["data"]["choices"][0]["message"].strip()
            if self.verbose:
                self.logger.info(f"inputs: {inputs}\nanswer: {response}")
            return 0, response, "Succeeded! "
        except Exception as err:
            # NOTE(review): the error path assumes the body parses as JSON with
            # an 'error' key — a malformed body would raise again here; verify.
            if self.verbose:
                self.logger.error(
                    "---------------------------ERROR---------------------------"
                )
                self.logger.error(response.json())
                self.logger.error(err)
                self.logger.error(
                    "---------------------------request_id---------------------------"
                    + request_id
                )
                self.logger.error(
                    "api error"
                    + response.json()["error"]["message"]
                    + str(
                        [
                            input["value"] if input["type"] == "image" else None
                            for input in inputs
                        ]
                    )
                )
                self.logger.error(f"The input messages are {inputs}.")
            return -1, response.json()["error"]["message"], ""
+
+
class SenseChatVisionAPI(SenseChatVisionWrapper):
    """Public alias; forwards to ``SenseChatVisionWrapper.generate``."""

    def generate(self, message, dataset=None):
        return super().generate(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/api/siliconflow.py b/VLMEvalKit-sudoku/vlmeval/api/siliconflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bd165a97b82336c3c8ade3be1885b03002d1cd6
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/siliconflow.py
@@ -0,0 +1,277 @@
+import math
+from vlmeval.smp import *
+from vlmeval.api.base import BaseAPI
+from vlmeval.dataset import img_root_map
+
+API_BASE = "https://api.siliconflow.cn/v1/chat/completions"
+
+
def resize_image(image: Image.Image, max_height: int, max_width: int) -> Image.Image:
    """Bound *image* so its pixel count fits within ``max_height * max_width``.

    Images whose short side is below 50 px are first upscaled to a 50 px
    minimum; images already within the pixel budget are returned unchanged.
    """
    width, height = image.size
    if min(width, height) < 50:
        scale = 50 / min(width, height)
        image = image.resize((int(width * scale), int(height * scale)))
        # BUGFIX: refresh the dimensions after the upscale — the pixel budget
        # below was previously computed from the stale pre-resize size, so the
        # downscale factor (and the final size) could be wrong.
        width, height = image.size
    current_pixels = width * height

    if current_pixels <= max_height * max_width:
        return image

    scale = math.sqrt(max_height * max_width / current_pixels)
    new_width = int(width * scale)
    new_height = int(height * scale)

    return image.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+
def encode_image(path: str, max_height: int = 1024, max_width: int = 1024) -> str:
    """Load the image at *path*, bound its size, and return it base64-encoded as PNG."""
    image = Image.open(path).convert("RGB")
    image = resize_image(image, max_height, max_width)
    width, height = image.size
    # Re-check after resizing: enforce a 50 px minimum on the short side.
    if min(height, width) < 50:
        scale = 50 / min(width, height)
        new_size = (int(width * scale), int(height * scale))
        image = image.resize(new_size)
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    encoded = base64.b64encode(buffered.getvalue())
    return encoded.decode("utf-8")
+
+
class SiliconFlowAPI(BaseAPI):
    """OpenAI-compatible chat client for the SiliconFlow endpoint."""

    is_api: bool = True

    def __init__(
        self,
        model: str = "deepseek-ai/DeepSeek-V2.5",
        retry: int = 5,
        key: str = None,
        api_base: str = API_BASE,
        verbose: bool = True,
        system_prompt: str = None,
        timeout: int = 60,
        reasoning: bool = False,  # If set, will return results in the format of {'content': '...', 'reasoning': '...'}
        **kwargs,
    ):
        """Configure endpoint, auth header, and default generation kwargs.

        The key falls back to the ``SiliconFlow_API_KEY`` environment variable.
        """

        self.model = model
        self.api_base = api_base
        self.reasoning = reasoning
        self.timeout = timeout

        default_kwargs = {
            "stream": False,
            "temperature": 0,
            "n": 1,
            "max_tokens": 1280,
        }
        # Fill in defaults without overriding explicit caller settings
        # (idiomatic setdefault replaces the manual membership check).
        for k, v in default_kwargs.items():
            kwargs.setdefault(k, v)
        if key is not None:
            self.key = key
        else:
            self.key = os.environ.get("SiliconFlow_API_KEY", "")
        headers = {"Authorization": "Bearer {}", "Content-Type": "application/json"}
        headers["Authorization"] = headers["Authorization"].format(self.key)
        self.headers = headers
        super().__init__(
            retry=retry,
            system_prompt=system_prompt,
            verbose=verbose,
            **kwargs,
        )

    @staticmethod
    def build_msgs(msgs_raw):
        """Build a single-user-message list; only the FIRST image is encoded
        (subsequent image items are ignored)."""
        messages = []
        message = {"role": "user", "content": []}
        image_b64 = None
        for msg in msgs_raw:
            if msg["type"] == "image" and not image_b64:
                image_b64 = encode_image(msg["value"])
                message["content"].append({
                    "image_url": {
                        "url": f"data:image/png;base64,{image_b64}"
                    },
                    "type": "image_url"
                })
            elif msg["type"] == "text":
                message["content"].append({"text": msg["value"], "type": "text"})

        messages.append(message)
        return messages

    def generate_inner(self, inputs, **kwargs) -> str:
        """POST one request; return (ret_code, answer, response)."""
        default_kwargs = self.default_kwargs
        default_kwargs.update(kwargs)

        payload = dict(
            model=self.model,
            messages=self.build_msgs(msgs_raw=inputs),
            **default_kwargs,
        )

        response = requests.post(
            self.api_base, headers=self.headers, data=json.dumps(payload), timeout=self.timeout * 1.1
        )
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code

        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            msg = resp_struct["choices"][0]["message"]
            if self.reasoning and 'reasoning_content' in msg:
                # Surface the model's chain-of-thought alongside the answer.
                answer = {'content': msg['content'], 'reasoning': msg['reasoning_content']}
            else:
                answer = resp_struct["choices"][0]["message"]["content"].strip()
        except Exception as err:
            # Tolerate parse failures (answer stays fail_msg) but log them
            # instead of silently swallowing every exception.
            self.logger.warning(f'Failed to parse API response: {err}')
        return ret_code, answer, response
+
+
class TeleMMAPI(SiliconFlowAPI):
    """SiliconFlow-hosted TeleAI/TeleMM client with dataset-specific prompt tweaks."""

    is_api: bool = True

    def __init__(
        self,
        model: str = "TeleAI/TeleMM",
        key: str = None,
        max_height: int = 1280,
        max_width: int = 784,
        **kwargs,
    ):
        """Set the image-size bounds used when encoding request images."""
        super().__init__(model=model, key=key, **kwargs)
        self.max_height = max_height
        self.max_width = max_width

    def dump_image(self, line, dataset):
        """Dump the image(s) of the input line to the corresponding dataset folder.

        Args:
            line (line of pd.DataFrame): The raw input line.
            dataset (str): The name of the dataset.

        Returns:
            str | list[str]: The paths of the dumped images.
        """
        ROOT = LMUDataRoot()
        assert isinstance(dataset, str)
        # img_root = osp.join(ROOT, 'images', img_root_map[dataset] if dataset in img_root_map else dataset)
        img_root = osp.join(ROOT, "images", img_root_map(dataset))
        os.makedirs(img_root, exist_ok=True)
        if "image" in line:
            if isinstance(line["image"], list):
                # Multi-image sample: decode each base64 payload to its own file.
                tgt_path = []
                assert "image_path" in line
                for img, im_name in zip(line["image"], line["image_path"]):
                    path = osp.join(img_root, im_name)
                    if not read_ok(path):
                        decode_base64_to_image_file(img, path)
                    tgt_path.append(path)
            else:
                tgt_path = osp.join(img_root, f"{line['index']}.jpg")
                if not read_ok(tgt_path):
                    decode_base64_to_image_file(line["image"], tgt_path)
                tgt_path = [tgt_path]
        else:
            assert "image_path" in line
            tgt_path = toliststr(line["image_path"])
        return tgt_path

    def _prepare_content(
        self, inputs: list[dict[str, str]], dataset: str = None
    ) -> list[dict[str, str]]:
        """
        inputs list[dict[str, str]], each dict has keys: ['type', 'value']

        Only the FIRST image is kept; text prompts are rewritten per dataset.
        """
        content = []
        has_image = False
        for s in inputs:
            if s["type"] == "image":
                if not has_image:
                    # NOTE(review): the url field carries raw base64 without a
                    # 'data:image/png;base64,' prefix — confirm the endpoint
                    # accepts this form.
                    item = {
                        "type": "image_url",
                        "image_url": {
                            "url": encode_image(
                                s["value"],
                                max_height=self.max_height,
                                max_width=self.max_width,
                            )
                        },
                    }
                    has_image = True
                else:
                    # Additional images are deliberately dropped.
                    continue
            elif s["type"] == "text":
                prompt = s["value"]
                if len(prompt) == 0:
                    continue
                # Dataset-specific prompt rewrites to steer the answer format.
                if dataset == "HallusionBench":
                    prompt += " Please answer yes or no directly, without any unnecessary explanation."
                elif dataset == "OCRBench":
                    prompt = (
                        prompt + "\nExtract the text from the image intactly and "
                        + "answer the question concisely and clearly if possible."
                    )

                elif (
                    dataset == "AI2D_TEST"
                    or dataset == "MMStar"
                    or dataset == "MMBench_TEST_EN_V11"
                    or dataset == "MMVet"
                ):
                    prompt = prompt.replace(
                        "Please select the correct answer from the options above. \n",
                        "Please select the correct option from the above choices based on the "
                        + "input image and question. The final output should only be one option, such as 'A'",
                    )
                elif dataset == "MMBench_TEST_CN_V11":
                    prompt = prompt.replace(
                        "Please select the correct answer from the options above. \n",
                        "请根据输入图像和问题从上述选项中选择正确选项,最终的输出只有一个选项,例如'A'",
                    )
                item = {"type": "text", "text": prompt}
            else:
                raise ValueError(f"Invalid message type: {s['type']}, {s}")
            content.append(item)

        return content

    def generate_inner(self, inputs, **kwargs) -> str:
        """POST one request; return (ret_code, answer, response) or (-1, '', '')."""
        default_kwargs = self.default_kwargs
        default_kwargs.update(kwargs)

        messages = []
        messages.append(
            {
                "role": "user",
                "content": self._prepare_content(
                    inputs, dataset=kwargs.get("dataset", None)
                ),
            }
        )

        payload = dict(model=self.model, messages=messages, **default_kwargs)

        # NOTE(review): unlike the parent class, this request has no timeout —
        # a hung connection blocks indefinitely; confirm whether intended.
        response = requests.post(
            self.api_base, headers=self.headers, data=json.dumps(payload)
        )
        ret_code = response.status_code
        ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code

        answer = self.fail_msg
        try:
            resp_struct = json.loads(response.text)
            answer = resp_struct["choices"][0]["message"]["content"].strip()
            return ret_code, answer, response
        except Exception as err:
            import traceback

            traceback.print_exc()
            if self.verbose:
                self.logger.error(f"{type(err)}: {err}")
                self.logger.error(f"The input messages are {inputs}.")
            return -1, "", ""
diff --git a/VLMEvalKit-sudoku/vlmeval/api/stepai.py b/VLMEvalKit-sudoku/vlmeval/api/stepai.py
new file mode 100644
index 0000000000000000000000000000000000000000..30eedbc3beff8000d9d255ed98c3b8fd2d15ef0c
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/api/stepai.py
@@ -0,0 +1,86 @@
+from vlmeval.smp import *
+from vlmeval.api.base import BaseAPI
+
+# StepFun chat-completions endpoint and request-header template.  The
+# 'Authorization' value is a format template; the API key is substituted in
+# at client construction time.
+url = 'https://api.stepfun.com/v1/chat/completions'
+headers = {
+    'Content-Type': 'application/json',
+    'Authorization': 'Bearer {}',
+}
+
+
+class StepAPI_INT(BaseAPI):
+
+ is_api: bool = True
+
+ def __init__(self,
+ model: str = 'step-1v-8k',
+ retry: int = 10,
+ key: str = None,
+ temperature: float = 0,
+ max_tokens: int = 300,
+ verbose: bool = True,
+ system_prompt: str = None,
+ **kwargs):
+ self.model = model
+ self.fail_msg = 'Fail to obtain answer via API.'
+ self.headers = headers
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.system_prompt = system_prompt
+ if key is not None:
+ self.key = key
+ else:
+ self.key = os.environ.get('STEPAI_API_KEY', '')
+ headers['Authorization'] = headers['Authorization'].format(self.key)
+
+ super().__init__(retry=retry, verbose=verbose, system_prompt=system_prompt, **kwargs)
+
+ @staticmethod
+ def build_msgs(msgs_raw):
+ messages = []
+ message = {'role': 'user', 'content': []}
+
+ for msg in msgs_raw:
+ if msg['type'] == 'image':
+ image_b64 = encode_image_file_to_base64(msg['value'])
+ message['content'].append({
+ 'image_url': {'url': 'data:image/webp;base64,%s' % (image_b64)},
+ 'type': 'image_url'
+ })
+ elif msg['type'] == 'text':
+ message['content'].append({
+ 'text': msg['value'],
+ 'type': 'text'
+ })
+
+ messages.append(message)
+ return messages
+
+ def generate_inner(self, inputs, **kwargs) -> str:
+ print(inputs, '\n')
+ payload = dict(
+ model=self.model,
+ max_tokens=self.max_tokens,
+ temperature=self.temperature,
+ messages=self.build_msgs(msgs_raw=inputs),
+ **kwargs)
+ response = requests.post(url, headers=headers, data=json.dumps(payload))
+ ret_code = response.status_code
+ ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code
+
+ answer = self.fail_msg
+ try:
+ resp_struct = json.loads(response.text)
+ answer = resp_struct['choices'][0]['message']['content'].strip()
+ except Exception as err:
+ if self.verbose:
+ self.logger.error(f'{type(err)}: {err}')
+ self.logger.error(response.text if hasattr(response, 'text') else response)
+
+ return ret_code, answer, response
+
+
+class Step1V_INT(StepAPI_INT):
+    # Thin adapter exposing the (message, dataset) signature expected by the
+    # evaluation loop.
+
+    def generate(self, message, dataset=None):
+        # NOTE(review): super(StepAPI_INT, self) deliberately skips
+        # StepAPI_INT and dispatches to the grandparent (BaseAPI.generate);
+        # the `dataset` argument is accepted but dropped — confirm this is
+        # intentional against BaseAPI's interface.
+        return super(StepAPI_INT, self).generate(message)
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cmmmu.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cmmmu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdd4c843739e1b53e695d68474039b0da2c47bd9
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cmmmu.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_mcq.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_mcq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f69dceaa9b90578535e64412fe4342125509c2f2
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_mcq.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tamperbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tamperbench.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1776dd9b739be6396573fd36b5e5cae5f9199dbc
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tamperbench.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tempcompass.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tempcompass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90a23ccbabb962137c40f5f11435cbe026cdb939
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/tempcompass.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/video_concat_dataset.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/video_concat_dataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d0d65970ae72871051273fa60298c431f726ec6
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/video_concat_dataset.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vlmbias.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vlmbias.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dcb874bf221c8457e1b16d1d426635a1d185d89
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vlmbias.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/bmmr.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/bmmr.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd14628272f3e4d8ef61d26e329910ea40151553
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/bmmr.py
@@ -0,0 +1,281 @@
+import os
+import re
+import evaluate
+import numpy as np
+import pandas as pd
+import json
+import jsonlines
+from tqdm import tqdm
+import os.path as osp
+from vlmeval import load, dump, track_progress_rich
+from vlmeval.dataset.utils.bmmr_grade import math_equal
+
+
+def extract_boxed_content(text):
+ result = []
+ i = 0
+ pattern = r'\boxed{'
+ len_pattern = len(pattern)
+
+ while i < len(text):
+ # 搜索模式 \boxed{
+ if text[i:i + len_pattern] == pattern:
+ start = i + len_pattern
+ brace_level = 1
+ content = []
+ i = start
+
+ # 逐字符遍历并跟踪括号层级
+ while i < len(text) and brace_level > 0:
+ if text[i] == '{':
+ brace_level += 1
+ elif text[i] == '}':
+ brace_level -= 1
+ if brace_level > 0: # 最后一个}不加入内容
+ content.append(text[i])
+ i += 1
+
+ # 如果找到闭合括号则保存结果
+ if brace_level == 0:
+ result.append(''.join(content))
+ else:
+ i += 1
+ if len(result) == 0:
+ return ['No Answer']
+ return result
+
+
+def extract_text(input_string):
+ # 使用正则表达式提取 \text{} 中的文本
+ pattern = r'\\text{(.*?)}'
+ matches = re.findall(pattern, input_string)
+ return matches
+
+
+def extract_uppercase(s):
+ # 使用列表推导式来提取大写字母
+ uppercase_letters = [char for char in s if char.isupper()]
+ # 将列表转换为字符串
+ return uppercase_letters
+
+
+# Ordered (old, new) textual substitutions applied by normalize_final_answer.
+# Order matters: e.g. 'an ' must be stripped before 'a '.
+SUBSTITUTIONS = [
+    ('an ', ''), ('a ', ''), ('.$', '$'), ('\\$', ''), (r'\ ', ''), ('\\%', '%'),
+    (' ', ''), ('mbox', 'text'), (',\\text{and}', ','),
+    ('\\text{and}', ','), ('\\text{m}', '\\text{}')
+]
+# Unit / filler expressions deleted outright during answer normalization.
+REMOVED_EXPRESSIONS = [
+    'square', 'ways', 'integers', 'dollars', 'mph', 'inches', 'ft',
+    'hours', 'km', 'units', '\\ldots', 'sue', 'points', 'feet',
+    'minutes', 'digits', 'cents', 'degrees', 'cm', 'gm', 'pounds',
+    'meters', 'meals', 'edges', 'students', 'childrentickets', 'multiples',
+    '\\text{s}', '\\text{.}', '\\text{\ns}', '\\text{}^2',
+    '\\text{}^3', '\\text{\n}', '\\text{}', r'\mathrm{th}',
+    r'^\circ', r'^{\circ}', r'\;', r',\!', '{,}', '"', '\\dots'
+]
+
+
+def is_integer(s):
+ try:
+ int(s)
+ return True
+ except ValueError:
+ return False
+
+
+def normalize_final_answer(final_answer: str) -> str:
+    """Normalize a final answer to a quantitative reasoning question.
+
+    Strips units/filler words, unwraps LaTeX wrappers (``\\text``,
+    ``\\boxed``, ...) and normalizes shorthand TeX so string comparison is
+    more forgiving.  The steps below are order-dependent.
+    """
+    # Keep only the right-hand side of any equation, e.g. 'x = 5' -> '5'.
+    final_answer = str(final_answer).split('=')[-1]
+
+    for before, after in SUBSTITUTIONS:
+        final_answer = final_answer.replace(before, after)
+    for expr in REMOVED_EXPRESSIONS:
+        final_answer = final_answer.replace(expr, '')
+
+    # Extract answer that is in LaTeX math, is bold,
+    # is surrounded by a box, etc.
+    final_answer = re.sub(r'(.*?)(\$)(.*?)(\$)(.*)', '$\\3$', final_answer)
+    final_answer = re.sub(r'(\\text\{)(.*?)(\})', '\\2', final_answer)
+    final_answer = re.sub(r'(\\textbf\{)(.*?)(\})', '\\2', final_answer)
+    final_answer = re.sub(r'(\\overline\{)(.*?)(\})', '\\2', final_answer)
+    final_answer = re.sub(r'(\\boxed\{)(.*)(\})', '\\2', final_answer)
+
+    # Normalize shorthand TeX:
+    #  \fracab -> \frac{a}{b}
+    #  \frac{abc}{bef} -> \frac{abc}{bef}
+    #  \fracabc -> \frac{a}{b}c
+    #  \sqrta -> \sqrt{a}
+    #  \sqrtab -> sqrt{a}b
+    final_answer = re.sub(
+        r'(frac)([^{])(.)', 'frac{\\2}{\\3}', final_answer)
+    final_answer = re.sub(
+        r'(sqrt)([^{])', 'sqrt{\\2}', final_answer)
+    final_answer = final_answer.replace('$', '')
+
+    # Normalize 100,000 -> 100000
+    if final_answer.replace(',', '').isdigit():
+        final_answer = final_answer.replace(',', '')
+
+    return final_answer
+
+
+def open_end_verify(ref, cand):
+    """Grade an open-ended prediction against the reference answer.
+
+    Args:
+        ref: ground-truth answer (string, or list whose first item is used).
+        cand: raw model output; the last ``\\boxed{...}`` span is graded.
+
+    Returns:
+        dict with key 'acc': judgement from ``math_equal`` (0 when the
+        reference normalizes to an empty string).
+    """
+    gt_ans = ref
+    if type(gt_ans) is list:
+        gt_ans = gt_ans[0]
+    # gt_ans = extract_answer(gt_ans)
+    gt_ans = normalize_final_answer(gt_ans)
+    if len(gt_ans) == 0:
+        # Nothing left after normalization: cannot grade, count as wrong.
+        return {'acc': 0}
+
+    ans = extract_boxed_content(cand)[-1]
+    ans = normalize_final_answer(ans)
+    # raw_judge = check_is_correct(ans, gt_ans)
+
+    raw_judge = False
+    # raw_judge = gt_ans.lower() in ans.lower()
+    if not raw_judge:
+        # ans = extract_boxed_content(raw_ans.split('Answer###')[-1])[0]
+
+        # Symbolic / numeric equivalence check (handles fractions, etc.).
+        raw_judge = math_equal(gt_ans,ans)
+
+    return {'acc': raw_judge}
+
+
+def multichoice_verify(ref, cand):
+ correct_cnt = 0
+ correct_ness = []
+ gt_ans = ref
+ if len(gt_ans) == 0:
+ # correct_ness = [False] * len(data['model_answer_answer']) # data['model_answer_answer'] is the rollout answers
+ return {'acc': 0}
+
+ ans = extract_uppercase(extract_boxed_content(cand.split('Answer###')[-1])[0])
+ choice_correct_cnt = 0
+ if len(gt_ans) == 1 and gt_ans[0].startswith('[') and gt_ans[0].endswith(']'):
+ gt_ans = gt_ans[0]
+ gt_ans = gt_ans.replace("'", "\"")
+ gt_ans = json.loads(gt_ans)
+ if len(ans) == len(gt_ans):
+ for c in ans:
+ if c in gt_ans:
+ choice_correct_cnt += 1
+ correct_cnt += choice_correct_cnt / len(gt_ans)
+ if choice_correct_cnt / len(gt_ans) == 1:
+ correct_ness.append(True)
+ else:
+ correct_ness.append(False)
+
+ return {'acc': correct_ness[0]}
+
+
+def get_acc_for_reference_based_metrics(
+    references, candidates, image_id_list, task_types, reference_based_metrics_file
+):
+    """
+    Get the accuracy for the reference-based metrics.
+
+    Grades each candidate against its reference ('open_end' or 'mc' task) and
+    caches per-image results in `reference_based_metrics_file`, so interrupted
+    runs can resume; partial results are flushed every 50 iterations.
+    """
+    # Resume from a previous (possibly partial) run when the cache exists.
+    existing_data = load(reference_based_metrics_file) if osp.exists(reference_based_metrics_file) else {}
+    idx = 1
+    print(f"Calculating metrics for {len(references)} samples")
+    assert len(references) == len(candidates) == len(image_id_list)
+    for ref, cand, image_id, task_type in tqdm(zip(references, candidates, image_id_list, task_types)):
+        # Blank candidates cannot be graded; skip them entirely.
+        if not cand.strip():
+            print(cand)
+            continue
+        default_acc_score = {'acc': 0.0}
+        if image_id not in existing_data:
+            existing_data[image_id] = {}
+        # Reuse a cached score unless it is still the all-zero default.
+        acc_score = existing_data.get(image_id, {}).get('acc_score', default_acc_score)
+        if acc_score == default_acc_score:
+            if task_type is None:
+                task_type = 'open_end'
+            if task_type == "open_end":
+                acc_score = open_end_verify(ref, cand)
+            elif task_type == "mc":
+                acc_score = multichoice_verify(ref, cand)
+            else:
+                raise ValueError(f"Task type {task_type} not supported")
+        existing_data[image_id]['acc_score'] = acc_score
+
+        # Periodic checkpoint so long runs survive interruption.
+        if idx % 50 == 0:
+            print(f"Saving 50 samples to {reference_based_metrics_file}")
+            dump(existing_data, reference_based_metrics_file)
+
+        idx += 1
+    dump(existing_data, reference_based_metrics_file)
+    print(f"Saved all samples to {reference_based_metrics_file}")
+
+    return existing_data
+
+
+def merge_rating(refer_based_metrics_output_file_name):
+    """Aggregate per-sample accuracy into a one-row summary DataFrame.
+
+    Columns: overall CoT / no-CoT accuracy, per-language (En/Zh) CoT
+    accuracy, and per-discipline CoT accuracy keyed by 2-digit category id.
+    """
+    refer_based_metrics_output_file = load(refer_based_metrics_output_file_name)
+
+    refer_based_metrics_output_file['acc_score'] = None  # initialize new column
+    for idx, item in refer_based_metrics_output_file.iterrows():
+        # NOTE(review): eval() on a stored string — fine for trusted local
+        # result files, unsafe on untrusted input (ast.literal_eval is safer).
+        ref_based_metrics = eval(item['reference_based_metrics'])
+        refer_based_metrics_output_file.at[idx, 'acc_score'] = ref_based_metrics['acc_score']['acc']
+
+    df = refer_based_metrics_output_file
+    metrics = ['acc_score']
+    # Mean accuracy over rows where cot is True.
+    cot_true_df = df[df['cot']]
+    cot_true_metrics = {
+        'acc_score': [cot_true_df[metrics].mean().values[0]]
+    }
+
+    # Mean accuracy over rows where cot is False.
+    cot_false_df = df[~df['cot']]
+    cot_false_metrics = {
+        'acc_score': [cot_false_df[metrics].mean().values[0]]
+    }
+
+    # Per-language means over CoT rows.
+    cot_lang_df = df[df['cot']].groupby('language')[metrics].mean()
+    cot_lang_metrics = {
+        'acc_score': cot_lang_df['acc_score'].values
+    }
+
+    # category_id may be a stringified list; keep only the 2-digit prefixes.
+    df['category_id'] = df['category_id'].apply(lambda x: eval(x) if isinstance(x, str) else x)
+    df['category_id'] = df['category_id'].apply(lambda x: [item[:2] for item in x])
+
+    # Only CoT rows contribute to the per-category breakdown.
+    cot_df = df[df['cot']]
+
+    # Expand multi-category rows: one output row per category_id.
+    expanded_rows = []
+    for idx, row in cot_df.iterrows():
+        for cat_id in row['category_id']:
+            new_row = row.copy()
+            new_row['category_id'] = cat_id
+            expanded_rows.append(new_row)
+
+    expanded_df = pd.DataFrame(expanded_rows)
+    category_id_df = expanded_df.groupby('category_id')[metrics].mean()
+    category_id_metrics = {
+        'acc_score': category_id_df['acc_score'].values
+    }
+
+    # Merge all results.
+    # NOTE(review): indices [0]/[1] assume groupby('language') yields exactly
+    # two groups sorted as En, Zh — verify against the data's language labels.
+    result_dict = {
+        'CoT': cot_true_metrics['acc_score'],
+        'no_CoT': cot_false_metrics['acc_score'],
+        'En': [cot_lang_metrics['acc_score'][0]],
+        'Zh': [cot_lang_metrics['acc_score'][1]]
+    }
+    # Map 2-digit category ids to human-readable discipline names.
+    id2name = {"02": "Arts",
+               "03": "Soc. Sci.",
+               "04": "Bus.",
+               "05": "Nat. Sci.",
+               "06": "ICTs",
+               "07": "Eng.",
+               "08": "Agri.",
+               "09": "Health",
+               "11": "UnClassified"}
+    # Append per-category CoT results.
+    for cat_id, score in zip(category_id_df.index, category_id_metrics['acc_score']):
+        if cat_id != "11":  # skip the UnClassified bucket
+            result_dict[f'{id2name[cat_id]}'] = [score]
+    result_df = pd.DataFrame(result_dict)
+
+    return result_df
diff --git a/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/matching_util.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/matching_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a13f744070f57f4e8f6ce60bcab744cbc82048e2
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/utils/__pycache__/matching_util.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minimonkey.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minimonkey.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b565ff7e91f718bcd261c8bca868710f6667a3a
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/minimonkey.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/moondream.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/moondream.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8d7bbd383fb5b46da685c82147a553d4fa7674c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/moondream.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omchat.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omchat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb5d2ec95c683d9062a451ba91b335cbb6ad2c6c
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/omchat.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/open_flamingo.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/open_flamingo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c07b048e99b782531ddafcf86f44b5da0f8725f
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/open_flamingo.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/pandagpt.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/pandagpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3cd6ebf74236230057c072092c93167a212a868
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/pandagpt.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vintern_chat.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vintern_chat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f56aa13cebd79cda6325bcdf042a83cf26fc5a6
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/vintern_chat.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/wemm.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/wemm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df7af64132be40621544c79b05905f3f6439507f
Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/wemm.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/bunnyllama3.py b/VLMEvalKit-sudoku/vlmeval/vlm/bunnyllama3.py
new file mode 100644
index 0000000000000000000000000000000000000000..0016f47e3dc83581278acd98fc01704908beb17b
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/bunnyllama3.py
@@ -0,0 +1,133 @@
+import torch
+import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from PIL import Image
+import warnings
+import re
+
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+
+
+class BunnyLLama3(BaseModel):
+ INSTALL_REQ = False
+ INTERLEAVE = False
+
+ def __init__(self, model_path='BAAI/Bunny-v1_1-Llama-3-8B-V', **kwargs):
+ assert model_path is not None
+ transformers.logging.set_verbosity_error()
+ transformers.logging.disable_progress_bar()
+ warnings.filterwarnings('ignore')
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+ self.model = AutoModelForCausalLM.from_pretrained(model_path, device_map="cuda", trust_remote_code=True)
+ self.kwargs = kwargs
+
+ def use_custom_prompt(self, dataset):
+ if listinstr(['MCQ', 'Y/N'], DATASET_TYPE(dataset)) or listinstr(['mathvista'], dataset.lower()):
+ return True
+ else:
+ return False
+
+ def build_prompt(self, line, dataset):
+ if dataset is None:
+ dataset = self.dataset
+
+ if isinstance(line, int):
+ line = self.data.iloc[line]
+
+ tgt_path = self.dump_image(line, dataset)
+
+ prompt = line['question']
+
+ if DATASET_TYPE(dataset) == 'MCQ':
+ if listinstr(['mmmu'], dataset.lower()):
+ hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+ assert hint is None
+
+ question = line['question']
+ question = re.sub(r'', lambda x: x.group(0)[1:-1], question)
+
+ options = {
+ cand: line[cand]
+ for cand in string.ascii_uppercase
+ if cand in line and not pd.isna(line[cand])
+ }
+ options_prompt = '\n'
+ for key, item in options.items():
+ options_prompt += f'({key}) {item}\n'
+
+ prompt = question
+ if len(options):
+ prompt += options_prompt
+ prompt += "\nAnswer with the option's letter from the given choices directly."
+ else:
+ prompt += '\nAnswer the question using a single word or phrase.'
+ else:
+ hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+ prompt = ''
+ if hint is not None:
+ prompt += f'{hint}\n'
+
+ question = line['question']
+
+ options = {
+ cand: line[cand]
+ for cand in string.ascii_uppercase
+ if cand in line and not pd.isna(line[cand])
+ }
+ options_prompt = '\n'
+ for key, item in options.items():
+ options_prompt += f'{key}. {item}\n'
+
+ prompt += question + options_prompt
+ if listinstr(['cn', 'ccbench'], dataset.lower()):
+ prompt += '请直接回答选项字母。'
+ else:
+ prompt += "Answer with the option's letter from the given choices directly."
+ elif DATASET_TYPE(dataset) == 'Y/N':
+ if listinstr(['mme'], dataset.lower()):
+ if not listinstr(
+ ['code_reasoning', 'commonsense_reasoning', 'numerical_calculation', 'text_translation'],
+ line['category']):
+ prompt = prompt.replace(' Please answer yes or no.',
+ '\nAnswer the question using a single word or phrase.')
+ elif listinstr(['pope'], dataset.lower()):
+ prompt = prompt.replace(' Please answer yes or no.',
+ '\nAnswer the question using a single word or phrase.')
+ elif listinstr(['mathvista'], dataset.lower()):
+ match = re.search(r'Hint: (.*?)\nQuestion: (.*?)\n(Choices:\n(.*))?', prompt + '\n', re.DOTALL)
+
+ prompt = match.group(2)
+ if match.group(4) is not None:
+ prompt += '\n' + match.group(4).rstrip('\n')
+ prompt += '\n' + match.group(1)
+ else:
+ raise ValueError(
+ f"Bunny doesn't implement a custom prompt for {dataset}. It should use the default prompt, but didn't.")
+
+ msgs = []
+ if isinstance(tgt_path, list):
+ msgs.extend([dict(type='image', value=p) for p in tgt_path])
+ else:
+ msgs = [dict(type='image', value=tgt_path)]
+ msgs.append(dict(type='text', value=prompt))
+
+ return msgs
+
+ def generate_inner(self, message, dataset=None):
+
+ prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
+
+ text = (f'A chat between a curious user and an artificial intelligence assistant. '
+ f"The assistant gives helpful, detailed, and polite answers to the user's questions. "
+ f'USER: \n{prompt} ASSISTANT:')
+
+ text_chunks = [self.tokenizer(chunk).input_ids for chunk in text.split('')]
+ input_ids = torch.tensor(text_chunks[0] + [-200] + text_chunks[1][1:], dtype=torch.long).unsqueeze(0)
+ image = Image.open(image_path).convert('RGB')
+ image_tensor = self.model.process_images([image], self.model.config).to(dtype=self.model.dtype)
+
+ output_ids = self.model.generate(input_ids, images=image_tensor, max_new_tokens=128, use_cache=True)[0]
+ response = self.tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True)
+ return response
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/cogvlm.py b/VLMEvalKit-sudoku/vlmeval/vlm/cogvlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a29158f0358628cd68a8d186134fddfd75474dc
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/cogvlm.py
@@ -0,0 +1,322 @@
+import re
+import torch
+from PIL import Image
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+
+
+class GLM4v(BaseModel):
+    """HF-transformers wrapper for THUDM glm-4v chat checkpoints."""
+
+    INSTALL_REQ = False
+    INTERLEAVE = False
+
+    def __init__(self, model_path='THUDM/glm-4v-9b', **kwargs):
+        """Load tokenizer and bf16 model onto CUDA; kwargs override gen config."""
+        from transformers import AutoModelForCausalLM, LlamaTokenizer, AutoTokenizer
+        assert model_path is not None
+        self.model_path = model_path
+        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            torch_dtype=torch.bfloat16,
+            low_cpu_mem_usage=True,
+            trust_remote_code=True
+        ).to('cuda').eval()
+        gen_kwargs = {'max_length': 2048, 'do_sample': False}
+        gen_kwargs.update(kwargs)
+        self.kwargs = gen_kwargs
+        # Everything after this token in the decoded output is discarded.
+        self.end_text_token = '<|endoftext|>'
+
+    def generate_inner(self, message, dataset=None):
+        """Run one single-image chat turn and return the decoded answer."""
+        prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
+        image = Image.open(image_path).convert('RGB')
+        # Bias MCQ / yes-no datasets toward terse answers.
+        if dataset is not None and DATASET_TYPE(dataset) in ['MCQ', 'Y/N']:
+            prompt += '\nShort Answer.'
+        inputs = self.tokenizer.apply_chat_template(
+            [{'role': 'user', 'image': image, 'content': prompt}],
+            add_generation_prompt=True, tokenize=True, return_tensors='pt', return_dict=True
+        )
+        inputs = inputs.to('cuda')
+
+        with torch.no_grad():
+            outputs = self.model.generate(**inputs, **self.kwargs)
+            # Keep only the newly generated tokens.
+            outputs = outputs[:, inputs['input_ids'].shape[1]:]
+            response = self.tokenizer.decode(outputs[0])
+        return response.split(self.end_text_token)[0]
+
+
+class GLMThinking(BaseModel):
+    """Wrapper for GLM-4.1V / GLM-4.5V "Thinking" models.
+
+    Supports two backends: plain HF transformers, or vLLM when the
+    ``use_vllm`` kwarg is set.
+    """
+
+    INSTALL_REQ = False
+    INTERLEAVE = True
+
+    def __init__(self, model_path='THUDM/GLM-4.1V-9B-Thinking', **kwargs):
+        """Load the processor and either a vLLM engine or an HF model."""
+        from transformers import AutoProcessor
+        self.device = 'cuda'
+        self.use_vllm = kwargs.get('use_vllm', False)
+        # Hard cap on the number of images accepted per prompt.
+        self.limit_mm_per_prompt = 24
+
+        print(f"Loading processor from {model_path}")
+        self.processor = AutoProcessor.from_pretrained(
+            model_path,
+            use_fast=True,
+            local_files_only=True,
+            trust_remote_code=True
+        )
+        if self.use_vllm:
+            from vllm import LLM
+            # Set tensor_parallel_size [8, 4, 2, 1] based on the number of available GPUs
+            gpu_count = torch.cuda.device_count()
+            if gpu_count >= 8:
+                tp_size = 8
+            elif gpu_count >= 4:
+                tp_size = 4
+            elif gpu_count >= 2:
+                tp_size = 2
+            else:
+                tp_size = 1
+            logging.info(
+                f'Using vLLM for Llama4 inference with {tp_size} GPUs (available: {gpu_count})'
+            )
+            if os.environ.get('VLLM_WORKER_MULTIPROC_METHOD') != 'spawn':
+                logging.warning(
+                    'VLLM_WORKER_MULTIPROC_METHOD is not set to spawn.'
+                    'Use \'export VLLM_WORKER_MULTIPROC_METHOD=spawn\' to avoid potential multi-process issues'
+                )
+            self.llm = LLM(
+                model=model_path,
+                max_num_seqs=4,
+                max_model_len=32768,
+                limit_mm_per_prompt={"image": self.limit_mm_per_prompt},
+                tensor_parallel_size=tp_size,
+                gpu_memory_utilization=kwargs.get("gpu_utils", 0.9),
+            )
+        else:
+            # Pick the model class matching the checkpoint family.
+            if "GLM-4.5V" in model_path:
+                from transformers import Glm4vMoeForConditionalGeneration
+                self.model = Glm4vMoeForConditionalGeneration.from_pretrained(
+                    pretrained_model_name_or_path=model_path,
+                    torch_dtype=torch.bfloat16,
+                    local_files_only=True,
+                    trust_remote_code=True,
+                    low_cpu_mem_usage=True,
+                    device_map="auto"
+                )
+            elif "GLM-4.1V" in model_path:
+                from transformers import Glm4vForConditionalGeneration
+                self.model = Glm4vForConditionalGeneration.from_pretrained(
+                    pretrained_model_name_or_path=model_path,
+                    torch_dtype=torch.bfloat16,
+                    local_files_only=True,
+                    trust_remote_code=True
+                ).to(self.device)
+
+    def build_msgs(self, msgs_raw, system_prompt=None, dataset=None):
+        """Convert raw text/image items to one chat turn (images as data URLs)."""
+        msgs = cp.deepcopy(msgs_raw)
+        content = []
+        for i, msg in enumerate(msgs):
+            if msg['type'] == 'text':
+                content.append(dict(type='text', text=msg['value']))
+            elif msg['type'] == 'image':
+                content.append(dict(type='image',
+                                    url=f"data:image/jpeg;base64,{encode_image_file_to_base64(msg['value'])}"))
+        # Hallucination benchmarks expect an explicit yes/no instruction.
+        if dataset in {'HallusionBench', 'POPE'}:
+            content.append(dict(type="text", text="Please answer yes or no."))
+        ret = [dict(role='user', content=content)]
+        return ret
+
+    def build_msgs_vllm(self, message, dataset=None):
+        """Build the vLLM-style message plus the list of PIL images.
+
+        Images beyond ``limit_mm_per_prompt`` are dropped with a warning.
+        """
+        processed_message = []
+        images = []
+        num_images = 0
+        for item in message:
+            if item['type'] == 'text':
+                processed_message.append({
+                    "type": "text",
+                    "text": item['value']
+                })
+            elif item['type'] == 'image':
+                if num_images < self.limit_mm_per_prompt:
+                    image_path = item['value']
+                    img = Image.open(image_path).convert('RGB')
+                    img.load()
+                    # The actual pixels travel via multi_modal_data; the
+                    # message only carries an empty-url placeholder.
+                    processed_message.append({
+                        "type": "image",
+                        "url": "",
+                    })
+                    images.append(img)
+                    num_images += 1
+        if num_images >= self.limit_mm_per_prompt:
+            logging.warning(
+                f"Number of images exceeds the limit of {self.limit_mm_per_prompt}."
+                f"Only the first {self.limit_mm_per_prompt} images will be used."
+            )
+        if dataset in {'HallusionBench', 'POPE'}:
+            processed_message.append(dict(type="text", text="Please answer yes or no."))
+        ret = [dict(role='user', content=processed_message)]
+        return ret, images
+
+    def extract_answer(self, response_text, dataset=None):
+        """Strip reasoning text and return the final answer span.
+
+        NOTE(review): the two patterns below look like markup tags (e.g.
+        ``</think>`` / ``<answer>...</answer>``) were lost when this file was
+        exported — as written they match a literal '.*? ' prefix and a
+        '(.*?) ' group.  Confirm against the model's real output format.
+        """
+        response_text = re.sub(r'.*? ', '', response_text, flags=re.DOTALL).strip()
+        match = re.search(r'(.*?) ', response_text, re.DOTALL)
+        if match:
+            response_text = match.group(1).strip()
+        if dataset in {'OCRBench', 'MMLongBench_DOC'}:
+            return response_text
+        # extract box
+        pattern_box = r'<\|begin_of_box\|>(.*?)<\|end_of_box\|>'
+        match = re.search(pattern_box, response_text, re.DOTALL)
+        if match:
+            return match.group(1).strip()
+        return response_text
+
+    def generate_inner_transformers(self, message, dataset=None):
+        """Generate with the HF backend; return the extracted answer."""
+        try:
+            inputs = message
+            assert isinstance(inputs, str) or isinstance(inputs, list)
+            inputs = [inputs] if isinstance(inputs, str) else inputs
+
+            messages = self.build_msgs(msgs_raw=inputs, dataset=dataset)
+
+            inputs = self.processor.apply_chat_template(
+                messages,
+                tokenize=True,
+                add_generation_prompt=True,
+                return_dict=True,
+                return_tensors="pt"
+            ).to(self.device)
+            # GLM processors may emit token_type_ids that generate() rejects.
+            inputs.pop("token_type_ids", None)
+
+            # Run generation.
+            generated_ids = self.model.generate(**inputs, max_new_tokens=8192)
+
+            # Decode only the newly generated tokens.
+            answer = self.processor.decode(
+                generated_ids[0][inputs["input_ids"].shape[1]:],
+                skip_special_tokens=True
+            )
+            return self.extract_answer(answer, dataset=dataset)
+        except Exception as err:
+            print(err)
+            return 'Failed to obtain answer.'
+
+    def generate_inner_vllm(self, message, dataset=None):
+        """Generate with the vLLM backend; return the extracted answer."""
+        from vllm import SamplingParams
+        try:
+            messages, images = self.build_msgs_vllm(message, dataset=dataset)
+            prompt = self.processor.apply_chat_template(
+                messages,
+                tokenize=False,
+                add_generation_prompt=True
+            )
+            sampling_params = SamplingParams(temperature=0, max_tokens=8192)
+            outputs = self.llm.generate(
+                {
+                    "prompt": prompt,
+                    "multi_modal_data": {
+                        "image": images
+                    },
+                },
+                sampling_params=sampling_params
+            )
+
+            # Single request -> return on the first (only) output.
+            for o in outputs:
+                generated_text = o.outputs[0].text
+                return self.extract_answer(generated_text, dataset=dataset)
+        except Exception as err:
+            print(err)
+            return 'Failed to obtain answer.'
+
+    def generate_inner(self, message, dataset=None):
+        """Dispatch to the vLLM or transformers backend."""
+        if self.use_vllm:
+            return self.generate_inner_vllm(message, dataset=dataset)
+        else:
+            return self.generate_inner_transformers(message, dataset=dataset)
+
+
+class CogVlm(BaseModel):
+
+ INSTALL_REQ = False
+ INTERLEAVE = False
+
+ def __init__(self, model_path='THUDM/cogvlm2-llama3-chat-19B', tokenizer_name=None, **kwargs):
+ from transformers import AutoModelForCausalLM, LlamaTokenizer, AutoTokenizer
+ assert model_path is not None
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path,
+ torch_dtype=torch.bfloat16,
+ trust_remote_code=True,
+ ).to('cuda').eval()
+
+ self.kwargs = kwargs
+ if tokenizer_name:
+ tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name)
+ gen_kwargs = {'max_length': 2048, 'do_sample': False}
+ self.end_text_token = ''
+ else:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+ gen_kwargs = {'max_new_tokens': 2048, 'pad_token_id': 128002}
+ self.end_text_token = '<|end_of_text|>'
+ self.kwargs.update(gen_kwargs)
+ self.tokenizer = tokenizer
+ self.model = model
+
+ def use_custom_prompt(self, dataset):
+ assert dataset is not None
+ if DATASET_TYPE(dataset) == 'MCQ':
+ return True
+ return False
+
+ def build_prompt(self, line, dataset=None):
+ assert dataset is None or isinstance(dataset, str)
+ assert self.use_custom_prompt(dataset)
+ tgt_path = self.dump_image(line, dataset)
+
+ if dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
+ question = line['question']
+ hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+ if hint is not None:
+ question = hint + '\n' + question
+
+ option_candidate = string.ascii_uppercase
+ options = {
+ cand: line[cand]
+ for cand in option_candidate
+ if cand in line and not pd.isna(line[cand])
+ }
+ for key, item in options.items():
+ question += f'\n{key}. {item}'
+ prompt = question
+
+ if not cn_string(prompt):
+ prompt = prompt + '\n' + "Answer with the option's letter from the given choices directly."
+ else:
+ prompt = prompt + '\n' + '请直接回答选项字母。'
+ else:
+ prompt = line['question']
+ message = [dict(type='text', value=prompt)]
+ message.extend([dict(type='image', value=p) for p in tgt_path])
+
+ return message
+
+ def generate_inner(self, message, dataset=None):
+ prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
+ if dataset is not None and DATASET_TYPE(dataset) in ['MCQ', 'Y/N']:
+ prompt += '\nShort Answer.'
+
+ image = Image.open(image_path).convert('RGB')
+ inputs = self.model.build_conversation_input_ids(
+ self.tokenizer, query=prompt, history=[], images=[image]) # chat mode
+ inputs = {
+ 'input_ids': inputs['input_ids'].unsqueeze(0).to('cuda'),
+ 'token_type_ids': inputs['token_type_ids'].unsqueeze(0).to('cuda'),
+ 'attention_mask': inputs['attention_mask'].unsqueeze(0).to('cuda'),
+ 'images': [[inputs['images'][0].to('cuda').to(torch.bfloat16)]],
+ }
+
+ with torch.no_grad():
+ outputs = self.model.generate(**inputs, **self.kwargs)
+ outputs = outputs[:, inputs['input_ids'].shape[1]:]
+ response = self.tokenizer.decode(outputs[0])
+ response = response.split(self.end_text_token)[0].strip()
+ return response
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl.py b/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl.py
new file mode 100644
index 0000000000000000000000000000000000000000..0be9306b1f081c0d0fbd9da2c3555d35475929ad
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl.py
@@ -0,0 +1,82 @@
+import sys
+import torch
+from transformers import AutoModelForCausalLM
+import warnings
+from .base import BaseModel
+from ..smp import *
+
+
+class DeepSeekVL(BaseModel):
+    """Wrapper for DeepSeek-VL chat models (https://github.com/deepseek-ai/DeepSeek-VL)."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = True
+
+    def check_install(self):
+        # Fail fast with an actionable message when deepseek_vl is absent.
+        try:
+            import deepseek_vl
+        except Exception as e:
+            logging.critical(
+                'Please first install deepseek_vl from source codes in: https://github.com/deepseek-ai/DeepSeek-VL')
+            raise e
+
+    def __init__(self, model_path='deepseek-ai/deepseek-vl-1.3b-chat', **kwargs):
+        self.check_install()
+        assert model_path is not None
+        self.model_path = model_path
+        from deepseek_vl.models import VLChatProcessor
+
+        self.vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
+        self.tokenizer = self.vl_chat_processor.tokenizer
+
+        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+        # Run the model in bfloat16 on a single GPU, inference only.
+        self.model = model.to(torch.bfloat16).cuda().eval()
+
+        torch.cuda.empty_cache()
+        default_kwargs = dict(max_new_tokens=512, do_sample=False, use_cache=True)
+        default_kwargs.update(kwargs)
+        self.kwargs = default_kwargs
+        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
+
+    def prepare_inputs(self, message):
+        # Convert a VLMEvalKit message into the conversation format expected by
+        # VLChatProcessor: a list of dicts with role / content / images keys.
+        def prepare_itlist(msgs):
+            # Flatten an interleaved list of text/image segments.
+            content, images = '', []
+            for s in msgs:
+                if s['type'] == 'image':
+                    images.append(s['value'])
+                    # NOTE(review): the appended literal is empty — an image
+                    # placeholder token (likely '<image_placeholder>') appears
+                    # to have been lost in transit; verify against upstream.
+                    content += ''
+                elif s['type'] == 'text':
+                    content += s['value']
+            return content, images
+        conversation = []
+        if 'role' not in message[0]:
+            # Single-turn message: wrap it as one User turn.
+            content, images = prepare_itlist(message)
+            conversation.append(dict(role='User', content=content, images=images))
+        else:
+            # Multi-turn chat: map generic roles onto DeepSeek-VL role names.
+            role_map = {'user': 'User', 'assistant': 'Assistant'}
+            for msgs in message:
+                role = role_map[msgs['role']]
+                content, images = prepare_itlist(msgs['content'])
+                conversation.append(dict(role=role, content=content, images=images))
+        # A trailing empty Assistant turn prompts the model to answer.
+        conversation.append(dict(role='Assistant', content=''))
+        return conversation
+
+    def generate_inner(self, message, dataset=None):
+        conversation = self.prepare_inputs(message)
+        from deepseek_vl.utils.io import load_pil_images
+        pil_images = load_pil_images(conversation)
+        prepare_inputs = self.vl_chat_processor(conversations=conversation, images=pil_images, force_batchify=True)
+        prepare_inputs = prepare_inputs.to(self.model.device)
+        # Fuse image features into the text embedding sequence.
+        inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)
+
+        outputs = self.model.language_model.generate(
+            inputs_embeds=inputs_embeds,
+            attention_mask=prepare_inputs.attention_mask,
+            pad_token_id=self.tokenizer.eos_token_id,
+            bos_token_id=self.tokenizer.bos_token_id,
+            eos_token_id=self.tokenizer.eos_token_id,
+            **self.kwargs)
+        answer = self.tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
+        return answer
+
+    def chat_inner(self, message, dataset=None):
+        # Multi-turn chat reuses the single-turn path; prepare_inputs already
+        # handles role-tagged messages.
+        return self.generate_inner(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl2.py b/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl2.py
new file mode 100644
index 0000000000000000000000000000000000000000..af6497ea2597a1ccb74f665c89cdf128c1d6033b
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/deepseek_vl2.py
@@ -0,0 +1,163 @@
+import sys
+import torch
+from transformers import AutoModelForCausalLM
+import warnings
+from .base import BaseModel
+from ..smp import *
+from PIL import Image
+
+
+class DeepSeekVL2(BaseModel):
+    """Wrapper for DeepSeek-VL2 models (https://github.com/deepseek-ai/DeepSeek-VL2)."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = True
+
+    def check_install(self):
+        # Fail fast with an actionable message when deepseek_vl2 is absent.
+        try:
+            import deepseek_vl2
+        except Exception as e:
+            logging.critical(
+                'Please first install deepseek_vl2 from source codes in: https://github.com/deepseek-ai/DeepSeek-VL2')
+            raise e
+
+    def __init__(self, model_path='deepseek-ai/deepseek-vl2-tiny', **kwargs):
+        self.check_install()
+        assert model_path is not None
+        self.model_path = model_path
+        from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
+
+        self.vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path)
+        self.tokenizer = self.vl_chat_processor.tokenizer
+
+        # Loaded directly in bfloat16; the annotation documents the concrete
+        # class returned by trust_remote_code loading.
+        model: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path,
+                                                                              trust_remote_code=True,
+                                                                              torch_dtype=torch.bfloat16)
+        self.model = model.cuda().eval()
+
+        torch.cuda.empty_cache()
+        default_kwargs = dict(max_new_tokens=2048, do_sample=False, use_cache=True)
+        default_kwargs.update(kwargs)
+        self.kwargs = default_kwargs
+        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
+
+    def prepare_inputs(self, message, dataset=None):
+        # Build the DeepSeek-VL2 conversation list. MMMU gets a dedicated
+        # branch that front-loads image tokens and rewrites the MCQ boilerplate.
+
+        if dataset == 'MMMU_DEV_VAL':
+
+            def prepare_itlist(msgs):
+                content, images = '', []
+                image_idx = 1
+                for s in msgs:
+                    if s['type'] == 'image':
+                        images.append(s['value'])
+                        # NOTE(review): the appended f-string is empty — an
+                        # image token (likely '<image>' or an indexed variant)
+                        # appears to have been lost; verify against upstream.
+                        content += f''
+                        image_idx += 1
+                    elif s['type'] == 'text':
+                        content += s['value']
+                # Prepend one placeholder per image so all image tokens sit at
+                # the start of the turn.
+                # NOTE(review): the repeated literal is empty here too —
+                # likely '<image>' * (image_idx - 1); verify.
+                content = '' * (image_idx - 1) + '\n' + content
+                return content, images
+
+            conversation = []
+            if 'role' not in message[0]:
+                content, images = prepare_itlist(message)
+                # Rewrite the generic MCQ instruction into the direct-answer
+                # phrasing DeepSeek-VL2 responds best to.
+                content = content.replace(
+                    'Please select the correct answer from the options above.',
+                    "Answer with the option's letter from the given choices directly. Answer the question using a single word or phrase.\n"  # noqa
+                )
+                content = content.replace('Question:', "")
+                content = content.replace('Options:\n', "")
+                conversation.append(dict(role='<|User|>', content=content, images=images))
+            else:
+                role_map = {'user': '<|User|>', 'assistant': '<|Assistant|>'}
+                for msgs in message:
+                    role = role_map[msgs['role']]
+                    content, images = prepare_itlist(msgs['content'])
+                    content = content.replace(
+                        'Please select the correct answer from the options above.',
+                        "Answer with the option's letter from the given choices directly. Answer the question using a single word or phrase.\n"  # noqa
+                    )
+                    content = content.replace('Question:', "")
+                    content = content.replace('Options:\n', "")
+                    conversation.append(dict(role=role, content=content, images=images))
+            # Trailing empty Assistant turn prompts the model to answer.
+            conversation.append(dict(role='<|Assistant|>', content=''))
+
+        else:
+
+            def prepare_itlist(msgs):
+                content, images = '', []
+                for s in msgs:
+                    if s['type'] == 'image':
+                        images.append(s['value'])
+                        # NOTE(review): likely '<image>\n' originally — the
+                        # image token appears to have been lost; verify.
+                        content += '\n'
+                    elif s['type'] == 'text':
+                        content += s['value']
+                return content, images
+
+            conversation = []
+            if 'role' not in message[0]:
+                content, images = prepare_itlist(message)
+                conversation.append(dict(role='<|User|>', content=content, images=images))
+            else:
+                role_map = {'user': '<|User|>', 'assistant': '<|Assistant|>'}
+                for msgs in message:
+                    role = role_map[msgs['role']]
+                    content, images = prepare_itlist(msgs['content'])
+                    conversation.append(dict(role=role, content=content, images=images))
+            conversation.append(dict(role='<|Assistant|>', content=''))
+
+        return conversation
+
+    def generate_inner(self, message, dataset=None):
+        conversation = self.prepare_inputs(message, dataset)
+        from deepseek_vl2.utils.io import load_pil_images
+        pil_images = load_pil_images(conversation)
+
+        if dataset == 'MMMU_DEV_VAL':
+            if len(pil_images):
+                # Upscale the first image 2x for MMMU.
+                # NOTE(review): PIL .size returns (width, height), so the
+                # names h, w are swapped; the result is still correct only
+                # because both dimensions are doubled symmetrically.
+                h, w = pil_images[0].size
+                pil_images[0] = pil_images[0].resize((2 * h, 2 * w), Image.BILINEAR)
+
+        prepare_inputs = self.vl_chat_processor(
+            conversations=conversation,
+            images=pil_images,
+            force_batchify=True,
+            system_prompt=""
+        )
+        prepare_inputs = prepare_inputs.to(self.model.device)
+        inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)
+
+        # Chunked prefill keeps peak memory bounded for long image sequences.
+        inputs_embeds, past_key_values = self.model.incremental_prefilling(
+            input_ids=prepare_inputs.input_ids,
+            images=prepare_inputs.images,
+            images_seq_mask=prepare_inputs.images_seq_mask,
+            images_spatial_crop=prepare_inputs.images_spatial_crop,
+            attention_mask=prepare_inputs.attention_mask,
+            chunk_size=512
+        )
+
+        # run the model to get the response
+        outputs = self.model.generate(
+            inputs_embeds=inputs_embeds,
+            input_ids=prepare_inputs.input_ids,
+            images=prepare_inputs.images,
+            images_seq_mask=prepare_inputs.images_seq_mask,
+            images_spatial_crop=prepare_inputs.images_spatial_crop,
+            attention_mask=prepare_inputs.attention_mask,
+            past_key_values=past_key_values,
+            pad_token_id=self.tokenizer.eos_token_id,
+            bos_token_id=self.tokenizer.bos_token_id,
+            eos_token_id=self.tokenizer.eos_token_id,
+            **self.kwargs
+        )
+
+        # Decode only the tokens generated after the prompt.
+        answer = self.tokenizer.decode(
+            outputs[0][len(prepare_inputs.input_ids[0]):].cpu().tolist(),
+            skip_special_tokens=True
+        )
+        # Drop a trailing period so answer matching is not thrown off.
+        answer = answer.rstrip('.')
+
+        return answer
+
+    def chat_inner(self, message, dataset=None):
+        # Multi-turn chat reuses the single-turn path.
+        return self.generate_inner(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/eagle_x.py b/VLMEvalKit-sudoku/vlmeval/vlm/eagle_x.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6b3d4e7fc9e06f2f26eacdd72c1b2a1b8dcfe5a
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/eagle_x.py
@@ -0,0 +1,140 @@
+import torch
+from PIL import Image
+from abc import abstractproperty
+import sys
+import os.path as osp
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+import copy
+
+
+class Eagle(BaseModel):
+    """Wrapper for NVIDIA Eagle-X models (https://github.com/NVlabs/EAGLE)."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = True
+
+    def __init__(self,
+                 model_path='NVEagle/Eagle-X5-7B',
+                 **kwargs):
+        try:
+            from eagle.model.builder import load_pretrained_model
+            from eagle.utils import disable_torch_init
+            from eagle.mm_utils import get_model_name_from_path
+        except Exception as e:
+            logging.critical('''Please install eagle before using Eagle,
+                you can install it from "https://github.com/NVlabs/EAGLE.git"''')
+            raise e
+
+        warnings.warn('Please install the latest version of eagle from github before you evaluate the Eagle model.')
+        # model_path is either a local directory or an 'org/name' HF repo id.
+        assert osp.exists(model_path) or splitlen(model_path) == 2
+        model_name = get_model_name_from_path(model_path)
+
+        self.tokenizer, self.model, self.image_processor, self.context_len = (
+            load_pretrained_model(model_path, None, model_name, False, False, device_map="cuda")
+        )
+        self.model.eval()
+        self.conv_mode = 'vicuna_v1'
+
+        # Sampling defaults mirror the Eagle reference inference script.
+        default_kwargs = dict(
+            do_sample=True,
+            temperature=0.2,
+            top_p=0.5,
+            num_beams=1,
+            max_new_tokens=512,
+            use_cache=True
+        )
+
+        default_kwargs.update(kwargs)
+        self.kwargs = default_kwargs
+        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
+        torch.cuda.empty_cache()
+
+    def generate_inner(self, message, dataset=None):
+        try:
+            from eagle import conversation as conversation_lib
+            from eagle.constants import (IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN,
+                                         DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN)
+            from eagle.conversation import conv_templates, SeparatorStyle
+            from eagle.mm_utils import tokenizer_image_token, process_images, KeywordsStoppingCriteria
+        except Exception as e:
+            logging.critical('''Please install eagle before using Eagle,
+                you can install it from "https://github.com/NVlabs/EAGLE.git"''')
+            raise e
+
+        kwargs = self.kwargs
+
+        # Split the interleaved message into image paths and one text prompt.
+        images = []
+        prompt = ''
+
+        for s in message:
+            if s['type'] == 'image':
+                images.append(s['value'])
+            elif s['type'] == 'text':
+                prompt += s['value']
+
+        # Rebinds the imported name locally: one image token per input image.
+        DEFAULT_IMAGE_TOKEN = DEFAULT_IMAGE_TOKEN * len(images)
+        if self.model.config.mm_use_im_start_end:
+            prompt = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + prompt
+        else:
+            prompt = DEFAULT_IMAGE_TOKEN + '\n' + prompt
+
+        # Wrap the prompt in the vicuna_v1 conversation template.
+        conv = conv_templates[self.conv_mode].copy()
+        conv.append_message(conv.roles[0], prompt)
+        conv.append_message(conv.roles[1], None)
+        prompt = conv.get_prompt()
+        images = [Image.open(s).convert('RGB') for s in images]
+
+        image_tensor = process_images(images, self.image_processor, self.model.config)
+        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+        input_ids = input_ids.to(device='cuda', non_blocking=True)
+        image_tensor = image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True)
+
+        with torch.inference_mode():
+            output_ids = self.model.generate(
+                input_ids.unsqueeze(0),
+                images=image_tensor,
+                image_sizes=[img.size for img in images],
+                **kwargs
+            )
+
+        outputs = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+        return outputs
+
+    def use_custom_prompt(self, dataset):
+        # Custom prompts for MCQ datasets and MMVet; MMMU is excluded and
+        # falls back to the framework's default prompt builder.
+        assert dataset is not None
+        if listinstr(['MMMU'], dataset):
+            return False
+        if DATASET_TYPE(dataset) == 'MCQ' or dataset == 'MMVet':
+            return True
+        return False
+
+    def build_prompt(self, line, dataset=None):
+        assert dataset is None or isinstance(dataset, str)
+        assert self.use_custom_prompt(dataset)
+        tgt_path = self.dump_image(line, dataset)
+        question = line['question']
+        if dataset == 'MMVet':
+            prompt = question + '\nAnswer the question directly. '
+        elif DATASET_TYPE(dataset) == 'MCQ':
+            # Collect whichever option columns (A..Z) are present and non-NaN.
+            options = {
+                cand: line[cand]
+                for cand in string.ascii_uppercase
+                if cand in line and not pd.isna(line[cand])
+            }
+            options_prompt = ''
+            for key, item in options.items():
+                options_prompt += f'{key}. {item}\n'
+
+            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+            prompt = f'Hint: {hint}\n' if hint is not None else ''
+            prompt += f'{question}\n'
+            prompt += (
+                f'{options_prompt}\nAnswer with the option’s letter from the given choices directly. '
+                if len(options) else 'Answer the question directly. '
+            )
+        else:
+            raise NotImplementedError
+
+        message = [dict(type='text', value=prompt)]
+        message.extend([dict(type='image', value=s) for s in tgt_path])
+        return message
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/h2ovl_mississippi.py b/VLMEvalKit-sudoku/vlmeval/vlm/h2ovl_mississippi.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5957d14df6728b0f6e20cf15ca8c3948111a058
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/h2ovl_mississippi.py
@@ -0,0 +1,117 @@
+import torch
+from transformers import AutoTokenizer, AutoModel
+import warnings
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+import pandas as pd
+import string
+
+
+class H2OVLChat(BaseModel):
+    """Wrapper for H2O.ai's H2OVL-Mississippi chat models."""
+
+    INSTALL_REQ = False
+    INTERLEAVE = True
+
+    def __init__(self, model_path='h2oai/h2ovl-mississippi-2b', **kwargs):
+        assert model_path is not None
+
+        self.model_path = model_path
+        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
+
+        device = torch.cuda.current_device()
+        self.device = device
+        self.model = AutoModel.from_pretrained(
+            model_path,
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True).eval()
+        self.model = self.model.to(device)
+        self.image_size = self.model.config.vision_config.image_size
+
+        kwargs_default = dict(do_sample=False, max_new_tokens=1024, top_p=None, num_beams=1)
+        kwargs_default.update(kwargs)
+        self.kwargs = kwargs_default
+        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
+
+    def use_custom_prompt(self, dataset):
+        # This wrapper always builds its own dataset-specific prompts.
+        return True
+
+    def build_multi_choice_prompt(self, line, dataset=None):
+        # Compose hint + question + lettered options, then append a
+        # direct-answer instruction in the question's language.
+        question = line['question']
+        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+        if hint is not None:
+            question = hint + '\n' + question
+
+        options = {
+            cand: line[cand]
+            for cand in string.ascii_uppercase
+            if cand in line and not pd.isna(line[cand])
+        }
+        for key, item in options.items():
+            question += f'\n{key}. {item}'
+        prompt = question
+
+        if len(options):
+            prompt += '\n请直接回答选项字母。' if cn_string(
+                prompt) else "\nAnswer with the option's letter from the given choices directly."
+        else:
+            prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.'
+
+        return prompt
+
+    def build_prompt(self, line, dataset=None):
+        assert self.use_custom_prompt(dataset)
+        assert dataset is None or isinstance(dataset, str)
+        tgt_path = self.dump_image(line, dataset)
+
+        # Dataset-specific instruction suffixes.
+        if dataset is not None and listinstr(['MME'], dataset):
+            question = line['question']
+            prompt = question + ' Answer the question using a single word or phrase.'
+        elif dataset is not None and listinstr(['HallusionBench'], dataset):
+            question = line['question']
+            prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.'
+        elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
+            prompt = self.build_multi_choice_prompt(line, dataset)
+        elif dataset is not None and DATASET_TYPE(dataset) == 'VQA':
+            if 'MathVista' in dataset:
+                prompt = line['question']
+            elif listinstr(['LLaVABench'], dataset):
+                question = line['question']
+                prompt = question + '\nAnswer this question in detail.'
+            elif listinstr(['MMVet'], dataset):
+                prompt = line['question']
+            else:
+                question = line['question']
+                prompt = question + '\nAnswer the question using a single word or phrase.'
+        else:
+            prompt = line['question']
+        message = [dict(type='text', value=prompt)]
+        message.extend([dict(type='image', value=s) for s in tgt_path])
+        return message
+
+    def generate_inner(self, message, dataset=None):
+        image_num = len([x for x in message if x['type'] == 'image'])
+        question = ''
+        image_files = [x['value'] for x in message if x['type'] == 'image']
+
+        if image_num == 1:
+            # NOTE(review): the leading literal is likely '<image>\n' — the
+            # image token appears to have been lost in transit; verify.
+            question = '\n' + '\n'.join([x['value'] for x in message if x['type'] == 'text'])
+
+        elif image_num > 1:
+            text_part = ' '.join([x['value'] for x in message if x['type'] == 'text'])
+            # NOTE(review): the f-string has no placeholders — an indexed image
+            # token (e.g. 'Image-{i+1}: <image>') appears to have been lost;
+            # verify against upstream.
+            image_part = ' '.join([f': ' for i in range(image_num)])
+            question = image_part + '\n' + text_part
+
+        else:
+            # Text-only query.
+            question = '\n'.join([x['value'] for x in message if x['type'] == 'text'])
+            image_files = None
+
+        response, history = self.model.chat(
+            self.tokenizer,
+            image_files=image_files,
+            question=question,
+            generation_config=self.kwargs,
+            max_tiles=6,
+            history=None,
+            return_history=True)
+        return response
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/llama4.py b/VLMEvalKit-sudoku/vlmeval/vlm/llama4.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb90c8b3dcdb0d7f73679857aa27ed0c61955ab4
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/llama4.py
@@ -0,0 +1,282 @@
+import torch
+from PIL import Image
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+from io import BytesIO
+import base64
+from mimetypes import guess_type
+
+
+class llama4(BaseModel):
+    """Wrapper for Meta Llama-4 multimodal models with three backends:
+    plain transformers (default), vLLM (`use_vllm=True`), or LMDeploy
+    (`use_lmdeploy=True`)."""
+
+    INSTALL_REQ = False
+    INTERLEAVE = True
+
+    def __init__(self, model_path="meta-llama/Llama-4-Scout-17B-16E-Instruct", **kwargs):
+        try:
+            from transformers import AutoProcessor, Llama4ForConditionalGeneration
+        except Exception as e:
+            logging.critical('Please install transformers>=4.51.0 before using llama4.')
+            raise e
+        # Near-greedy decoding defaults.
+        self.generate_kwargs = dict(
+            max_new_tokens=kwargs.get('max_new_tokens', 4096),
+            top_p=kwargs.get('top_p', 0.001),
+            top_k=kwargs.get('top_k', 1),
+            temperature=kwargs.get('temperature', 0.01),
+            repetition_penalty=kwargs.get('repetition_penalty', 1.0),
+        )
+        self.system_prompt = kwargs.get('system_prompt', None)
+
+        self.use_vllm = kwargs.get('use_vllm', False)
+        self.use_lmdeploy = kwargs.get('use_lmdeploy', False)
+        assert self.use_vllm + self.use_lmdeploy <= 1, "You can only set one flag between `use_vllm` and `use_lmdeploy` to True"  # noqa: E501
+
+        self.limit_mm_per_prompt = 10  # vLLM support max 10 images per prompt for Llama 4
+        if self.use_vllm:
+            from vllm import LLM, SamplingParams
+            # Set tensor_parallel_size [8, 4, 2, 1] based on the number of available GPUs
+            gpu_count = torch.cuda.device_count()
+            if gpu_count >= 8:
+                tp_size = 8
+            elif gpu_count >= 4:
+                tp_size = 4
+            elif gpu_count >= 2:
+                tp_size = 2
+            else:
+                tp_size = 1
+            logging.info(
+                f'Using vLLM for Llama4 inference with {tp_size} GPUs (available: {gpu_count})'
+            )
+            import os
+            if os.environ.get('VLLM_WORKER_MULTIPROC_METHOD') != 'spawn':
+                logging.warning(
+                    'VLLM_WORKER_MULTIPROC_METHOD is not set to spawn.'
+                    'Use \'export VLLM_WORKER_MULTIPROC_METHOD=spawn\' to avoid potential multi-process issues'
+                )
+            self.llm = LLM(
+                model=model_path,
+                max_num_seqs=4,
+                max_model_len=32768,
+                limit_mm_per_prompt={"image": self.limit_mm_per_prompt},
+                tensor_parallel_size=tp_size,
+                gpu_memory_utilization=kwargs.get("gpu_utils", 0.9),
+            )
+            # export VLLM_WORKER_MULTIPROC_METHOD=spawn
+
+        elif self.use_lmdeploy:
+            from lmdeploy import TurbomindEngineConfig, pipeline, ChatTemplateConfig
+            num_gpus = torch.cuda.device_count()
+            self.model = pipeline(
+                model_path,
+                backend_config=TurbomindEngineConfig(session_len=32768, cache_max_entry_count=0.1, tp=num_gpus)
+            )
+            torch.cuda.set_device(0)
+            self.device = 'cuda'
+
+        else:
+            # Default: plain transformers with flash-attention-2, sharded
+            # across all visible GPUs via device_map='auto'.
+            self.model = Llama4ForConditionalGeneration.from_pretrained(
+                model_path,
+                attn_implementation="flash_attention_2",
+                device_map="auto",
+                torch_dtype=torch.bfloat16,
+            )
+
+            self.device = 'cuda'
+        self.processor = AutoProcessor.from_pretrained(model_path)
+        self.model_name = model_path
+
+    def use_custom_prompt(self, dataset):
+        if dataset is None:
+            return False
+        # NOTE(review): AI2D and ChartQA are claimed here but build_prompt has
+        # no branch for them and would raise NotImplementedError — verify.
+        if listinstr(['AI2D', 'MMMU', 'MathVista', 'ChartQA', 'DocVQA'], dataset):
+            # For Certain dataset we use custom prompt
+            return True
+        else:
+            return False
+
+    def build_prompt(self, line, dataset=None):
+        assert self.use_custom_prompt(dataset)
+        assert dataset is None or isinstance(dataset, str)
+        tgt_path = self.dump_image(line, dataset)
+        question = line['question']
+        options = {
+            cand: line[cand]
+            for cand in string.ascii_uppercase
+            if cand in line and not pd.isna(line[cand])
+        }
+        if listinstr(['MMMU'], dataset):
+            # NOTE(review): self.kwargs is never defined in __init__ (only
+            # self.generate_kwargs), so this assignment would raise
+            # AttributeError — verify / rename.
+            self.kwargs['max_new_tokens'] = 2048
+            options = '\n'.join([f'{key}. {item}' for key, item in options.items()])
+            prompt = (
+                f'Look at the image carefully and solve the following question step-by-step. '
+                f'Question: {question} Options: {options} Indicate the correct answer at the end.'
+            )
+            for i in range(len(tgt_path)):
+                # NOTE(review): the f-string being replaced is empty — an
+                # indexed image token (likely '<image {i+1}>') appears to have
+                # been lost in transit; verify against upstream.
+                prompt = prompt.replace(f'', '')
+        elif listinstr(['MathVista'], dataset):
+            self.kwargs['max_new_tokens'] = 2048
+            prompt = f'{question}'
+        elif listinstr(['DocVQA'], dataset):
+            self.kwargs['max_new_tokens'] = 512
+            prompt = (
+                f'Read the text in the image carefully and answer the question '
+                f'with the text as seen exactly in the image. '
+                f'For yes/no questions, just respond Yes or No. '
+                f'If the answer is numeric, just respond with the number and nothing else. '
+                f'If the answer has multiple words, just respond with the words and absolutely nothing else. '
+                f'Never respond in a sentence or a phrase.\n Question: {question}'
+            )
+        else:
+            raise NotImplementedError(f'Dataset {dataset}) not supported.')
+
+        message = [dict(type='text', value=prompt)]
+        message.extend([dict(type='image', value=s) for s in tgt_path])
+        return message
+
+    def encode_image(self, image_path):
+        # Return the image at image_path base64-encoded, flattening any alpha
+        # channel onto white first.
+        mime_type, _ = guess_type(image_path)
+        if mime_type is None:
+            mime_type = "image/jpeg"
+        image_format = mime_type.split("/")[-1].upper() if mime_type else "JPEG"
+        image = Image.open(image_path)
+        # Handle the alpha channel
+        if image.mode == "RGBA":
+            image = self._rgba_to_rgb(image)
+
+        encoded_image = self._encode_image(image, image_format)
+
+        return encoded_image
+
+    def _encode_image(self, image, image_format):
+        # Serialize a PIL image to base64 text in the given format.
+        with BytesIO() as output:
+            image.convert("RGB").save(output, format=image_format)
+            base64_encoded_data = base64.b64encode(output.getvalue()).decode("utf-8")
+            return base64_encoded_data
+
+    @staticmethod
+    def _rgba_to_rgb(image):
+        # Composite an RGBA image onto a white background.
+        background = Image.new("RGBA", image.size, (255, 255, 255, 255))
+        return Image.alpha_composite(background, image).convert("RGB")
+
+    def message_to_promptimg(self, message, dataset=None):
+        # Convert a VLMEvalKit message into chat-template content items,
+        # inlining images as base64 URLs.
+        processed_message = []
+        for item in message:
+            if item['type'] == 'text':
+                processed_message.append({
+                    "type": "text",
+                    "text": f"{item['value']}"
+                })
+            elif item['type'] == 'image':
+                image_path = item['value']
+                encoded_image = self.encode_image(image_path)
+                processed_message.append({
+                    "type": "image",
+                    "url": f"{encoded_image}",
+                })
+        return processed_message
+
+    def generate_inner_transformers(self, message, dataset=None):
+        prompt = self.message_to_promptimg(message, dataset=dataset)
+        messages = [
+            {'role': 'user', 'content': prompt}
+        ]
+        inputs = self.processor.apply_chat_template(
+            messages,
+            add_generation_prompt=True,
+            tokenize=True,
+            return_dict=True,
+            return_tensors="pt",
+        ).to(self.model.device)
+        # NOTE(review): fixed 8192 here overrides generate_kwargs; confirm
+        # this is intentional.
+        max_new_tokens = 8192
+        outputs = self.model.generate(**inputs, max_new_tokens=max_new_tokens)
+        # Decode only the newly generated tokens.
+        generated_text = self.processor.batch_decode(
+            outputs[:, inputs["input_ids"].shape[-1]:]
+        )[0]
+        if generated_text.endswith("<|eot|>"):
+            generated_text = generated_text[:-7]
+        return generated_text
+
+    def message_to_promptimg_vllm(self, message, dataset=None):
+        # Like message_to_promptimg, but returns PIL images separately (vLLM
+        # takes multi_modal_data) and enforces the per-prompt image limit.
+        processed_message = []
+        images = []
+        num_images = 0
+        for item in message:
+            if item['type'] == 'text':
+                processed_message.append({
+                    "type": "text",
+                    "text": item['value']
+                })
+            elif item['type'] == 'image':
+                if num_images < self.limit_mm_per_prompt:
+                    image_path = item['value']
+                    encoded_image = self.encode_image(image_path)
+                    image = Image.open(BytesIO(base64.b64decode(encoded_image)))
+                    image.load()
+                    processed_message.append({
+                        "type": "image",
+                        "url": "",
+                    })
+                    images.append(image)
+                    num_images += 1
+        if num_images >= self.limit_mm_per_prompt:
+            logging.warning(
+                f"Number of images exceeds the limit of {self.limit_mm_per_prompt}."
+                f"Only the first {self.limit_mm_per_prompt} images will be used."
+            )
+        return processed_message, images
+
+    def generate_inner_vllm(self, message, dataset=None):
+        from vllm import LLM, SamplingParams
+        prompt, images = self.message_to_promptimg_vllm(message, dataset=dataset)
+        messages = [
+            {'role': 'user', 'content': prompt}
+        ]
+        prompt = self.processor.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+        sampling_params = SamplingParams(temperature=self.generate_kwargs['temperature'],
+                                         max_tokens=self.generate_kwargs['max_new_tokens'])
+        outputs = self.llm.generate(
+            {
+                "prompt": prompt,
+                "multi_modal_data": {
+                    "image": images
+                },
+            },
+            sampling_params=sampling_params
+        )
+
+        for o in outputs:
+            generated_text = o.outputs[0].text
+
+        if generated_text.endswith("<|eot|>"):
+            generated_text = generated_text[:-7]  # strip the trailing <|eot|>
+
+        return generated_text
+
+    def generate_inner_lmdeploy(self, message, dataset=None):
+        from lmdeploy import GenerationConfig
+        gen_config = GenerationConfig(
+            max_new_tokens=self.generate_kwargs['max_new_tokens'],
+            top_p=self.generate_kwargs['top_p'],
+            top_k=self.generate_kwargs['top_k'],
+            temperature=self.generate_kwargs['temperature'],
+            repetition_penalty=self.generate_kwargs['repetition_penalty'],
+        )
+        gen_config.random_seed = None
+        messages_list = self.message_to_lmdeploy(message, system_prompt=self.system_prompt)
+        assert len(messages_list) == 1
+        response = self.model(messages_list, gen_config=gen_config)[0]
+        response = response.text
+        return response
+
+    def generate_inner(self, message, dataset=None):
+        # Dispatch to the backend selected at construction time.
+        if self.use_vllm:
+            return self.generate_inner_vllm(message, dataset=dataset)
+        elif self.use_lmdeploy:
+            return self.generate_inner_lmdeploy(message, dataset=dataset)
+        else:
+            return self.generate_inner_transformers(message, dataset=dataset)
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/mgm.py b/VLMEvalKit-sudoku/vlmeval/vlm/mgm.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd2a15be8886d176f7748b3797401eee9b4db5c6
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/mgm.py
@@ -0,0 +1,158 @@
+import sys
+import torch
+import os.path as osp
+import os
+import warnings
+from .base import BaseModel
+from ..smp import *
+from PIL import Image
+
+'''
+ Please follow the instructions to download ckpt.
+ https://github.com/dvlab-research/MGM?tab=readme-ov-file#pretrained-weights
+'''
+
+
+class Mini_Gemini(BaseModel):
+    """Wrapper for Mini-Gemini / MGM (https://github.com/dvlab-research/MGM).
+
+    Requires the MGM repo to be cloned locally (passed as `root`) and the
+    MGM-7B-HD checkpoint placed under `root/work_dirs/MGM/MGM-7B-HD`.
+    """
+    INSTALL_REQ = True
+    INTERLEAVE = False
+
+    def __init__(self, model_path, root=None, conv_mode='llava_v1', **kwargs):
+        if root is None:
+            warnings.warn('Please set `root` to Mini_Gemini code directory, \
+                which is cloned from here: "https://github.com/dvlab-research/MGM?tab=readme-ov-file" ')
+            raise ValueError
+        warnings.warn('Please follow the instructions of Mini_Gemini to put the ckpt file in the right place, \
+            which can be found at https://github.com/dvlab-research/MGM?tab=readme-ov-file#structure')
+        assert model_path == 'YanweiLi/MGM-7B-HD', 'We only support MGM-7B-HD for now'
+        self.model_path = model_path
+        sys.path.append(root)
+        try:
+            from mgm.model.builder import load_pretrained_model
+            from mgm.mm_utils import get_model_name_from_path
+        except Exception as e:
+            logging.critical(
+                'Please first install Mini_Gemini and set the root path to use Mini_Gemini, '
+                'which is cloned from here: "https://github.com/dvlab-research/MGM?tab=readme-ov-file" '
+            )
+            raise e
+
+        # MGM's loader resolves relative paths, so chdir into the repo and
+        # restore the working directory afterwards.
+        VLMEvalKit_path = os.getcwd()
+        os.chdir(root)
+        warnings.warn('Please set `root` to Mini_Gemini code directory, \
+            which is cloned from here: "https://github.com/dvlab-research/MGM?tab=readme-ov-file" ')
+        model_path = osp.join(root, 'work_dirs', 'MGM', 'MGM-7B-HD')
+        try:
+            model_name = get_model_name_from_path(model_path)
+        except Exception as e:
+            logging.critical(
+                'Please follow the instructions of Mini_Gemini to put the ckpt file in the right place, '
+                'which can be found at https://github.com/dvlab-research/MGM?tab=readme-ov-file#structure'
+            )
+            raise e
+
+        tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, model_name)
+        os.chdir(VLMEvalKit_path)
+        self.model = model
+        self.tokenizer = tokenizer
+        self.image_processor = image_processor
+        self.conv_mode = conv_mode
+
+        kwargs_default = dict(temperature=float(0), num_beams=1, top_p=None, max_new_tokens=1024, use_cache=True)
+        kwargs_default.update(kwargs)
+        # Greedy decoding when temperature == 0.
+        do_sample = kwargs_default['temperature'] > 0
+        kwargs_default.update({'do_sample': do_sample})
+        self.kwargs = kwargs_default
+
+    def generate_inner(self, message, dataset=None):
+        try:
+            from mgm.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, \
+                DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+            from mgm.conversation import conv_templates
+            from mgm.mm_utils import tokenizer_image_token, process_images
+        except Exception as e:
+            logging.critical(
+                'Please first install Mini_Gemini and set the root path to use Mini_Gemini, '
+                'which is cloned from here: "https://github.com/dvlab-research/MGM?tab=readme-ov-file" '
+            )
+            raise e
+
+        prompt, image = self.message_to_promptimg(message, dataset=dataset)
+        image = Image.open(image)
+        prompt = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + prompt
+        conv = conv_templates[self.conv_mode].copy()
+        conv.append_message(conv.roles[0], prompt)
+        conv.append_message(conv.roles[1], None)
+        prompt = conv.get_prompt()
+
+        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+        input_ids = input_ids.unsqueeze(0).cuda()
+        # MGM-HD uses a high-resolution auxiliary branch: reconfigure the
+        # processor to the aux image size, keeping the raw size for later.
+        if hasattr(self.model.config, 'image_size_aux'):
+            if not hasattr(self.image_processor, 'image_size_raw'):
+                self.image_processor.image_size_raw = self.image_processor.crop_size.copy()
+            self.image_processor.crop_size['height'] = self.model.config.image_size_aux
+            self.image_processor.crop_size['width'] = self.model.config.image_size_aux
+            self.image_processor.size['shortest_edge'] = self.model.config.image_size_aux
+        image_tensor = process_images([image], self.image_processor, self.model.config)[0]
+        image_grid = getattr(self.model.config, 'image_grid', 1)
+        if hasattr(self.model.config, 'image_size_aux'):
+            raw_shape = [
+                self.image_processor.image_size_raw['height'] * image_grid,
+                self.image_processor.image_size_raw['width'] * image_grid
+            ]
+            # Keep the high-res tensor for the aux branch, downscale a copy
+            # for the main branch.
+            image_tensor_aux = image_tensor
+            image_tensor = torch.nn.functional.interpolate(
+                image_tensor[None],
+                size=raw_shape,
+                mode='bilinear',
+                align_corners=False
+            )[0]
+        else:
+            image_tensor_aux = []
+        if image_grid >= 2:
+            # Split the main tensor into an image_grid x image_grid patch grid.
+            raw_image = image_tensor.reshape(
+                3, image_grid, self.image_processor.image_size_raw['height'],
+                image_grid, self.image_processor.image_size_raw['width']
+            )
+            raw_image = raw_image.permute(1, 3, 0, 2, 4)
+            raw_image = raw_image.reshape(
+                -1, 3, self.image_processor.image_size_raw['height'], self.image_processor.image_size_raw['width']
+            )
+
+            if getattr(self.model.config, 'image_global', False):
+                # Append a downscaled whole-image view after the crops.
+                global_image = image_tensor
+                if len(global_image.shape) == 3:
+                    global_image = global_image[None]
+                global_image = torch.nn.functional.interpolate(
+                    global_image,
+                    size=[
+                        self.image_processor.image_size_raw['height'],
+                        self.image_processor.image_size_raw['width']
+                    ],
+                    mode='bilinear',
+                    align_corners=False
+                )
+                # [image_crops, image_global]
+                raw_image = torch.cat([raw_image, global_image], dim=0)
+            image_tensor = raw_image.contiguous()
+
+        images = image_tensor[None].to(dtype=self.model.dtype, device='cuda', non_blocking=True)
+        if len(image_tensor_aux) > 0:
+            images_aux = image_tensor_aux[None].to(dtype=self.model.dtype, device='cuda', non_blocking=True)
+        else:
+            images_aux = None
+
+        with torch.inference_mode():
+            output_ids = self.model.generate(
+                input_ids,
+                images=images,
+                images_aux=images_aux,
+                # no_repeat_ngram_size=3,
+                bos_token_id=self.tokenizer.bos_token_id,  # Begin of sequence token
+                eos_token_id=self.tokenizer.eos_token_id,  # End of sequence token
+                pad_token_id=self.tokenizer.pad_token_id,  # Pad token
+                **self.kwargs
+            )
+
+        outputs = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+        return outputs
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/smolvlm.py b/VLMEvalKit-sudoku/vlmeval/vlm/smolvlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e896245231f50b9ad91ccc46cc1fd0c26ec723e
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/smolvlm.py
@@ -0,0 +1,869 @@
+import torch
+import os.path as osp
+import warnings
+from .base import BaseModel
+from ..smp import splitlen
+from PIL import Image
+
+import os
+import math
+
+
+class SmolVLM(BaseModel):
+    """VLMEvalKit wrapper around HuggingFace SmolVLM (Idefics3-based)."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = True
+
+    def __init__(self, model_path="HuggingFaceTB/SmolVLM-Instruct", **kwargs):
+        """Load processor + model onto CUDA; extra kwargs become generation config."""
+        from transformers import AutoProcessor, Idefics3ForConditionalGeneration
+
+        # model_path is either a local directory or a "org/name" HF repo id.
+        assert osp.exists(model_path) or splitlen(model_path) == 2
+
+        self.processor = AutoProcessor.from_pretrained(model_path)
+        # NOTE(review): float32 on CUDA doubles memory vs bf16/fp16 — confirm intentional.
+        self.model = Idefics3ForConditionalGeneration.from_pretrained(
+            model_path, torch_dtype=torch.float32, device_map="cuda"
+        )
+        kwargs_default = {"max_new_tokens": 2048, "use_cache": True}
+        kwargs_default.update(kwargs)
+        self.kwargs = kwargs_default
+        warnings.warn(
+            f"Following kwargs received: {self.kwargs}, will use as generation config."
+        )
+        torch.cuda.empty_cache()
+
+    def generate_inner(self, message, dataset=None):
+        """Dispatch to a dataset-specific prompt builder, generate, and return
+        the decoded continuation with the prompt tokens stripped."""
+        if dataset in [
+            "MMBench_DEV_EN",
+            "MMBench_TEST_EN",
+            "MMBench_DEV_CN",
+            "MMBench_TEST_CN",
+            "MMBench",
+            "MMBench_CN",
+            "MMBench_DEV_EN_V11",
+            "MMBench_DEV_CN_V11",
+            "MMBench_TEST_EN_V11",
+            "MMBench_TEST_CN_V11",
+            "MMBench_V11",
+            "MMBench_CN_V11",
+            "CCBench",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_mmbench(message)
+        elif dataset in ["MMMU_DEV_VAL", "MMMU_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_mmmu(message)
+        elif dataset in ["MathVista_MINI"]:
+            formatted_messages, formatted_images = self.build_prompt_mathvista(message)
+        elif dataset in ["ChartQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_chartqa(message)
+        elif dataset in ["DocVQA_VAL", "DocVQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_docvqa(message)
+        elif dataset in ["TextVQA_VAL", "TextVQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_textvqa(message)
+        elif dataset in [
+            "MME",
+            "MMVet",
+            "OCRVQA_TEST",
+            "OCRVQA_TESTCORE",
+            "InfoVQA_VAL",
+            "InfoVQA_TEST",
+            "OCRBench",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_default(
+                message, add_brief=True
+            )
+        elif dataset == "HallusionBench":
+            formatted_messages, formatted_images = self.build_prompt_default(
+                message, add_yes_or_no=True
+            )
+        elif dataset in [
+            "MMStar",
+            "SEEDBench_IMG",
+            "AI2D_TEST",
+            "ScienceQA_VAL",
+            "ScienceQA_TEST",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_puremcq(message)
+        else:
+            formatted_messages, formatted_images = self.build_prompt_default(message)
+
+        # Builders may return a single PIL image or a list; normalize to a list.
+        images = (
+            [formatted_images]
+            if isinstance(formatted_images, Image.Image)
+            else formatted_images
+        )
+        inputs = self.processor(
+            text=formatted_messages, images=images, return_tensors="pt"
+        )
+        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
+
+        generated_ids = self.model.generate(**inputs, **self.kwargs)
+        # Decode only the newly generated tokens, not the echoed prompt.
+        generated_text = self.processor.batch_decode(
+            generated_ids[:, inputs["input_ids"].size(1):], skip_special_tokens=True
+        )[0]
+
+        return generated_text.strip()
+
+ def build_prompt_default(self, message, add_brief=False, add_yes_or_no=False):
+ from transformers.image_utils import load_image
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ prompt += ""
+ elif msg["type"] == "text":
+ prompt += msg["value"].strip()
+ if add_brief:
+ prompt += "\nGive a very brief answer."
+ if add_yes_or_no:
+ prompt += "\nAnswer yes or no."
+ prompt += "\nAssistant:"
+ return prompt, images
+
+ def build_prompt_puremcq(self, message):
+ from transformers.image_utils import load_image
+
+ replace_mapping = {
+ "\nOptions:": "\nChoices:",
+ "Please select the correct answer from the options above.": "Answer with the letter.",
+ }
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ prompt += ""
+ elif msg["type"] == "text":
+ instruction = msg["value"].strip()
+ for k, v in replace_mapping.items():
+ instruction = instruction.replace(k, v)
+ prompt += instruction
+ prompt += "\nAssistant: Answer:"
+ return prompt, images
+
+ def build_prompt_mt(self, message):
+ from transformers.image_utils import load_image
+
+ prompt, images = "", []
+ for msg in message:
+ if msg["role"] == "user":
+ prompt += "User: "
+ elif msg["role"] == "assistant":
+ prompt += "Assistant: "
+ for item in msg["content"]:
+ if item["type"] == "image":
+ img = load_image(item["value"])
+ images.append(img)
+ elif item["type"] == "text":
+ prompt += item["value"].strip()
+ prompt += "\n"
+ return prompt + "Assistant: "
+
+ def build_prompt_mmbench(self, message):
+ from transformers.image_utils import load_image
+
+ replace_mapping = {
+ "\nOptions:": "\nChoices:",
+ "Please select the correct answer from the options above.": "Answer with a letter.",
+ }
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ elif msg["type"] == "text":
+ instruction = msg["value"].strip()
+ for k, v in replace_mapping.items():
+ instruction = instruction.replace(k, v)
+ # Swap hint and question
+ if instruction.startswith("Hint:"):
+ hint, question = instruction.split("\nQuestion:")
+ question, choices = question.split("\nChoices:")
+ instruction = (
+ "Question:" + question + "\n" + hint + "\nChoices:" + choices
+ )
+ prompt += instruction
+ prompt += "\nAssistant: Answer:"
+ return prompt, images
+
+    def build_prompt_mmmu(self, message):
+        """MMMU prompt builder: strips 'Question:' labels, renames Options to
+        Choices, and appends ' Answer:' when lettered choices are present."""
+        from transformers.image_utils import load_image
+
+        replace_mapping = {
+            "Question:": "",
+            "Please select the correct answer from the options above.": "Answer with the letter.",
+            "\nOptions:": "\nChoices:",
+        }
+
+        prompt, images, img_counter = "<|im_start|>User: Question: ", [], 1
+        # NOTE(review): both f-strings below contain no placeholders — they look
+        # like image-tag templates (e.g. "<image {img_counter}>") whose contents
+        # were lost; this first loop only appends ":\n" per image. Confirm intent.
+        for msg in message:
+            if msg["type"] == "image":
+                prompt += f":\n"
+                img_counter += 1
+        img_counter = 1
+
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+                prompt += f" "
+                img_counter += 1
+            elif msg["type"] == "text":
+                instruction = msg["value"].strip()
+                for k, v in replace_mapping.items():
+                    instruction = instruction.replace(k, v)
+                prompt += instruction.strip()
+        prompt += "\nAssistant:"
+        # Heuristic: lettered options present -> prime the model to answer a letter.
+        if "A." in prompt and "B." in prompt:
+            prompt += " Answer:"
+        return prompt, images
+
+ def build_prompt_mathvista(self, message):
+ from transformers.image_utils import load_image
+
+ replace_mapping = {
+ "(A) ": "A. ",
+ "(B) ": "B. ",
+ "(C) ": "C. ",
+ "(D) ": "D. ",
+ "(E) ": "E. ",
+ "(F) ": "F. ",
+ "(G) ": "G. ",
+ "(H) ": "H. ",
+ "\nOptions:": "\nChoices:",
+ "Hint: ": "",
+ }
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ elif msg["type"] == "text":
+ instruction = msg["value"].strip()
+ for k, v in replace_mapping.items():
+ instruction = instruction.replace(k, v)
+ prompt += instruction.strip()
+
+ prompt += "\nAssistant:"
+ if "A." in prompt and "B." in prompt:
+ prompt += " Answer:"
+ return prompt, images
+
+    def build_prompt_chartqa(self, message):
+        """ChartQA prompt: fixed instruction preamble followed by the question."""
+        from transformers.image_utils import load_image
+
+        prompt = (
+            "<|im_start|>User:For the question below, follow the following instructions:\n"
+            + "-The answer should contain as few words as possible.\n"
+            + "-Don’t paraphrase or reformat the text you see in the image.\n"
+            + "-Answer a binary question with Yes or No.\n"
+            + "-When asked to give a numerical value, provide a number like 2 instead of Two.\n"
+            + "-If the final answer has two or more items, provide it in the list format like [1, 2].\n"
+            + "-When asked to give a ratio, give out the decimal value like 0.25 instead of 1:4.\n"
+            + "-When asked to give a percentage, give out the whole value like 17 instead of decimal like 0.17%.\n"
+            + "-Don’t include any units in the answer.\n"
+            + "-Do not include any full stops at the end of the answer.\n"
+            + "-Try to include the full label from the graph when asked about an entity.\n"
+            + "Question: "
+        )
+        images = []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                prompt += msg["value"].strip()
+        prompt += "\nAssistant:"
+        return prompt, images
+
+ def build_prompt_docvqa(self, message):
+ from transformers.image_utils import load_image
+
+ prompt = (
+ "<|im_start|>User:Give a short and terse answer to the following question. "
+ + "Do not paraphrase or reformat the text you see in the image. Do not include any full stops. "
+ + "Just give the answer without additional explanation. Question: "
+ )
+
+ images = []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ elif msg["type"] == "text":
+ prompt += msg["value"].strip()
+ prompt += "\nAssistant:"
+ return prompt, images
+
+    def build_prompt_textvqa(self, message):
+        """TextVQA prompt: short-answer instruction preamble plus the question."""
+        from transformers.image_utils import load_image
+
+        prompt = (
+            "<|im_start|>User:Answer the following question about the image using as few words as possible. "
+            + "Follow these additional instructions:\n"
+            + "-Always answer a binary question with Yes or No.\n"
+            + "-When asked what time it is, reply with the time seen in the image.\n"
+            + "-Do not put any full stops at the end of the answer.\n"
+            + "-Do not put quotation marks around the answer.\n"
+            + "-An answer with one or two words is favorable.\n"
+            + "-Do not apply common sense knowledge. The answer can be found in the image.\n"
+            + "Question: "
+        )
+        images = []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                prompt += msg["value"].strip()
+        prompt += "\nAssistant:"
+        return prompt, images
+
+    def chat_inner(self, message, dataset=None):
+        """Multi-turn chat entry point: build a flat prompt, apply the chat
+        template, generate, and return the new tokens decoded."""
+        # NOTE(review): build_prompt_mt returns a single string, so this
+        # two-value unpack will raise unless that helper is fixed to return
+        # (prompt, images) — verify.
+        formatted_messages, formatted_images = self.build_prompt_mt(message)
+        images = (
+            [formatted_images]
+            if isinstance(formatted_images, Image.Image)
+            else formatted_images
+        )
+
+        # Single user turn carrying one image slot plus the flattened text.
+        resulting_messages = [
+            {
+                "role": "user",
+                "content": [{"type": "image"}]
+                + [{"type": "text", "text": formatted_messages}],
+            }
+        ]
+        prompt = self.processor.apply_chat_template(
+            resulting_messages, add_generation_prompt=True
+        )
+
+        inputs = self.processor(text=prompt, images=images, return_tensors="pt")
+        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
+
+        generated_ids = self.model.generate(**inputs, **self.kwargs)
+        # Decode only tokens past the prompt length.
+        generated_text = self.processor.batch_decode(
+            generated_ids[:, inputs["input_ids"].size(1):], skip_special_tokens=True
+        )[0]
+
+        return generated_text.strip()
+
+
+class SmolVLM2(BaseModel):
+    """VLMEvalKit wrapper for SmolVLM2 with image and video dataset support."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = True
+
+    def __init__(self, model_path="HuggingFaceTB/SmolVLM2-2.2B-Instruct", **kwargs):
+        """Load processor + model; pick the frame resolution from the model name."""
+        from transformers import AutoProcessor, AutoModelForImageTextToText
+        import torch
+
+        assert osp.exists(model_path) or splitlen(model_path) == 2
+
+        # Max number of video frames kept after uniform sampling.
+        self.sampling_frames = 64
+        # Set resolution based on model
+        if "SmolVLM2-2.2B" in model_path:
+            self.resolution = 384
+        elif "SmolVLM2-256M" in model_path or "SmolVLM2-500M" in model_path:
+            self.resolution = 512
+        else:
+            raise ValueError(f"Unknown model {model_path}, cannot determine resolution")
+
+        self.processor = AutoProcessor.from_pretrained(model_path)
+        # NOTE(review): float32 on CUDA doubles memory vs bf16/fp16 — confirm intentional.
+        self.model = AutoModelForImageTextToText.from_pretrained(
+            model_path,
+            torch_dtype=torch.float32,
+        ).to("cuda")
+
+        kwargs_default = {"max_new_tokens": 2048, "do_sample": False, "use_cache": True}
+        kwargs_default.update(kwargs)
+        self.kwargs = kwargs_default
+        warnings.warn(
+            f"Following kwargs received: {self.kwargs}, will use as generation config."
+        )
+        torch.cuda.empty_cache()
+
+    def generate_inner(self, message, dataset=None):
+        """Dispatch to a dataset-specific prompt builder (incl. video datasets),
+        generate, and return the decoded continuation."""
+        if dataset in [
+            "MMBench_DEV_EN",
+            "MMBench_TEST_EN",
+            "MMBench_DEV_CN",
+            "MMBench_TEST_CN",
+            "MMBench",
+            "MMBench_CN",
+            "MMBench_DEV_EN_V11",
+            "MMBench_DEV_CN_V11",
+            "MMBench_TEST_EN_V11",
+            "MMBench_TEST_CN_V11",
+            "MMBench_V11",
+            "MMBench_CN_V11",
+            "CCBench",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_mmbench(message)
+        elif dataset in ["MMMU_DEV_VAL", "MMMU_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_mmmu(message)
+        elif dataset in ["MathVista_MINI"]:
+            formatted_messages, formatted_images = self.build_prompt_mathvista(message)
+        elif dataset in ["ChartQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_chartqa(message)
+        elif dataset in ["DocVQA_VAL", "DocVQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_docvqa(message)
+        elif dataset in ["TextVQA_VAL", "TextVQA_TEST"]:
+            formatted_messages, formatted_images = self.build_prompt_textvqa(message)
+        elif dataset in [
+            "MME",
+            "MMVet",
+            "OCRVQA_TEST",
+            "OCRVQA_TESTCORE",
+            "InfoVQA_VAL",
+            "InfoVQA_TEST",
+            "OCRBench",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_default(
+                message, add_brief=True
+            )
+        elif dataset == "HallusionBench":
+            formatted_messages, formatted_images = self.build_prompt_default(
+                message, add_yes_or_no=True
+            )
+        elif dataset in [
+            "MMStar",
+            "SEEDBench_IMG",
+            "AI2D_TEST",
+            "ScienceQA_VAL",
+            "ScienceQA_TEST",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_puremcq(message)
+        elif dataset in [
+            "MMBench-Video",
+            "MLVU",
+            "MLVU_MCQ",
+            "MLVU_OpenEnded",
+            "TempCompass",
+            "TempCompass_MCQ",
+            "TempCompass_Captioning",
+            "TempCompass_YorN",
+            "MVBench",
+            "MVBench_MP4",
+            "Video-MME",
+            "LongVideoBench",
+        ]:
+            formatted_messages, formatted_images = self.build_prompt_video(
+                message, dataset
+            )
+        else:
+            formatted_messages, formatted_images = self.build_prompt_default(message)
+
+        # Convert to list if single image
+        images = (
+            [formatted_images]
+            if isinstance(formatted_images, Image.Image)
+            else formatted_images
+        )
+
+        # Process text and images directly
+        inputs = self.processor(
+            text=formatted_messages, images=images, return_tensors="pt"
+        ).to(self.model.device)
+
+        # Generate response
+        generated_ids = self.model.generate(**inputs, **self.kwargs)
+
+        # Decode only the new tokens, not the entire sequence
+        generated_text = self.processor.batch_decode(
+            generated_ids[:, inputs["input_ids"].size(1):], skip_special_tokens=True
+        )[0]
+
+        return generated_text.strip()
+
+ def build_prompt_default(self, message, add_brief=False, add_yes_or_no=False):
+ from transformers.image_utils import load_image
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ prompt += ""
+ elif msg["type"] == "text":
+ prompt += msg["value"].strip()
+ if add_brief:
+ prompt += "\nGive a very brief answer."
+ if add_yes_or_no:
+ prompt += "\nAnswer yes or no."
+ prompt += "\nAssistant:"
+ return prompt, images
+
+ def read_image(self, path):
+ """Read and convert an image to RGB format"""
+ from PIL import Image
+
+ return Image.open(path).convert("RGB")
+
+ def build_prompt_video(self, message, dataset, add_timestamps=True):
+ """Build prompt for video datasets with frame sampling"""
+ import numpy as np
+ from transformers.image_utils import load_image
+ from PIL import Image
+
+ # Configure processor for video frames
+ self.processor.image_processor.size = {"longest_edge": self.resolution}
+ self.processor.image_processor.do_resize = True
+ self.processor.image_processor.do_image_splitting = False
+ self.processor.do_image_splitting = False
+ self.processor.image_size = {"longest_edge": self.resolution}
+
+ # Initialize prompt parts and image lists
+ prompt_parts = []
+ image_blocks = []
+ images = []
+
+ # Find system message first
+ system_message = next(
+ (
+ msg
+ for msg in message
+ if msg["type"] == "text" and msg.get("role") == "system"
+ ),
+ None,
+ )
+
+ # Add system message with proper format if it exists
+ if system_message:
+ prompt_parts.extend(
+ ["<|im_start|>System:", system_message["value"], "\n"]
+ )
+ else:
+ # Adding default system message
+ prompt_parts.extend(
+ [
+ "<|im_start|>System:",
+ "pay attention to the video and answer the question",
+ "\n",
+ ]
+ )
+
+ # Add User prefix
+ prompt_parts.extend(
+ ["<|im_start|>User:", "Here are some frames sampled from a video:\n"]
+ )
+
+ # Process image blocks
+ text_messages = []
+ current_block = []
+
+ for msg in message:
+ if msg["type"] == "image":
+ current_block.append(msg)
+ else:
+ if current_block:
+ image_blocks.append(current_block)
+ current_block = []
+ if (
+ msg.get("role") != "system"
+ ): # Skip system message as it's already added
+ text_messages.append(msg)
+
+ if current_block:
+ image_blocks.append(current_block)
+
+ # Process image blocks with sampling if needed
+ for block in image_blocks:
+ if len(block) > self.sampling_frames:
+ frame_indices = np.linspace(
+ 0, len(block) - 1, self.sampling_frames, dtype=int
+ ).tolist()
+ trimmed_block = [block[i] for i in frame_indices]
+ block_timestamps = [f"{i // 60:02}:{i % 60:02}" for i in frame_indices]
+ else:
+ trimmed_block = block
+ block_timestamps = [
+ f"{i // 60:02}:{i % 60:02}" for i in range(len(block))
+ ]
+
+ # Add frames with optional timestamps
+ for img, ts in zip(trimmed_block, block_timestamps):
+ ts_str = f"{ts}" if add_timestamps else ""
+ prompt_parts.extend([f"Frame from {ts_str}:", ""])
+ try:
+ images.append(load_image(img["value"]))
+ except:
+ images.append(self.read_image(img["value"]))
+ prompt_parts.append("\n")
+
+ # Add remaining text
+ for msg in text_messages:
+ prompt_parts.append(msg["value"].strip())
+
+ # Finalize prompt
+ prompt_parts.append("")
+ prompt_parts.append("\nAssistant:")
+
+ # Combine prompt parts
+ prompt = " ".join(prompt_parts)
+
+ # Format prompt based on dataset type
+ if dataset in ["MLVU_MCQ", "MLVU_OpenEnded", "LongVideoBench"]:
+ prompt = prompt.replace(
+ "Options:",
+ "respond ONLY with one of the multiple choice letter options (A/B/C/D):",
+ )
+ elif dataset in [
+ "TempCompass_MCQ",
+ "TempCompass_Captioning",
+ "TempCompass_YorN",
+ ]:
+ if dataset == "TempCompass_MCQ":
+ prompt = prompt.replace("Options:", "Choices:")
+ prompt = prompt.replace(
+ "Please select the correct answer from the options above.",
+ "Answer with the letter.",
+ )
+ elif dataset in ["MVBench", "MVBench_MP4"]:
+ if "Options:" in prompt:
+ prompt = prompt.replace(
+ "Options:",
+ "respond ONLY with one of the multiple choice letter options (A/B/C/D):",
+ )
+ prompt = prompt.replace("Best option:(", "Answer:")
+ elif dataset in ["Video-MME"]:
+ if "Options:" in prompt:
+ prompt = prompt.replace("Options:", "Choices:")
+ prompt = prompt.replace(
+ "Please select the correct answer from the options above.",
+ "Answer with the letter.",
+ )
+ elif dataset in ["MLVU", "MMBench-Video", "TempCompass"]:
+ # Generic handling for MLVU, TempCompass, MMBench-Video dataset
+ pass
+ else:
+ print(f"Warning: No specific formatting for {dataset}, using default")
+
+ return prompt, images
+
+    def build_prompt_puremcq(self, message):
+        """MCQ prompt: rename 'Options' to 'Choices' and request a bare letter."""
+        from transformers.image_utils import load_image
+
+        replace_mapping = {
+            "\nOptions:": "\nChoices:",
+            "Please select the correct answer from the options above.": "Answer with the letter.",
+        }
+
+        prompt, images = "<|im_start|>User:", []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+                prompt += ""
+            elif msg["type"] == "text":
+                instruction = msg["value"].strip()
+                for k, v in replace_mapping.items():
+                    instruction = instruction.replace(k, v)
+                prompt += instruction
+        prompt += "\nAssistant: Answer:"
+        return prompt, images
+
+ def build_prompt_mt(self, message):
+ from transformers.image_utils import load_image
+
+ prompt, images = "", []
+ for msg in message:
+ if msg["role"] == "user":
+ prompt += "User: "
+ elif msg["role"] == "assistant":
+ prompt += "Assistant: "
+ for item in msg["content"]:
+ if item["type"] == "image":
+ img = load_image(item["value"])
+ images.append(img)
+ elif item["type"] == "text":
+ prompt += item["value"].strip()
+ prompt += "\n"
+ return prompt + "Assistant: "
+
+ def build_prompt_mmbench(self, message):
+ from transformers.image_utils import load_image
+
+ replace_mapping = {
+ "\nOptions:": "\nChoices:",
+ "Please select the correct answer from the options above.": "Answer with a letter.",
+ }
+
+ prompt, images = "<|im_start|>User:", []
+ for msg in message:
+ if msg["type"] == "image":
+ img = load_image(msg["value"])
+ images.append(img)
+ elif msg["type"] == "text":
+ instruction = msg["value"].strip()
+ for k, v in replace_mapping.items():
+ instruction = instruction.replace(k, v)
+ # Swap hint and question
+ if instruction.startswith("Hint:"):
+ hint, question = instruction.split("\nQuestion:")
+ question, choices = question.split("\nChoices:")
+ instruction = (
+ "Question:" + question + "\n" + hint + "\nChoices:" + choices
+ )
+ prompt += instruction
+ prompt += "\nAssistant: Answer:"
+ return prompt, images
+
+    def build_prompt_mmmu(self, message):
+        """MMMU prompt builder: strips 'Question:' labels, renames Options to
+        Choices, and appends ' Answer:' when lettered choices are present."""
+        from transformers.image_utils import load_image
+
+        replace_mapping = {
+            "Question:": "",
+            "Please select the correct answer from the options above.": "Answer with the letter.",
+            "\nOptions:": "\nChoices:",
+        }
+
+        prompt, images, img_counter = "<|im_start|>User: Question: ", [], 1
+        # NOTE(review): the f-strings below have no placeholders — they look like
+        # image-tag templates whose contents were lost; confirm intended output.
+        for msg in message:
+            if msg["type"] == "image":
+                prompt += f":\n"
+                img_counter += 1
+        img_counter = 1
+
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+                prompt += f" "
+                img_counter += 1
+            elif msg["type"] == "text":
+                instruction = msg["value"].strip()
+                for k, v in replace_mapping.items():
+                    instruction = instruction.replace(k, v)
+                prompt += instruction.strip()
+        prompt += "\nAssistant:"
+        if "A." in prompt and "B." in prompt:
+            prompt += " Answer:"
+        return prompt, images
+
+    def build_prompt_mathvista(self, message):
+        """MathVista prompt: convert '(A) ' options to 'A. ', rename Options to
+        Choices, drop the 'Hint: ' label, and prime 'Answer:' for MCQ."""
+        from transformers.image_utils import load_image
+
+        replace_mapping = {
+            "(A) ": "A. ",
+            "(B) ": "B. ",
+            "(C) ": "C. ",
+            "(D) ": "D. ",
+            "(E) ": "E. ",
+            "(F) ": "F. ",
+            "(G) ": "G. ",
+            "(H) ": "H. ",
+            "\nOptions:": "\nChoices:",
+            "Hint: ": "",
+        }
+
+        prompt, images = "<|im_start|>User:", []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                instruction = msg["value"].strip()
+                for k, v in replace_mapping.items():
+                    instruction = instruction.replace(k, v)
+                prompt += instruction.strip()
+
+        prompt += "\nAssistant:"
+        # Lettered options present -> prime the model to answer with a letter.
+        if "A." in prompt and "B." in prompt:
+            prompt += " Answer:"
+        return prompt, images
+
+    def build_prompt_chartqa(self, message):
+        """ChartQA prompt: fixed instruction preamble followed by the question."""
+        from transformers.image_utils import load_image
+
+        prompt = (
+            "<|im_start|>User:For the question below, follow the following instructions:\n"
+            + "-The answer should contain as few words as possible.\n"
+            + "-Don't paraphrase or reformat the text you see in the image.\n"
+            + "-Answer a binary question with Yes or No.\n"
+            + "-When asked to give a numerical value, provide a number like 2 instead of Two.\n"
+            + "-If the final answer has two or more items, provide it in the list format like [1, 2].\n"
+            + "-When asked to give a ratio, give out the decimal value like 0.25 instead of 1:4.\n"
+            + "-When asked to give a percentage, give out the whole value like 17 instead of decimal like 0.17%.\n"
+            + "-Don't include any units in the answer.\n"
+            + "-Do not include any full stops at the end of the answer.\n"
+            + "-Try to include the full label from the graph when asked about an entity.\n"
+            + "Question: "
+        )
+        images = []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                prompt += msg["value"].strip()
+        prompt += "\nAssistant:"
+        return prompt, images
+
+    def build_prompt_docvqa(self, message):
+        """DocVQA prompt: terse-answer preamble followed by the question text."""
+        from transformers.image_utils import load_image
+
+        prompt = (
+            "<|im_start|>User:Give a short and terse answer to the following question. "
+            + "Do not paraphrase or reformat the text you see in the image. Do not include any full stops. "
+            + "Just give the answer without additional explanation. Question: "
+        )
+
+        images = []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                prompt += msg["value"].strip()
+        prompt += "\nAssistant:"
+        return prompt, images
+
+    def build_prompt_textvqa(self, message):
+        """TextVQA prompt: short-answer instruction preamble plus the question."""
+        from transformers.image_utils import load_image
+
+        prompt = (
+            "<|im_start|>User:Answer the following question about the image using as few words as possible. "
+            + "Follow these additional instructions:\n"
+            + "-Always answer a binary question with Yes or No.\n"
+            + "-When asked what time it is, reply with the time seen in the image.\n"
+            + "-Do not put any full stops at the end of the answer.\n"
+            + "-Do not put quotation marks around the answer.\n"
+            + "-An answer with one or two words is favorable.\n"
+            + "-Do not apply common sense knowledge. The answer can be found in the image.\n"
+            + "Question: "
+        )
+        images = []
+        for msg in message:
+            if msg["type"] == "image":
+                img = load_image(msg["value"])
+                images.append(img)
+            elif msg["type"] == "text":
+                prompt += msg["value"].strip()
+        prompt += "\nAssistant:"
+        return prompt, images
+
+    def chat_inner(self, message, dataset=None):
+        """Multi-turn chat entry point mirroring SmolVLM.chat_inner."""
+        # Use the same build_prompt_mt method as in SmolVLM
+        # NOTE(review): build_prompt_mt returns a single string, so this
+        # two-value unpack will raise unless that helper is fixed to return
+        # (prompt, images) — verify.
+        formatted_messages, formatted_images = self.build_prompt_mt(message)
+        images = (
+            [formatted_images]
+            if isinstance(formatted_images, Image.Image)
+            else formatted_images
+        )
+
+        # Process text and images directly
+        inputs = self.processor(
+            text=formatted_messages, images=images, return_tensors="pt"
+        ).to(self.model.device)
+
+        # Generate response
+        generated_ids = self.model.generate(**inputs, **self.kwargs)
+
+        # Decode only the new tokens, not the entire sequence
+        generated_text = self.processor.batch_decode(
+            generated_ids[:, inputs["input_ids"].size(1):], skip_special_tokens=True
+        )[0]
+
+        return generated_text.strip()
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/transcore_m.py b/VLMEvalKit-sudoku/vlmeval/vlm/transcore_m.py
new file mode 100644
index 0000000000000000000000000000000000000000..b35250e200fb734af104de5e6d86b74a132992fa
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/transcore_m.py
@@ -0,0 +1,162 @@
+import sys
+import torch
+from abc import abstractproperty
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE
+from transformers import AutoTokenizer, BitsAndBytesConfig
+
+
+class TransCoreM(BaseModel):
+    """VLMEvalKit wrapper for PCIResearch/TransCore-M (QWen-based VLM)."""
+
+    INSTALL_REQ = True
+    INTERLEAVE = False
+
+    def load_pretrained_model(self, model_path, load_8bit=False, load_4bit=False, revision='main'):
+        """Load tokenizer, model, vision tower and image processor.
+
+        Returns (tokenizer, model, image_processor, context_len). Quantization
+        is mutually exclusive: 8-bit wins over 4-bit; otherwise fp16.
+        """
+        from transcorem.model import TransCoreMQWenForCausalLM
+        from transcorem.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+        import transcorem.config_param as config_param
+        kwargs = {'revision': revision}
+        if load_8bit:
+            kwargs['load_in_8bit'] = True
+        elif load_4bit:
+            kwargs['load_in_4bit'] = True
+            kwargs['quantization_config'] = BitsAndBytesConfig(
+                load_in_4bit=True,
+                bnb_4bit_compute_dtype=torch.float16,
+                bnb_4bit_use_double_quant=True,
+                bnb_4bit_quant_type='nf4'
+            )
+        else:
+            kwargs['torch_dtype'] = torch.float16
+
+        config_param.model_path = model_path
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_path, use_fast=False, revision=revision, trust_remote_code=True)
+        model = TransCoreMQWenForCausalLM.from_pretrained(
+            model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
+
+        image_processor = None
+        # Register the extra multimodal tokens the checkpoint expects, then
+        # grow the embedding matrix to match the enlarged vocabulary.
+        mm_use_im_start_end = getattr(model.config, 'mm_use_im_start_end', False)
+        mm_use_im_patch_token = getattr(model.config, 'mm_use_im_patch_token', True)
+        if mm_use_im_patch_token:
+            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+        if mm_use_im_start_end:
+            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+        model.resize_token_embeddings(len(tokenizer))
+
+        vision_tower = model.get_vision_tower()
+        if not vision_tower.is_loaded:
+            vision_tower.load_model()
+        vision_tower.to(device='cuda', dtype=torch.float16)
+        image_processor = vision_tower.image_processor
+
+        # Fall back to 2048 when the checkpoint does not declare a max length.
+        if hasattr(model.config, 'max_sequence_length'):
+            context_len = model.config.max_sequence_length
+        else:
+            context_len = 2048
+
+        return tokenizer, model, image_processor, context_len
+
+    def __init__(self,
+                 root=None,
+                 revision='main',
+                 **kwargs):
+        """Set up the TransCore-M wrapper.
+
+        root: path to the transcorem package checkout (appended to sys.path).
+        Extra kwargs become the generation config.
+        """
+        self.root = root
+        self.revision = revision
+        sys.path.append(root)
+
+        model_path = 'PCIResearch/TransCore-M'
+        assert osp.exists(model_path) or splitlen(model_path) == 2
+        self.tokenizer, self.model, self.image_processor, self.context_len = self.load_pretrained_model(
+            model_path=model_path, revision=revision)
+        self.model = self.model.cuda()
+        print('==============conv_mode: transcorem_v1')
+        self.conv_mode = 'transcorem_v1'
+
+        kwargs_default = dict(do_sample=False, temperature=0.0, max_new_tokens=512, top_p=None, num_beams=1)
+        kwargs_default.update(kwargs)
+        self.kwargs = kwargs_default
+        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
+
+ def use_custom_prompt(self, dataset):
+ assert dataset is not None
+ if DATASET_TYPE(dataset) == 'MCQ':
+ return True
+ return False
+
+    def build_prompt(self, line, dataset=None):
+        """Build an MCQ message list (text first, then dumped image paths)."""
+        assert dataset is None or isinstance(dataset, str)
+        assert self.use_custom_prompt(dataset)
+        tgt_path = self.dump_image(line, dataset)
+
+        question = line['question']
+        # Prepend the hint (if present) to the question text.
+        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
+        if hint is not None:
+            question = hint + '\n' + question
+
+        # Collect available lettered options from columns A, B, C, ...
+        options = {
+            cand: line[cand]
+            for cand in string.ascii_uppercase
+            if cand in line and not pd.isna(line[cand])
+        }
+        for key, item in options.items():
+            question += f'\n{key}. {item}'
+        prompt = question
+
+        # Chinese prompts get Chinese answer instructions.
+        if len(options):
+            prompt += (
+                '\n请直接回答选项字母。' if cn_string(prompt) else
+                "\nAnswer with the option's letter from the given choices directly."
+            )
+        else:
+            prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.'
+        message = [dict(type='text', value=prompt)]
+        message.extend([dict(type='image', value=f) for f in tgt_path])
+        return message
+
+    def generate_inner(self, message, dataset=None):
+        """Run one image+text query through TransCore-M and return the answer."""
+        from transcorem.mm_utils import highres_process_images, tokenizer_image_token, KeywordsStoppingCriteria
+        from transcorem.constants import (
+            IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN)
+        from transcorem.conversation import conv_templates, SeparatorStyle
+
+        prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
+        image = Image.open(image_path).convert('RGB')
+        # abstractproperty() is used as a lightweight attribute bag for the
+        # image-processing options expected by highres_process_images.
+        args = abstractproperty()
+        args.image_aspect_ratio = 'pad'
+        image_patches = highres_process_images(image, self.image_processor, args, base_reso=336)
+        image_patches = [patch.unsqueeze(0).to('cuda', dtype=torch.float16) for patch in image_patches]
+        # Prepend the image token(s) in whichever format the checkpoint expects.
+        if self.model.config.mm_use_im_start_end:
+            inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + prompt
+        else:
+            inp = DEFAULT_IMAGE_TOKEN + '\n' + prompt
+
+        conv = conv_templates[self.conv_mode].copy()
+        conv.append_message(conv.roles[0], inp)
+        conv.append_message(conv.roles[1], None)
+        prompt_conv = conv.get_prompt()
+        input_ids = tokenizer_image_token(prompt_conv, self.tokenizer, IMAGE_TOKEN_INDEX,
+                                          return_tensors='pt').unsqueeze(0).cuda()
+        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
+        keywords = [stop_str]
+        stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)
+        with torch.inference_mode():
+            output_ids = self.model.generate(
+                input_ids,
+                images=image_patches,
+                use_cache=True,
+                stopping_criteria=[stopping_criteria],
+                **self.kwargs)
+
+        # Sanity check: generate() should echo the prompt ids unchanged.
+        input_token_len = input_ids.shape[1]
+        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
+        if n_diff_input_output > 0:
+            print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
+        outputs = self.tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
+        outputs = outputs.strip()
+        # Trim the trailing stop string if the model emitted it verbatim.
+        if outputs.endswith(stop_str):
+            outputs = outputs[:-len(stop_str)]
+        outputs = outputs.strip()
+        return outputs
diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/vintern_chat.py b/VLMEvalKit-sudoku/vlmeval/vlm/vintern_chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53d82f25cf5e0dfa8b8421c42bcddef0e40f5f0
--- /dev/null
+++ b/VLMEvalKit-sudoku/vlmeval/vlm/vintern_chat.py
@@ -0,0 +1,395 @@
+import torch
+from transformers import AutoTokenizer, AutoConfig, AutoModel, CLIPImageProcessor
+import warnings
+from PIL import Image
+from .base import BaseModel
+from ..smp import *
+from ..dataset import DATASET_TYPE, DATASET_MODALITY
+import pandas as pd
+import string
+import torch.distributed as dist
+import torchvision.transforms as T
+import transformers
+
+from torchvision.transforms.functional import InterpolationMode
+import re
+
+
# ImageNet channel statistics; used by build_transform to normalize pixel
# values to the distribution the vision backbone was pretrained on.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
+
+
def build_transform(input_size):
    """Build the preprocessing pipeline for a single image tile.

    Converts a PIL image to RGB (if needed), resizes it to a square of
    ``input_size`` pixels with bicubic interpolation, and returns a float
    tensor normalized with the ImageNet mean/std constants above.
    """
    steps = [
        T.Lambda(lambda im: im if im.mode == 'RGB' else im.convert('RGB')),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    return T.Compose(steps)
+
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Select the (cols, rows) tiling whose aspect ratio best matches the image.

    Scans ``target_ratios`` for the grid whose cols/rows ratio is closest to
    ``aspect_ratio``.  On an exact tie, the candidate is preferred only when
    the source image area exceeds half the area the tiled grid would cover,
    i.e. larger images may claim a grid with more tiles.
    """
    best = (1, 1)
    best_diff = float('inf')
    area = width * height
    for cols, rows in target_ratios:
        diff = abs(aspect_ratio - cols / rows)
        if diff < best_diff:
            best_diff = diff
            best = (cols, rows)
        elif diff == best_diff and area > 0.5 * image_size * image_size * cols * rows:
            best = (cols, rows)
    return best
+
+
def dynamic_preprocess(image, min_num=1, max_num=4, image_size=448, use_thumbnail=False):
    """Split ``image`` into a grid of square ``image_size`` tiles.

    The grid shape is the candidate (cols, rows) whose aspect ratio best
    matches the image (see ``find_closest_aspect_ratio``), constrained to
    ``min_num <= cols * rows <= max_num`` tiles.  When ``use_thumbnail`` is
    set and more than one tile is produced, a full-image thumbnail is
    appended as an extra tile.
    """
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # Enumerate every admissible grid shape, ordered by tile count.
    candidate_grids = sorted(
        {(c, r)
         for n in range(min_num, max_num + 1)
         for c in range(1, n + 1)
         for r in range(1, n + 1)
         if min_num <= c * r <= max_num},
        key=lambda grid: grid[0] * grid[1])

    cols, rows = find_closest_aspect_ratio(
        aspect_ratio, candidate_grids, orig_width, orig_height, image_size)

    target_width = image_size * cols
    target_height = image_size * rows

    # Resize to the exact grid footprint, then carve out each tile row-major.
    resized = image.resize((target_width, target_height))
    tiles = []
    for idx in range(cols * rows):
        x = (idx % cols) * image_size
        y = (idx // cols) * image_size
        tiles.append(resized.crop((x, y, x + image_size, y + image_size)))
    assert len(tiles) == cols * rows
    if use_thumbnail and len(tiles) != 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles
+
+
def load_image(image_file, input_size=448, max_num=6, upscale=False):
    """Load an image file and return its tiles as one stacked float tensor.

    Optionally doubles the image's resolution first (``upscale``), then tiles
    it with ``dynamic_preprocess`` (thumbnail included) and normalizes each
    tile via ``build_transform``.  Returns a tensor of shape
    (num_tiles, 3, input_size, input_size).
    """
    img = Image.open(image_file).convert('RGB')
    if upscale:
        img = img.resize((img.width * 2, img.height * 2), Image.BILINEAR)
    tiles = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
    to_tensor = build_transform(input_size=input_size)
    return torch.stack([to_tensor(tile) for tile in tiles])
+
+
class VinternChat(BaseModel):
    """VLMEvalKit wrapper for the 5CD-AI Vintern chat models (InternVL2-style).

    Builds dataset-specific prompts, tiles images dynamically, and drives the
    remote model's ``chat`` API for both single-turn (``generate_inner``) and
    multi-turn (``chat_inner``) evaluation.

    FIX (restored content): the ``<image>`` / ``<Image-N>`` placeholder tokens
    had been stripped from every prompt string (leaving empty f-strings such
    as ``f''``), so no image placeholder ever reached the model.  They are
    restored below per the InternVL2 chat protocol.
    """

    INSTALL_REQ = False
    INTERLEAVE = True

    def __init__(self, model_path='5CD-AI/Vintern-3B-beta', load_in_8bit=False, **kwargs):
        """Load tokenizer and model weights; ``kwargs`` override the default
        generation config (do_sample/max_new_tokens/top_p/num_beams)."""
        assert model_path is not None
        assert version_cmp(transformers.__version__, '4.36.2', 'ge')

        self.model_path = model_path
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)

        # Regular expression to match the pattern 'Image' followed by a number, e.g. Image1
        self.pattern = r'Image(\d+)'
        # Replacement pattern to insert a hyphen between 'Image' and the number, e.g. Image-1
        self.replacement = r'Image-\1'

        # Reverse direction: convert the model's response back to the
        # dataset format, e.g. Image-1 -> Image1.
        self.reverse_pattern = r'Image-(\d+)'
        self.reverse_replacement = r'Image\1'

        device = torch.cuda.current_device()
        self.device = device
        self.model = AutoModel.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            load_in_8bit=load_in_8bit).eval()
        # 8-bit loading places weights itself; only move full-precision models.
        if not load_in_8bit:
            self.model = self.model.to(device)

        self.image_size = self.model.config.vision_config.image_size
        kwargs_default = dict(do_sample=False, max_new_tokens=1024, top_p=None, num_beams=3)
        kwargs_default.update(kwargs)
        self.kwargs = kwargs_default

        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')

    def use_custom_prompt(self, dataset):
        """Return True when this model builds its own prompt for ``dataset``
        (everything except multi-turn and video benchmarks)."""
        if dataset is None:
            return False
        if listinstr(['MMDU', 'MME-RealWorld', 'MME-RealWorld-CN'], dataset):
            # For Multi-Turn we don't have custom prompt
            return False
        if DATASET_MODALITY(dataset) == 'VIDEO':
            # For Video benchmarks we don't have custom prompt at here
            return False
        else:
            return True

    def build_multi_choice_prompt(self, line, dataset=None):
        """Compose a multiple-choice question: optional hint, question text,
        lettered options, and a direct-answer instruction (CN or EN)."""
        question = line['question']
        hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
        if hint is not None:
            question = hint + '\n' + question

        options = {
            cand: line[cand]
            for cand in string.ascii_uppercase
            if cand in line and not pd.isna(line[cand])
        }
        for key, item in options.items():
            question += f'\n{key}. {item}'
        prompt = question

        if len(options):
            prompt += '\n请直接回答选项字母。' if cn_string(
                prompt) else "\nAnswer with the option's letter from the given choices directly."
        else:
            prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.'

        return prompt

    def build_video_prompt(self, prompt, dataset=None, max_frames=64):
        """Adapt an interleaved-image prompt for video benchmarks: drop full
        8-frame placeholder runs, rename Image-k to Frame-k, and apply
        dataset-specific answer-format tweaks."""
        for start in range(0, max_frames, 8):
            # FIX: restore the stripped '<Image-i>' tokens; with empty strings
            # the replace() below was a no-op.
            images_to_remove = ''.join([f'<Image-{i}>' for i in range(start + 1, start + 9)])
            prompt = prompt.replace(images_to_remove, '')
        for i in range(max_frames):
            prompt = prompt.replace(f'Image-{i + 1}', f'Frame-{i + 1}')
        if listinstr(['MMBench-Video'], dataset):
            prompt = prompt.replace('\nAnswer:', '')
        elif listinstr(['Video-MME'], dataset):
            prompt = prompt.replace('\nAnswer:', '')
            prompt += "\nAnswer with the option's letter from the given choices directly."
        elif listinstr(['MVBench'], dataset):
            prompt = prompt.replace('Best option:(', '')

        return prompt

    def build_prompt(self, line, dataset=None):
        """Build the message list (text + image paths) for one dataset row.

        NOTE: also resets ``self.kwargs`` to dataset-specific generation
        defaults as a side effect (e.g. shorter outputs for MTVQA, greedy
        decoding for MMMU).
        """
        assert self.use_custom_prompt(dataset)
        assert dataset is None or isinstance(dataset, str)
        tgt_path = self.dump_image(line, dataset)

        kwargs_default = dict(do_sample=False, max_new_tokens=1024, top_p=None, num_beams=3)

        if listinstr(['MTVQA'], dataset):
            kwargs_default["max_new_tokens"] = 256

        if listinstr(['MMMU_DEV_VAL', 'MMMU_TEST'], dataset):
            kwargs_default["num_beams"] = 1

        self.kwargs = kwargs_default

        if dataset is not None and DATASET_TYPE(dataset) == 'Y/N':
            question = line['question']
            if listinstr(['MME'], dataset):
                prompt = question + ' Answer the question using a single word or phrase.'
            elif listinstr(['HallusionBench'], dataset):
                prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.'
            else:
                prompt = line['question']
        elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ':
            prompt = self.build_multi_choice_prompt(line, dataset)
        elif dataset is not None and DATASET_TYPE(dataset) == 'VQA':
            question = line['question']
            if listinstr(['MathVista', 'MathVision', 'VCR', 'MTVQA', 'MMVet', 'MathVerse'], dataset):
                prompt = question
            elif listinstr(['LLaVABench'], dataset):
                prompt = question + '\nAnswer this question in detail.'
            else:
                prompt = question + '\nAnswer the question using a single word or phrase.'
        else:
            prompt = line['question']
        message = [dict(type='text', value=prompt)]
        message.extend([dict(type='image', value=s) for s in tgt_path])
        return message

    def set_max_num(self, dataset):
        """Set ``self.max_num`` (max tiles per image for dynamic_preprocess)
        according to the dataset's resolution demands."""
        if dataset is None:
            self.max_num = 1
            return

        # res_1_datasets = ['MMBench-Video', 'Video-MME', 'MVBench', 'Video']
        res_12_datasets = ['ChartQA_TEST', 'MMMU_DEV_VAL', 'MMMU_TEST', 'MME-RealWorld',
                           'MME-RealWorld', 'VCR_EN', 'VCR_ZH']
        res_18_datasets = ['DocVQA_VAL', 'DocVQA_TEST']
        res_24_datasets = ['InfoVQA_VAL', 'InfoVQA_TEST', 'OCRBench', 'HRBench4K', 'HRBench8K']
        if DATASET_MODALITY(dataset) == 'VIDEO':
            self.max_num = 1
        elif listinstr(res_12_datasets, dataset):
            self.max_num = 6  # 12
        elif listinstr(res_18_datasets, dataset):
            self.max_num = 6  # 18
        elif listinstr(res_24_datasets, dataset):
            self.max_num = 6  # 24
        elif listinstr(["MME"], dataset):
            self.max_num = 6  # 24
        else:
            self.max_num = 6  # 6

    def generate_v2(self, message, dataset=None):
        """Single-turn generation: build the prompt with image placeholders,
        tile and batch all images, and call the model's ``chat`` API."""
        image_num = len([x for x in message if x['type'] == 'image'])
        if image_num == 1:
            # FIX: restore the stripped '<image>' placeholder.
            prompt = '<image>\n' + '\n'.join([x['value'] for x in message if x['type'] == 'text'])
        else:
            prompt, image_idx = '', 1
            for x in message:
                if x['type'] == 'text':
                    prompt += x['value']
                elif x['type'] == 'image':
                    # FIX: restore the stripped '<Image-N>' placeholder.
                    prompt += f'<Image-{image_idx}>'
                    image_idx += 1
            # FIX: restore the stripped '<image>' token in each header line.
            prompt = '\n'.join([f'Image-{i + 1}: <image>' for i in range(image_num)]) + '\n' + prompt

        if dataset is not None and DATASET_MODALITY(dataset) == 'VIDEO':
            prompt = self.build_video_prompt(prompt, dataset)

        if image_num > 1:
            image_path = [x['value'] for x in message if x['type'] == 'image']
            num_patches_list = []
            pixel_values_list = []
            for image_idx, file_name in enumerate(image_path):
                # Only the first image of an MMMU sample is upscaled.
                upscale_flag = image_idx == 0 and dataset is not None and listinstr(['MMMU_DEV_VAL'], dataset)
                curr_pixel_values = load_image(
                    file_name, max_num=self.max_num, upscale=upscale_flag).to(self.device).to(torch.bfloat16)
                num_patches_list.append(curr_pixel_values.size(0))
                pixel_values_list.append(curr_pixel_values)
            pixel_values = torch.cat(pixel_values_list, dim=0)
        elif image_num == 1:
            image_path = [x['value'] for x in message if x['type'] == 'image'][0]
            upscale_flag = dataset is not None and listinstr(['MMMU_DEV_VAL'], dataset)
            pixel_values = load_image(
                image_path, max_num=self.max_num, upscale=upscale_flag).to(self.device).to(torch.bfloat16)
            num_patches_list = [pixel_values.size(0)]
        else:
            pixel_values = None
            num_patches_list = []

        with torch.no_grad():
            response = self.model.chat(
                self.tokenizer,
                pixel_values=pixel_values,
                num_patches_list=num_patches_list,
                question=prompt,
                generation_config=self.kwargs,
                verbose=False
            )
        return response

    def generate_inner(self, message, dataset=None):
        """Entry point for single-turn evaluation."""
        self.set_max_num(dataset)
        return self.generate_v2(message, dataset)

    def build_history(self, message):
        """Convert previous (user, assistant) message pairs into the model's
        history format; also collects image paths and the image count."""
        # Shared across both turns of every pair.
        image_path = []
        image_cnt = 0

        def concat_tilist(tilist):
            # Flatten one turn's text/image items into a single string.
            nonlocal image_cnt  # Declare image_cnt as nonlocal to modify it
            prompt = ''
            for item in tilist:
                # Substitute the pattern in the text
                if item['type'] == 'text':
                    prompt += re.sub(self.pattern, self.replacement, item['value'])
                elif item['type'] == 'image':
                    image_cnt += 1
                    # FIX: restore the stripped '<image>' placeholder.
                    prompt += '<image>\n'
                    image_path.append(item['value'])
            return prompt

        # Only previous messages
        assert len(message) % 2 == 0
        history = []
        for i in range(len(message) // 2):
            m1, m2 = message[2 * i], message[2 * i + 1]
            assert m1['role'] == 'user' and m2['role'] == 'assistant'
            history.append((concat_tilist(m1['content']), concat_tilist(m2['content'])))

        return history, image_path, image_cnt

    def chat_inner_v2(self, message, dataset=None):
        """Multi-turn generation: replay the history, then answer the last
        user turn; returns the response with Image tags converted back."""
        image_cnt = 0
        if len(message) > 1:
            history, image_path, image_cnt = self.build_history(message[:-1])
        else:
            # NOTE(review): a fresh conversation assumes exactly one image in
            # the first turn (image_cnt starts at 1) — confirm against callers.
            history, image_path, image_cnt = None, [], 1
        current_msg = message[-1]
        question = ''

        # If message is just text in the conversation
        if len(current_msg['content']) == 1 and current_msg['content'][0]['type'] == 'text':
            question = current_msg['content'][0]['value']
            question = re.sub(self.pattern, self.replacement, question)  # Fix pattern as per InternVL
        else:
            for msg in current_msg['content']:
                if msg['type'] == 'text':
                    question += re.sub(self.pattern, self.replacement, msg['value'])
                elif msg['type'] == 'image':
                    image_cnt += 1
                    # FIX: restore the stripped '<image>' placeholder.
                    question += '<image>\n'
                    image_path.append(msg['value'])

        if image_cnt > 1:
            num_patches_list = []
            pixel_values_list = []
            for image_idx, file_name in enumerate(image_path):
                upscale_flag = image_idx == 0 and dataset is not None and listinstr(['MMMU_DEV_VAL'], dataset)
                curr_pixel_values = load_image(
                    file_name, max_num=1, upscale=upscale_flag).to(self.device).to(torch.bfloat16)
                num_patches_list.append(curr_pixel_values.size(0))
                pixel_values_list.append(curr_pixel_values)
            pixel_values = torch.cat(pixel_values_list, dim=0)
        elif image_cnt == 1:
            upscale_flag = dataset is not None and listinstr(['MMMU_DEV_VAL'], dataset)
            # FIX: image_path is a list; load_image expects a single file path,
            # so index the sole element (Image.open(list) would raise).
            pixel_values = load_image(
                image_path[0], max_num=self.max_num, upscale=upscale_flag).to(self.device).to(torch.bfloat16)
            num_patches_list = [pixel_values.size(0)]
        else:
            pixel_values = None
            num_patches_list = []

        response, history = self.model.chat(
            self.tokenizer,
            pixel_values=pixel_values,
            num_patches_list=num_patches_list,
            question=question,
            generation_config=self.kwargs,
            history=history,
            return_history=True
        )

        # Convert Image-1 style tags back to the dataset's Image1 format.
        response = re.sub(self.reverse_pattern, self.reverse_replacement, response)

        return response

    def chat_inner(self, message, dataset=None):
        """Entry point for multi-turn evaluation; uses a shorter generation
        budget (512 new tokens) than single-turn."""
        self.set_max_num(dataset)
        kwargs_default = dict(do_sample=False, max_new_tokens=512, top_p=None, num_beams=3)
        self.kwargs = kwargs_default
        return self.chat_inner_v2(message, dataset)
diff --git a/eval_results/GNE_ShapeGrid_sudoku.xlsx b/eval_results/GNE_ShapeGrid_sudoku.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..2c2aae5205ca9959f8d94d85aa07ceed9cdc087c
--- /dev/null
+++ b/eval_results/GNE_ShapeGrid_sudoku.xlsx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c41e1a36f0b9501cc3d84776ee72b00152e48af95237baf284346214b76f783c
+size 112271
diff --git a/eval_results/MiniCPM-o-2_6_appendix_sudoku_.xlsx b/eval_results/MiniCPM-o-2_6_appendix_sudoku_.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..fe7884c6b1a52b76c60657eca78bd3ff07da6b34
Binary files /dev/null and b/eval_results/MiniCPM-o-2_6_appendix_sudoku_.xlsx differ
diff --git a/eval_results/Qwen2.5-VL-7B-Instruct_appendix_sudoku.xlsx b/eval_results/Qwen2.5-VL-7B-Instruct_appendix_sudoku.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..0be9248374c8a2a38df3b9c36bf46b2ab453621e
Binary files /dev/null and b/eval_results/Qwen2.5-VL-7B-Instruct_appendix_sudoku.xlsx differ
diff --git a/figs/appendix_sudoku.png b/figs/appendix_sudoku.png
new file mode 100644
index 0000000000000000000000000000000000000000..4a39a56fa3513995651feea6fb093e3c913c2131
--- /dev/null
+++ b/figs/appendix_sudoku.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:825e85aee6e452808cb8dedac2eca3c9a63a80a4eebfba1c7f992bc70dc845c6
+size 39418
diff --git a/figs/sudoku_result.png b/figs/sudoku_result.png
new file mode 100644
index 0000000000000000000000000000000000000000..703905ec1078b12fb1587fd071e1ea9d6f9d68e2
--- /dev/null
+++ b/figs/sudoku_result.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aebb652c5f1e0e1c0a03351af56718447618686b9caad836e278c01554fd21be
+size 350198