Add files using upload-large-folder tool
Browse files- VLMEvalKit-sudoku/docs/en/ConfigSystem.md +67 -0
- VLMEvalKit-sudoku/docs/en/Makefile +20 -0
- VLMEvalKit-sudoku/docs/en/_static/image/logo_icon.svg +31 -0
- VLMEvalKit-sudoku/docs/en/_static/js/custom.js +10 -0
- VLMEvalKit-sudoku/docs/en/index.rst +41 -0
- VLMEvalKit-sudoku/docs/zh-CN/Quickstart.md +216 -0
- VLMEvalKit-sudoku/docs/zh-CN/_templates/404.html +18 -0
- VLMEvalKit-sudoku/docs/zh-CN/_templates/callable.rst +14 -0
- VLMEvalKit-sudoku/llava/__init__.py +1 -0
- VLMEvalKit-sudoku/llava/__pycache__/utils.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/eval/evaluate_interleave.py +339 -0
- VLMEvalKit-sudoku/llava/eval/run_llava.py +145 -0
- VLMEvalKit-sudoku/llava/eval/summarize_gpt_review.py +60 -0
- VLMEvalKit-sudoku/llava/model/__pycache__/llava_arch.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/language_model/llava_mistral.py +127 -0
- VLMEvalKit-sudoku/llava/model/language_model/llava_mpt.py +105 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/hf_vision.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_siglip2_ps8.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/__init__.py +9 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/eva_vit_model.py +571 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_configs.py +57 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model.py +429 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json +24 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-B-16.json +29 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-L-14.json +29 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json +25 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/modified_resnet.py +179 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/pretrained.py +314 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/rope.py +131 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/transform.py +104 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/transformer.py +683 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/utils.py +321 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_vit.py +141 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/factory.py +60 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA-CLIP-8B-plus.json +27 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA01-CLIP-B-16.json +19 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json +24 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json +25 -0
- VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14-448.json +25 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/builder.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_projector/resampler.py +273 -0
- VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/spatial_pool.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/llava/model/multimodal_resampler/builder.py +34 -0
- VLMEvalKit-sudoku/llava/model/multimodal_resampler/masked_drop.py +80 -0
- VLMEvalKit-sudoku/llava/slice_process.py +281 -0
- VLMEvalKit-sudoku/scripts/srun.sh +3 -0
- VLMEvalKit-sudoku/setup.py +122 -0
- VLMEvalKit-sudoku/vlmeval/api/__pycache__/bailingmm.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/api/__pycache__/jt_vl_chat.cpython-310.pyc +0 -0
- VLMEvalKit-sudoku/vlmeval/api/__pycache__/kimivl_api.cpython-310.pyc +0 -0
VLMEvalKit-sudoku/docs/en/ConfigSystem.md
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Config System
|
| 2 |
+
|
| 3 |
+
By default, VLMEvalKit launches the evaluation by setting the model name(s) (defined in `/vlmeval/config.py`) and dataset name(s) (defined in `vlmeval/dataset/__init__.py` or `vlmeval/dataset/video_dataset_config.py`) in the `run.py` script with the `--model` and `--data` arguments. Such approach is simple and efficient in most scenarios, however, it may not be flexible enough when the user wants to evaluate multiple models / datasets with different settings.
|
| 4 |
+
|
| 5 |
+
To address this, VLMEvalKit provides a more flexible config system. The user can specify the model and dataset settings in a json file, and pass the path to the config file to the `run.py` script with the `--config` argument. Here is a sample config json:
|
| 6 |
+
|
| 7 |
+
```json
|
| 8 |
+
{
|
| 9 |
+
"model": {
|
| 10 |
+
"GPT4o_20240806_T00_HIGH": {
|
| 11 |
+
"class": "GPT4V",
|
| 12 |
+
"model": "gpt-4o-2024-08-06",
|
| 13 |
+
"temperature": 0,
|
| 14 |
+
"img_detail": "high"
|
| 15 |
+
},
|
| 16 |
+
"GPT4o_20240806_T10_Low": {
|
| 17 |
+
"class": "GPT4V",
|
| 18 |
+
"model": "gpt-4o-2024-08-06",
|
| 19 |
+
"temperature": 1.0,
|
| 20 |
+
"img_detail": "low"
|
| 21 |
+
},
|
| 22 |
+
"GPT4o_20241120": {}
|
| 23 |
+
},
|
| 24 |
+
"data": {
|
| 25 |
+
"MME-RealWorld-Lite": {
|
| 26 |
+
"class": "MMERealWorld",
|
| 27 |
+
"dataset": "MME-RealWorld-Lite"
|
| 28 |
+
},
|
| 29 |
+
"MMBench_DEV_EN_V11": {
|
| 30 |
+
"class": "ImageMCQDataset",
|
| 31 |
+
"dataset": "MMBench_DEV_EN_V11"
|
| 32 |
+
},
|
| 33 |
+
"MMBench_Video_8frame_nopack":{},
|
| 34 |
+
"Video-MME_16frame_subs": {
|
| 35 |
+
"class": "VideoMME",
|
| 36 |
+
"dataset": "Video-MME",
|
| 37 |
+
"nframe": 16,
|
| 38 |
+
"use_subtitle": true
|
| 39 |
+
}
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
Explanation of the config json:
|
| 45 |
+
|
| 46 |
+
1. Now we support two fields: `model` and `data`, each of which is a dictionary. The key of the dictionary is the name of the model / dataset (set by the user), and the value is the setting of the model / dataset.
|
| 47 |
+
2. For items in `model`, the value is a dictionary containing the following keys:
|
| 48 |
+
- `class`: The class name of the model, which should be a class name defined in `vlmeval/vlm/__init__.py` (open-source models) or `vlmeval/api/__init__.py` (API models).
|
| 49 |
+
- Other kwargs: Other kwargs are model-specific parameters, please refer to the definition of the model class for detailed usage. For example, `model`, `temperature`, `img_detail` are arguments of the `GPT4V` class. It's noteworthy that the `model` argument is required by most model classes.
|
| 50 |
+
- Tip: The defined model in the `supported_VLM` of `vlmeval/config.py` can be used as a shortcut, for example, `GPT4o_20241120: {}` is equivalent to `GPT4o_20241120: {'class': 'GPT4V', 'model': 'gpt-4o-2024-11-20', 'temperature': 0, 'img_size': -1, 'img_detail': 'high', 'retry': 10, 'verbose': False}`
|
| 51 |
+
3. For the dictionary `data`, we suggest users to use the official dataset name as the key (or part of the key), since we frequently determine the post-processing / judging settings based on the dataset name. For items in `data`, the value is a dictionary containing the following keys:
|
| 52 |
+
- `class`: The class name of the dataset, which should be a class name defined in `vlmeval/dataset/__init__.py`.
|
| 53 |
+
- Other kwargs: Other kwargs are dataset-specific parameters, please refer to the definition of the dataset class for detailed usage. Typically, the `dataset` argument is required by most dataset classes. It's noteworthy that the `nframe` argument or `fps` argument is required by most video dataset classes.
|
| 54 |
+
- Tip: The defined dataset in the `supported_video_datasets` of `vlmeval/dataset/video_dataset_config.py` can be used as a shortcut, for example, `MMBench_Video_8frame_nopack: {}` is equivalent to `MMBench_Video_8frame_nopack: {'class': 'MMBenchVideo', 'dataset': 'MMBench-Video', 'nframe': 8, 'pack': False}`.
|
| 55 |
+
Saving the example config json to `config.json`, you can launch the evaluation by:
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
python run.py --config config.json
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
That will generate the following output files under the working directory `$WORK_DIR` (Following the format `{$WORK_DIR}/{$MODEL_NAME}/{$MODEL_NAME}_{$DATASET_NAME}_*`):
|
| 62 |
+
|
| 63 |
+
- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MME-RealWorld-Lite*`
|
| 64 |
+
- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MME-RealWorld-Lite*`
|
| 65 |
+
- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MMBench_DEV_EN_V11*`
|
| 66 |
+
- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MMBench_DEV_EN_V11*`
|
| 67 |
+
...
|
VLMEvalKit-sudoku/docs/en/Makefile
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Minimal makefile for Sphinx documentation
|
| 2 |
+
#
|
| 3 |
+
|
| 4 |
+
# You can set these variables from the command line, and also
|
| 5 |
+
# from the environment for the first two.
|
| 6 |
+
SPHINXOPTS ?=
|
| 7 |
+
SPHINXBUILD ?= sphinx-build
|
| 8 |
+
SOURCEDIR = .
|
| 9 |
+
BUILDDIR = _build
|
| 10 |
+
|
| 11 |
+
# Put it first so that "make" without argument is like "make help".
|
| 12 |
+
help:
|
| 13 |
+
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
| 14 |
+
|
| 15 |
+
.PHONY: help Makefile
|
| 16 |
+
|
| 17 |
+
# Catch-all target: route all unknown targets to Sphinx using the new
|
| 18 |
+
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
| 19 |
+
%: Makefile
|
| 20 |
+
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
VLMEvalKit-sudoku/docs/en/_static/image/logo_icon.svg
ADDED
|
|
VLMEvalKit-sudoku/docs/en/_static/js/custom.js
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
var collapsedSections = [];
|
| 2 |
+
|
| 3 |
+
$(document).ready(function () {
|
| 4 |
+
$('.model-summary').DataTable({
|
| 5 |
+
"stateSave": false,
|
| 6 |
+
"lengthChange": false,
|
| 7 |
+
"pageLength": 20,
|
| 8 |
+
"order": []
|
| 9 |
+
});
|
| 10 |
+
});
|
VLMEvalKit-sudoku/docs/en/index.rst
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Welcome to the VLMEvalKit Tutorial!
|
| 2 |
+
==========================================
|
| 3 |
+
|
| 4 |
+
VLMEvalKit Getting Started Guide
|
| 5 |
+
--------------------------------
|
| 6 |
+
|
| 7 |
+
To help users get started quickly, we recommend the following process:
|
| 8 |
+
|
| 9 |
+
- For users who want to use VLMEvalKit, we recommend reading the "Start Your First Step" section to set up the environment and start a mini-experiment to familiarize yourself with the process.
|
| 10 |
+
|
| 11 |
+
- If you want to customize more modules, such as adding datasets and models, we provide an "Advanced Tutorial."
|
| 12 |
+
|
| 13 |
+
We always welcome users' PRs (Pull Requests) and Issues to improve VLMEvalKit!
|
| 14 |
+
|
| 15 |
+
.. _Start Your First Step:
|
| 16 |
+
.. toctree::
|
| 17 |
+
:maxdepth: 1
|
| 18 |
+
:caption: Start Your First Step
|
| 19 |
+
|
| 20 |
+
Quickstart.md
|
| 21 |
+
|
| 22 |
+
.. _Advanced Tutorial:
|
| 23 |
+
.. toctree::
|
| 24 |
+
:maxdepth: 1
|
| 25 |
+
:caption: Advanced Tutorial
|
| 26 |
+
|
| 27 |
+
Development.md
|
| 28 |
+
ConfigSystem.md
|
| 29 |
+
|
| 30 |
+
.. _Other Notes:
|
| 31 |
+
.. toctree::
|
| 32 |
+
:maxdepth: 1
|
| 33 |
+
:caption: Other Notes
|
| 34 |
+
|
| 35 |
+
Contributors.md
|
| 36 |
+
|
| 37 |
+
Index and Tables
|
| 38 |
+
==================
|
| 39 |
+
|
| 40 |
+
* :ref:`genindex`
|
| 41 |
+
* :ref:`search`
|
VLMEvalKit-sudoku/docs/zh-CN/Quickstart.md
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 快速开始
|
| 2 |
+
|
| 3 |
+
在运行评测脚本之前,你需要先**配置** VLMs,并正确设置模型路径。然后你可以使用脚本 `run.py` 进行多个VLMs和基准测试的推理和评估。
|
| 4 |
+
|
| 5 |
+
## 第0步 安装和设置必要的密钥
|
| 6 |
+
|
| 7 |
+
**安装**
|
| 8 |
+
|
| 9 |
+
```bash
|
| 10 |
+
git clone https://github.com/open-compass/VLMEvalKit.git
|
| 11 |
+
cd VLMEvalKit
|
| 12 |
+
pip install -e .
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
**设置密钥**
|
| 16 |
+
|
| 17 |
+
要使用 API 模型(如 GPT-4v, Gemini-Pro-V 等)进行推理,或使用 LLM API 作为**评判者或选择提取器**,你需要首先设置 API 密钥。如果你设置了密钥,VLMEvalKit 将使用一个评判 LLM 从输出中提取答案,否则它将使用**精确匹配模式**(在输出字符串中查找 "Yes", "No", "A", "B", "C"...)。**精确匹配模式只能应用于是或否任务和多项选择任务。**
|
| 18 |
+
|
| 19 |
+
- 你可以将所需的密钥放在 `$VLMEvalKit/.env` 中,或直接将它们设置为环境变量。如果你选择创建 `.env` 文件,其内容将如下所示:
|
| 20 |
+
|
| 21 |
+
```bash
|
| 22 |
+
# .env 文件,将其放置在 $VLMEvalKit 下
|
| 23 |
+
# 专有 VLMs 的 API 密钥
|
| 24 |
+
# QwenVL APIs
|
| 25 |
+
DASHSCOPE_API_KEY=
|
| 26 |
+
# Gemini w. Google Cloud Backends
|
| 27 |
+
GOOGLE_API_KEY=
|
| 28 |
+
# OpenAI API
|
| 29 |
+
OPENAI_API_KEY=
|
| 30 |
+
OPENAI_API_BASE=
|
| 31 |
+
# StepAI API
|
| 32 |
+
STEPAI_API_KEY=
|
| 33 |
+
# REKA API
|
| 34 |
+
REKA_API_KEY=
|
| 35 |
+
# GLMV API
|
| 36 |
+
GLMV_API_KEY=
|
| 37 |
+
# CongRong API
|
| 38 |
+
CW_API_BASE=
|
| 39 |
+
CW_API_KEY=
|
| 40 |
+
# SenseNova API
|
| 41 |
+
SENSENOVA_API_KEY=
|
| 42 |
+
# Hunyuan-Vision API
|
| 43 |
+
HUNYUAN_SECRET_KEY=
|
| 44 |
+
HUNYUAN_SECRET_ID=
|
| 45 |
+
# LMDeploy API
|
| 46 |
+
LMDEPLOY_API_BASE=
|
| 47 |
+
# 你可以设置一个评估时代理,评估阶段产生的 API 调用将通过这个代理进行
|
| 48 |
+
EVAL_PROXY=
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
- 如果需要使用 API 在对应键值空白处填写上你的密钥。这些 API 密钥将在进行推理和评估时自动加载。
|
| 52 |
+
## 第1步 配置
|
| 53 |
+
|
| 54 |
+
**VLM 配置**:所有 VLMs 都在 `vlmeval/config.py` 中配置。对于某些 VLMs(如 MiniGPT-4、LLaVA-v1-7B),需要额外的配置(在配置文件中配置代码 / 模型权重根目录)。在评估时,你应该使用 `vlmeval/config.py` 中 `supported_VLM` 指定的模型名称来选择 VLM。确保在开始评估之前,你可以成功使用 VLM 进行推理,使用以下命令 `vlmutil check {MODEL_NAME}`。
|
| 55 |
+
|
| 56 |
+
## 第2步 评测
|
| 57 |
+
|
| 58 |
+
**新功能!!!** 我们集成了一个新的配置系统,以实现更灵活的评估设置。查看[文档](/docs/zh-CN/ConfigSystem.md)或运行`python run.py --help`了解更多详情 🔥🔥🔥
|
| 59 |
+
|
| 60 |
+
我们使用 `run.py` 进行评估。你可以使用 `$VLMEvalKit/run.py` 或创建脚本的软链接运行(以便在任何地方使用该脚本):
|
| 61 |
+
|
| 62 |
+
**参数**
|
| 63 |
+
|
| 64 |
+
- `--data (list[str])`: 设置在 VLMEvalKit 中支持的数据集名称(可以在代码库首页的 README 中找到支持的数据集列表)
|
| 65 |
+
- `--model (list[str])`: 设置在 VLMEvalKit 中支持的 VLM 名称(在 `vlmeval/config.py` 中的 `supported_VLM` 中定义)
|
| 66 |
+
- `--mode (str, 默认值为 'all', 可选值为 ['all', 'infer'])`:当 mode 设置为 "all" 时,将执行推理和评估;当设置为 "infer" 时,只执行推理
|
| 67 |
+
- `--api-nproc (int, 默认值为 4)`: 调用 API 的线程数
|
| 68 |
+
- `--work-dir (str, 默认值为 '.')`: 存放测试结果的目录
|
| 69 |
+
|
| 70 |
+
**用于评测图像多模态评测集的命令**
|
| 71 |
+
|
| 72 |
+
你可以使用 `python` 或 `torchrun` 来运行脚本:
|
| 73 |
+
|
| 74 |
+
```bash
|
| 75 |
+
# 使用 `python` 运行时,只实例化一个 VLM,并且它可能使用多个 GPU。
|
| 76 |
+
# 这推荐用于评估参数量非常大的 VLMs(如 IDEFICS-80B-Instruct)。
|
| 77 |
+
|
| 78 |
+
# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-80B-Instruct 进行推理和评估
|
| 79 |
+
python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose
|
| 80 |
+
# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-80B-Instruct 仅进行推理
|
| 81 |
+
python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose --mode infer
|
| 82 |
+
|
| 83 |
+
# 使用 `torchrun` 运行时,每个 GPU 上实例化一个 VLM 实例。这可以加快推理速度。
|
| 84 |
+
# 但是,这仅适用于消耗少量 GPU 内存的 VLMs。
|
| 85 |
+
|
| 86 |
+
# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-9B-Instruct、Qwen-VL-Chat、mPLUG-Owl2。在具有 8 个 GPU 的节点上进行推理和评估。
|
| 87 |
+
torchrun --nproc-per-node=8 run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct qwen_chat mPLUG-Owl2 --verbose
|
| 88 |
+
# 在 MME 上使用 Qwen-VL-Chat。在具有 2 个 GPU 的节点上进行推理和评估。
|
| 89 |
+
torchrun --nproc-per-node=2 run.py --data MME --model qwen_chat --verbose
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
**用于评测视频多模态评测集的命令**
|
| 93 |
+
|
| 94 |
+
```bash
|
| 95 |
+
# 使用 `python` 运行时,只实例化一个 VLM,并且它可能使用多个 GPU。
|
| 96 |
+
# 这推荐用于评估参数量非常大的 VLMs(如 IDEFICS-80B-Instruct)。
|
| 97 |
+
|
| 98 |
+
# 在 MMBench-Video 上评测 IDEFCIS2-8B, 视频采样 8 帧作为输入,不采用 pack 模式评测. MMBench_Video_8frame_nopack 是一个定义在 `vlmeval/dataset/video_dataset_config.py` 的数据集设定.
|
| 99 |
+
torchrun --nproc-per-node=8 run.py --data MMBench_Video_8frame_nopack --model idefics2_8b
|
| 100 |
+
# 在 MMBench-Video 上评测 GPT-4o (API 模型), 视频采样每秒一帧作为输入,采用 pack 模式评测
|
| 101 |
+
python run.py --data MMBench_Video_1fps_pack --model GPT4o
|
| 102 |
+
```
|
| 103 |
+
|
| 104 |
+
评估结果将作为日志打印出来。此外,**结果文件**也会在目录 `$YOUR_WORKING_DIRECTORY/{model_name}` 中生成。以 `.csv` 结尾的文件包含评估的指标。
|
| 105 |
+
### 常见问题
|
| 106 |
+
#### 构建输入prompt:`build_prompt()`函数
|
| 107 |
+
如果您在评测某个benchmark时,发现模型输出的结果与预期不符,可能是因为您使用的模型没有正确构建输入prompt。
|
| 108 |
+
|
| 109 |
+
在VLMEvalkit中,每个`dataset`类都包含一个名为`build_prompt()`的函数,用于构建输入问题的格式。不同的benchmark可以选择自定义`build_prompt()`函数,也可以使用默认的实现。
|
| 110 |
+
|
| 111 |
+
例如,在处理默认的[多选题/Multi-Choice QA]([vlmeval/dataset/image_mcq.py](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/dataset/image_mcq.py#L164))时,`ImageMCQDataset.build_prompt()`类会将`hint`、`question`、`options`等元素(若数据集中包含)组合成一个完整的问题格式,如下所示:
|
| 112 |
+
```
|
| 113 |
+
HINT
|
| 114 |
+
QUESTION
|
| 115 |
+
Options:
|
| 116 |
+
A. Option A
|
| 117 |
+
B. Option B
|
| 118 |
+
···
|
| 119 |
+
Please select the correct answer from the options above.
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
此外,由于不同模型对评测的需求可能有所不同,VLMEvalkit也支持在模型层面自定义对不同benchmark构建prompt的方法,即`model.build_prompt()`,具体示例可以参考[InternVL](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/vlm/internvl_chat.py#L324)。
|
| 123 |
+
|
| 124 |
+
**注意:当同时定义了`model.build_prompt()`以及`dataset.build_prompt()`时,`model.build_prompt()`将优先于`dataset.build_prompt()`,即前者会覆盖后者。**
|
| 125 |
+
|
| 126 |
+
由于部分模型(如Qwen2VL,InternVL等)对于不同类型的benchmark定义了广泛的prompt构建方法,为了更灵活地适应不同的benchmark,VLMEvalkit支持在模型中自定义`model.use_custom_prompt()`函数。通过添加或者修改`use_custom_prompt()`函数,您可以决定对于哪些benchmark使用模型自定义的`use_custom_prompt()`方法,示例如下:
|
| 127 |
+
```
|
| 128 |
+
def use_custom_prompt(self, dataset: str) -> bool:
|
| 129 |
+
from vlmeval.dataset import DATASET_TYPE, DATASET_MODALITY
|
| 130 |
+
dataset_type = DATASET_TYPE(dataset, default=None)
|
| 131 |
+
if not self._use_custom_prompt:
|
| 132 |
+
return False
|
| 133 |
+
if listinstr(['MMVet'], dataset):
|
| 134 |
+
return True
|
| 135 |
+
if dataset_type == 'MCQ':
|
| 136 |
+
return True
|
| 137 |
+
if DATASET_MODALITY(dataset) == 'VIDEO':
|
| 138 |
+
return False
|
| 139 |
+
return False
|
| 140 |
+
```
|
| 141 |
+
仅当`use_custom_prompt()`函数返回`True`时,VLMEvalkit才会对当前benchmark调用模型的`build_prompt()`函数。
|
| 142 |
+
通过这种方式,您可以根据具体需求灵活地控制哪些benchmark使用模型自定义的prompt构建逻辑,从而更好地适配不同模型和任务的需求。
|
| 143 |
+
|
| 144 |
+
#### 模型切分
|
| 145 |
+
|
| 146 |
+
目前 VLMEvalKit 的启动方式自动支持同机上进程间 GPU 资源的划分与模型切分。该功能在推理后端为 `lmdeploy` 或 `transformers` 时被支持,具体行为如下:
|
| 147 |
+
|
| 148 |
+
- 基于 `python` 命令启动时,模型默认分配到所有可用的 GPU 上,如想指定使用哪些 GPU,可以使用 `CUDA_VISIBLE_DEVICES` 环境变量。
|
| 149 |
+
- 基于 `torchrun` 命令启动时,每个模型实例会被分配到 `N_GPU // N_PROC` 个 GPU 上,`N_PROC` 为 torchrun 命令中的 `--nproc-per-node` 参数所指定的进程数。`N_GPU` 的取值为:
|
| 150 |
+
- 如 `CUDA_VISIBLE_DEVICES` 环境变量未设置,`N_GPU` 为全部可用 GPU 数量。
|
| 151 |
+
- 如 `CUDA_VISIBLE_DEVICES` 环境变量被设置,`N_GPU` 为 `CUDA_VISIBLE_DEVICES` 环境变量所指定的 GPU 数量,并且,仅有指定的 GPU 会被利用。
|
| 152 |
+
|
| 153 |
+
下面提供了,在一台配备 8 块 GPU 的机器上运行评测任务的具体示例:
|
| 154 |
+
```bash
|
| 155 |
+
# <!-- 起两个模型实例数据并行,每个实例用 4 GPU -->
|
| 156 |
+
torchrun --nproc-per-node=2 run.py --data MMBench_DEV_EN --model InternVL3-78B
|
| 157 |
+
# <!-- 起一个模型实例,每个实例用 8 GPU -->
|
| 158 |
+
python run.py --data MMBench_DEV_EN --model InternVL3-78B
|
| 159 |
+
# <!-- 起三个模型实例,每个实例用 2 GPU,0 号、7 号 GPU 未被使用 -->
|
| 160 |
+
CUDA_VISIBLE_DEVICES=1,2,3,4,5,6 torchrun --nproc-per-node=3 run.py --data MMBench_DEV_EN --model InternVL3-38B
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
注:此方式不支持 `vllm` 后端,基于 `vllm` 后端起评测任务时,请用 `python` 命令启动,默认调用所有可见的 GPU。
|
| 164 |
+
|
| 165 |
+
#### 性能差距
|
| 166 |
+
在不同的运行环境中,模型的性能表现可能会有所差异。因此,在评估过程中,您可能会发现自己的评测结果与VLMEvalKit官方榜单上的结果存在差距。这种差异可能与`transformers`, `cuda`, `torch`等版本的变化有关。
|
| 167 |
+
|
| 168 |
+
此外，对于异常的表现，我们建议您优先查看运行完成后的本地生成记录`{model}_{dataset}.xlsx`或者评估记录`{model}_{dataset}_{judge_model}.xlsx`，这可能会帮助您更好地理解评估结果并发现问题。
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
### 部署本地语言模型作为评判 / 选择提取器
|
| 173 |
+
上述默认设置使用 OpenAI 的 GPT 作为评判 LLM。你也可以使用 [LMDeploy](https://github.com/InternLM/lmdeploy) 部署本地评判 LLM。
|
| 174 |
+
|
| 175 |
+
首先进行安装:
|
| 176 |
+
```
|
| 177 |
+
pip install lmdeploy openai
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
然后可以通过一行代码部署本地评判 LLM。LMDeploy 将自动从 Huggingface 下载模型。假设我们使用 internlm2-chat-1_8b 作为评判,端口为 23333,密钥为 sk-123456(密钥必须以 "sk-" 开头,后跟任意数字):
|
| 181 |
+
```
|
| 182 |
+
lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
使用以下 Python 代码获取由 LMDeploy 注册的模型名称:
|
| 186 |
+
```
|
| 187 |
+
from openai import OpenAI
|
| 188 |
+
client = OpenAI(
|
| 189 |
+
api_key='sk-123456',
|
| 190 |
+
base_url="http://0.0.0.0:23333/v1"
|
| 191 |
+
)
|
| 192 |
+
model_name = client.models.list().data[0].id
|
| 193 |
+
```
|
| 194 |
+
|
| 195 |
+
配置对应环境变量,以告诉 VLMEvalKit 如何使用本地评判 LLM。正如上面提到的,也可以在 `$VLMEvalKit/.env` 文件中设置:
|
| 196 |
+
```
|
| 197 |
+
OPENAI_API_KEY=sk-123456
|
| 198 |
+
OPENAI_API_BASE=http://0.0.0.0:23333/v1/chat/completions
|
| 199 |
+
LOCAL_LLM=<model_name you get>
|
| 200 |
+
```
|
| 201 |
+
|
| 202 |
+
最后,你可以运行第2步中的命令,使用本地评判 LLM 来评估你的 VLM。
|
| 203 |
+
|
| 204 |
+
**请注意:**
|
| 205 |
+
|
| 206 |
+
- 如果你希望将评判 LLM 部署在单独的一个 GPU 上,并且由于 GPU 内存有限而希望在其他 GPU 上评估你的 VLM,可以使用 `CUDA_VISIBLE_DEVICES=x` 这样的方法,例如:
|
| 207 |
+
```
|
| 208 |
+
CUDA_VISIBLE_DEVICES=0 lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333
|
| 209 |
+
CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc-per-node=3 run.py --data HallusionBench --model qwen_chat --verbose
|
| 210 |
+
```
|
| 211 |
+
- 如果本地评判 LLM 在遵循指令方面不够好,评估过程可能会失败。请通过 issues 报告此类失败情况。
|
| 212 |
+
- 可以以不同的方式部署评判 LLM,例如使用私有 LLM(而非来自 HuggingFace)或使用量化 LLM。请参考 [LMDeploy doc](https://lmdeploy.readthedocs.io/en/latest/serving/api_server.html) 文档。也可以使用其他支持 OpenAI API 框架的方法。
|
| 213 |
+
|
| 214 |
+
### 使用 LMDeploy 加速模型推理
|
| 215 |
+
|
| 216 |
+
可参考[文档](/docs/zh-CN/EvalByLMDeploy.md)
|
VLMEvalKit-sudoku/docs/zh-CN/_templates/404.html
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{% extends "layout.html" %}
|
| 2 |
+
|
| 3 |
+
{% block body %}
|
| 4 |
+
|
| 5 |
+
<h1>Page Not Found</h1>
|
| 6 |
+
<p>
|
| 7 |
+
The page you are looking for cannot be found.
|
| 8 |
+
</p>
|
| 9 |
+
<p>
|
| 10 |
+
If you just switched documentation versions, it is likely that the page you were on is moved. You can look for it in
|
| 11 |
+
the content table left, or go to <a href="{{ pathto(root_doc) }}">the homepage</a>.
|
| 12 |
+
</p>
|
| 13 |
+
<!-- <p>
|
| 14 |
+
If you cannot find documentation you want, please <a
|
| 15 |
+
href="">open an issue</a> to tell us!
|
| 16 |
+
</p> -->
|
| 17 |
+
|
| 18 |
+
{% endblock %}
|
VLMEvalKit-sudoku/docs/zh-CN/_templates/callable.rst
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. role:: hidden
|
| 2 |
+
:class: hidden-section
|
| 3 |
+
.. currentmodule:: {{ module }}
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
{{ name | underline}}
|
| 7 |
+
|
| 8 |
+
.. autoclass:: {{ name }}
|
| 9 |
+
:members:
|
| 10 |
+
:special-members: __call__
|
| 11 |
+
|
| 12 |
+
..
|
| 13 |
+
autogenerated from _templates/callable.rst
|
| 14 |
+
note it does not have :inherited-members:
|
VLMEvalKit-sudoku/llava/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .model import LlavaLlamaForCausalLM
|
VLMEvalKit-sudoku/llava/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (6.97 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/eval/evaluate_interleave.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from rouge import Rouge
|
| 3 |
+
import argparse
|
| 4 |
+
import os
|
| 5 |
+
import json
|
| 6 |
+
import numpy as np
|
| 7 |
+
from sklearn.feature_extraction.text import TfidfVectorizer
|
| 8 |
+
from sklearn.metrics.pairwise import cosine_similarity
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
spot_the_diff = ["Spot-the-Diff", "Birds-to-Words", "CLEVR-Change"]
|
| 12 |
+
image_edit_instruct = ["IEdit", "HQ-Edit", "MagicBrush"]
|
| 13 |
+
visual_story_telling = ["AESOP", "FlintstonesSV", "PororoSV", "VIST"]
|
| 14 |
+
visual_cloze = ["COMICS_Dialogue", "RecipeQA_VisualCloze"]
|
| 15 |
+
text_rich_vqa = ["WebQA", "TQA", "OCR-VQA", "DocVQA"]
|
| 16 |
+
multi_image_vqa = ["MIT-States_StateCoherence", "MIT-States_PropertyCoherence", "VISION", "RecipeQA_ImageCoherence"]
|
| 17 |
+
|
| 18 |
+
puzzle = ["RAVEN"]
|
| 19 |
+
nlrv2 = ["NLVR2_Mantis"]
|
| 20 |
+
qbench = ["QBench"]
|
| 21 |
+
|
| 22 |
+
class Eval:
|
| 23 |
+
def __init__(self):
|
| 24 |
+
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
|
| 25 |
+
self.commaStrip = re.compile("(\d)(\,)(\d)")
|
| 26 |
+
self.punct = [
|
| 27 |
+
";",
|
| 28 |
+
r"/",
|
| 29 |
+
"[",
|
| 30 |
+
"]",
|
| 31 |
+
'"',
|
| 32 |
+
"{",
|
| 33 |
+
"}",
|
| 34 |
+
"(",
|
| 35 |
+
")",
|
| 36 |
+
"=",
|
| 37 |
+
"+",
|
| 38 |
+
"\\",
|
| 39 |
+
"_",
|
| 40 |
+
"-",
|
| 41 |
+
">",
|
| 42 |
+
"<",
|
| 43 |
+
"@",
|
| 44 |
+
"`",
|
| 45 |
+
",",
|
| 46 |
+
"?",
|
| 47 |
+
"!",
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
def processPunctuation(self, inText):
|
| 51 |
+
outText = inText
|
| 52 |
+
for p in self.punct:
|
| 53 |
+
if (p + " " in inText or " " + p in inText) or (
|
| 54 |
+
re.search(self.commaStrip, inText) != None
|
| 55 |
+
):
|
| 56 |
+
outText = outText.replace(p, "")
|
| 57 |
+
else:
|
| 58 |
+
outText = outText.replace(p, " ")
|
| 59 |
+
outText = self.periodStrip.sub("", outText, re.UNICODE)
|
| 60 |
+
return outText
|
| 61 |
+
|
| 62 |
+
def process(self, answer):
|
| 63 |
+
answer = answer.replace("\n", " ")
|
| 64 |
+
answer = answer.replace("\t", " ")
|
| 65 |
+
answer = answer.strip()
|
| 66 |
+
answer = self.processPunctuation(answer)
|
| 67 |
+
answer = answer.strip('\'')
|
| 68 |
+
answer = answer.strip('\"')
|
| 69 |
+
answer = answer.strip(')')
|
| 70 |
+
answer = answer.strip('(')
|
| 71 |
+
answer = answer.strip().lower()
|
| 72 |
+
return answer
|
| 73 |
+
|
| 74 |
+
def evaluate_rouge(self,preds):
|
| 75 |
+
rouge = Rouge()
|
| 76 |
+
acc = {'f': []}
|
| 77 |
+
eval_list = []
|
| 78 |
+
for i, res in enumerate(preds):
|
| 79 |
+
sample_id = res['sample_id']
|
| 80 |
+
# print(sample_id)
|
| 81 |
+
gt_ans = self.process(res["gt_response"])
|
| 82 |
+
pred_ans = self.process(res["pred_response"])
|
| 83 |
+
# assert gt_ans != ''
|
| 84 |
+
|
| 85 |
+
if gt_ans == '':
|
| 86 |
+
continue
|
| 87 |
+
|
| 88 |
+
if pred_ans == '':
|
| 89 |
+
s = 0
|
| 90 |
+
else:
|
| 91 |
+
if len(pred_ans) > 512:
|
| 92 |
+
pred_ans = pred_ans[0: 512]
|
| 93 |
+
s = rouge.get_scores(pred_ans, gt_ans)[0]['rouge-l']['f']
|
| 94 |
+
acc['f'].append(s)
|
| 95 |
+
eval_list.append({'id':str(sample_id),'score':str(round(s,3))})
|
| 96 |
+
results = {'Rouge-L f': np.mean(acc['f'])}
|
| 97 |
+
return results,eval_list
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def judge_multi_choice(self,sample):
|
| 101 |
+
sample_id = sample['sample_id']
|
| 102 |
+
gt_ans = sample["gt_response"]
|
| 103 |
+
pred_ans = sample["pred_response"]
|
| 104 |
+
|
| 105 |
+
if ":" in pred_ans:
|
| 106 |
+
a_list = pred_ans.split(":")
|
| 107 |
+
a_list = [a.strip() for a in a_list ]
|
| 108 |
+
for a in a_list:
|
| 109 |
+
if len(a) == 1 and a[-1] in ["a", "b", "c", "d", "e", "f", "g", "h"]:
|
| 110 |
+
pred_ans = a
|
| 111 |
+
|
| 112 |
+
if pred_ans == gt_ans:
|
| 113 |
+
return 1
|
| 114 |
+
else:
|
| 115 |
+
return 0
|
| 116 |
+
|
| 117 |
+
def process_sample(self,sample):
|
| 118 |
+
sample["gt_response"] = self.process(sample["gt_response"])
|
| 119 |
+
sample["pred_response"] = self.process(sample["pred_response"])
|
| 120 |
+
|
| 121 |
+
def evaluate_multichoice(self, preditions):
    """Compute accuracy over text multi-choice samples.

    Each sample is normalised in place, scored 0/1, annotated with its
    'result', and collected into a per-sample list.  Returns
    ({'Accuracy': fraction}, per_sample_list).
    """
    eval_list = []
    n_correct = 0
    for sample in preditions:
        self.process_sample(sample)
        score = self.judge_multi_choice(sample)
        sample['result'] = score
        eval_list.append({'id': str(sample['sample_id']), 'score': str(score)})
        n_correct += score
    return {'Accuracy': n_correct / len(preditions)}, eval_list
def evaluate_multi_choice_image(self, preditions):
    """Accuracy for multi-choice questions whose options are images.

    Same letter-extraction rule as judge_multi_choice, but both responses
    are normalised with self.process first.  Returns
    ({'Accuracy': fraction}, per_sample_list).
    """
    eval_list = []
    n_correct = 0
    for sample in preditions:
        gt_ans = self.process(sample["gt_response"])
        pred_ans = self.process(sample["pred_response"])

        if ":" in pred_ans:
            # Keep the LAST single option letter among colon-separated pieces.
            for piece in (part.strip() for part in pred_ans.split(":")):
                if len(piece) == 1 and piece in ("a", "b", "c", "d", "e", "f", "g", "h"):
                    pred_ans = piece

        score = 1 if gt_ans == pred_ans else 0
        sample['result'] = score
        eval_list.append({'id': str(sample['sample_id']), 'score': str(score)})
        n_correct += score
    return {'Accuracy': n_correct / len(preditions)}, eval_list
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--result-dir', type=str, required=True)
    args = parser.parse_args()

    result_file = os.path.join(args.result_dir, "result.jsonl")
    if not os.path.exists(result_file):
        print('No prediction file found')
        exit(0)
    with open(result_file, 'r') as f:
        preds_all = [json.loads(line) for line in f]

    # Group predictions by the dataset they came from.
    preds_all_dict = dict()
    for pred in preds_all:
        if pred["dataset"] not in preds_all_dict:
            preds_all_dict[pred["dataset"]] = list()
        preds_all_dict[pred["dataset"]].append(pred)

    # Multi-choice datasets whose answer options are images, not text.
    image_choice_dataset_list = ["recipeqa-RecipeQA_VisualCloze", "RecipeQA_ImageCoherence", "COMICS_Panel"]
    E = Eval()

    eval_result_list = dict()
    eval_result_list_detail = dict()

    for dataset in preds_all_dict:
        preds = preds_all_dict[dataset]
        question_type = preds[0]["question_type"]

        if question_type == 'open-ended':
            eval_result, eval_list = E.evaluate_rouge(preds)
        elif question_type == 'multi-choice' or dataset == 'nlrv2':
            if dataset in image_choice_dataset_list:
                eval_result, eval_list = E.evaluate_multi_choice_image(preds)
            else:
                eval_result, eval_list = E.evaluate_multichoice(preds)
        else:
            eval_result = 'Dataset not supported'
            print('Dataset not supported')
            exit(0)

        print(dataset, end = ': ')
        print(eval_result)

        eval_result_list[dataset] = eval_result
        eval_result_list_detail[dataset] = eval_list

    os.makedirs(args.result_dir, exist_ok=True)
    with open(os.path.join(args.result_dir, 'eval_dataset.json'), 'w') as f:
        json.dump(eval_result_list, f, indent=4)

    with open(os.path.join(args.result_dir, 'eval_dataset_details.json'), 'w') as f:
        json.dump(eval_result_list_detail, f, indent=4)

    eval_cat_list = dict()
    print()

    # Per-category mean score.  Each category variable (defined earlier in
    # this file) holds the dataset names belonging to it; the previous eight
    # copy-pasted summary blocks are collapsed into one data-driven loop,
    # preserving the original category order and output format exactly.
    categories = {
        "spot_the_diff": spot_the_diff,
        "image_edit_instruct": image_edit_instruct,
        "visual_story_telling": visual_story_telling,
        "visual_cloze": visual_cloze,
        "text_rich_vqa": text_rich_vqa,
        "multi_image_vqa": multi_image_vqa,
        "puzzle": puzzle,
        "nlrv2": nlrv2,
        "qbench": qbench,
    }
    for cat_name, cat_datasets in categories.items():
        score = 0
        count = 0
        for dataset in eval_result_list:
            if dataset in cat_datasets:
                count += 1
                # First (only) metric value of that dataset's result dict.
                score += list(eval_result_list[dataset].values())[0]
        if count > 0:
            score /= count
            eval_cat_list[cat_name] = score
            print(cat_name, end = ': ')
            print('{:.2f}'.format(100 * score))

    with open(os.path.join(args.result_dir, 'eval_cat.json'), 'w') as f:
        json.dump(eval_cat_list, f, indent=4)
|
VLMEvalKit-sudoku/llava/eval/run_llava.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from llava.constants import (
|
| 5 |
+
IMAGE_TOKEN_INDEX,
|
| 6 |
+
DEFAULT_IMAGE_TOKEN,
|
| 7 |
+
DEFAULT_IM_START_TOKEN,
|
| 8 |
+
DEFAULT_IM_END_TOKEN,
|
| 9 |
+
IMAGE_PLACEHOLDER,
|
| 10 |
+
)
|
| 11 |
+
from llava.conversation import conv_templates, SeparatorStyle
|
| 12 |
+
from llava.model.builder import load_pretrained_model
|
| 13 |
+
from llava.utils import disable_torch_init
|
| 14 |
+
from llava.mm_utils import (
|
| 15 |
+
process_images,
|
| 16 |
+
tokenizer_image_token,
|
| 17 |
+
get_model_name_from_path,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
from PIL import Image
|
| 21 |
+
|
| 22 |
+
import requests
|
| 23 |
+
from PIL import Image
|
| 24 |
+
from io import BytesIO
|
| 25 |
+
import re
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def image_parser(args):
    """Split the --image-file argument into a list of paths using args.sep."""
    return args.image_file.split(args.sep)
| 32 |
+
|
| 33 |
+
def load_image(image_file):
    """Open *image_file* — a local path or an http(s) URL — as an RGB PIL image."""
    if image_file.startswith(("http", "https")):
        # Remote image: download the bytes, then decode from memory.
        raw = requests.get(image_file).content
        return Image.open(BytesIO(raw)).convert("RGB")
    return Image.open(image_file).convert("RGB")
| 41 |
+
|
| 42 |
+
def load_images(image_files):
    """Load every path/URL in *image_files* via load_image, preserving order."""
    return [load_image(image_file) for image_file in image_files]
+
def eval_model(args):
    """Run a single multimodal query against a LLaVA checkpoint and print the answer.

    Expects *args* to carry: model_path, model_base, query, image_file, sep,
    conv_mode, temperature, top_p, num_beams, max_new_tokens (as produced by
    this script's argument parser).
    """
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        args.model_path, args.model_base, model_name
    )

    # Insert the image token(s) into the prompt.  If the query already has an
    # explicit placeholder, substitute it; otherwise prepend the token.
    qs = args.query
    image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
    if IMAGE_PLACEHOLDER in qs:
        if model.config.mm_use_im_start_end:
            qs = re.sub(IMAGE_PLACEHOLDER, image_token_se, qs)
        else:
            qs = re.sub(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN, qs)
    else:
        if model.config.mm_use_im_start_end:
            qs = image_token_se + "\n" + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + "\n" + qs

    # Infer the conversation template from the checkpoint name.
    if "llama-2" in model_name.lower():
        conv_mode = "llava_llama_2"
    elif "mistral" in model_name.lower():
        conv_mode = "mistral_instruct"
    elif "v1.6-34b" in model_name.lower():
        conv_mode = "chatml_direct"
    elif "v1" in model_name.lower():
        conv_mode = "llava_v1"
    elif "mpt" in model_name.lower():
        conv_mode = "mpt"
    else:
        conv_mode = "llava_v0"

    # An explicit --conv-mode overrides the inferred one (with a warning).
    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print(
            "[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}".format(
                conv_mode, args.conv_mode, args.conv_mode
            )
        )
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    image_files = image_parser(args)
    images = load_images(image_files)
    image_sizes = [x.size for x in images]
    # Preprocess to a half-precision tensor on the model's device.
    images_tensor = process_images(
        images,
        image_processor,
        model.config
    ).to(model.device, dtype=torch.float16)

    # Tokenise the prompt, splicing in the image token index; add batch dim.
    input_ids = (
        tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
        .unsqueeze(0)
        .cuda()
    )

    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=images_tensor,
            image_sizes=image_sizes,
            do_sample=True if args.temperature > 0 else False,
            temperature=args.temperature,
            top_p=args.top_p,
            num_beams=args.num_beams,
            max_new_tokens=args.max_new_tokens,
            use_cache=True,
        )

    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
    print(outputs)
if __name__ == "__main__":
    # CLI entry point: run one multimodal query against a LLaVA checkpoint.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    arg_parser.add_argument("--model-base", type=str, default=None)
    arg_parser.add_argument("--image-file", type=str, required=True)
    arg_parser.add_argument("--query", type=str, required=True)
    arg_parser.add_argument("--conv-mode", type=str, default=None)
    arg_parser.add_argument("--sep", type=str, default=",")
    arg_parser.add_argument("--temperature", type=float, default=0.2)
    arg_parser.add_argument("--top_p", type=float, default=None)
    arg_parser.add_argument("--num_beams", type=int, default=1)
    arg_parser.add_argument("--max_new_tokens", type=int, default=512)
    parsed = arg_parser.parse_args()

    eval_model(parsed)
VLMEvalKit-sudoku/llava/eval/summarize_gpt_review.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
import argparse
|
| 8 |
+
|
| 9 |
+
def parse_args(argv=None):
    """Parse CLI options for the GPT review summariser.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse.  ``None`` (the default) falls back to
        ``sys.argv``, preserving the original behaviour; passing an explicit
        list makes the function testable without touching the process argv.

    Returns
    -------
    argparse.Namespace
        With attributes: dir, version, select, files, ignore.
    """
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-d', '--dir', default=None)
    parser.add_argument('-v', '--version', default=None)
    parser.add_argument('-s', '--select', nargs='*', default=None)
    parser.add_argument('-f', '--files', nargs='*', default=[])
    parser.add_argument('-i', '--ignore', nargs='*', default=[])
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_args()

    # Ignore list arrives as strings; question_ids are compared as ints.
    if args.ignore is not None:
        args.ignore = [int(x) for x in args.ignore]

    # Either explicit review files, or auto-discover .jsonl reviews in --dir.
    if len(args.files) > 0:
        review_files = args.files
    else:
        review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]

    for review_file in sorted(review_files):
        # Config name = file name minus the gpt4_text_ prefix and extension.
        config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
        if args.select is not None and any(x not in config for x in args.select):
            continue
        # GPT-4 snapshot version is inferred from the config name.
        if '0613' in config:
            version = '0613'
        else:
            version = '0314'
        if args.version is not None and args.version != version:
            continue
        scores = defaultdict(list)
        print(config)
        with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
            for review_str in f:
                review = json.loads(review_str)
                if review['question_id'] in args.ignore:
                    continue
                # 'tuple' holds the (reference, candidate) score pair;
                # old-format reviews may only carry a scalar 'score'.
                if 'category' in review:
                    scores[review['category']].append(review['tuple'])
                    scores['all'].append(review['tuple'])
                else:
                    if 'tuple' in review:
                        scores['all'].append(review['tuple'])
                    else:
                        scores['all'].append(review['score'])
        for k, v in sorted(scores.items()):
            # Column-wise mean of the score pairs, rounded for display.
            stats = np.asarray(v).mean(0).tolist()
            stats = [round(x, 3) for x in stats]
            # print(k, stats, round(stats[1]/stats[0]*100, 1))
            # Printed: relative score (%), then both means rescaled to /10.
            print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
        print('=================================')
VLMEvalKit-sudoku/llava/model/__pycache__/llava_arch.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/language_model/llava_mistral.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 Haotian Liu
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
from torch.nn import CrossEntropyLoss
|
| 21 |
+
|
| 22 |
+
from transformers import AutoConfig, AutoModelForCausalLM, MistralConfig, MistralModel, MistralForCausalLM, GenerationConfig
|
| 23 |
+
|
| 24 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
| 25 |
+
from transformers.generation.utils import GenerateOutput
|
| 26 |
+
|
| 27 |
+
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class LlavaMistralConfig(MistralConfig):
    """Mistral config extended with LLaVA defaults."""

    # HF model_type key used when registering with AutoConfig/AutoModel.
    model_type = "llava_mistral"
    # Default generation settings carried on the config.
    temperature: float = 0.0  # reset to 0.0, previously 0.9 for Vicuna
    max_new_tokens: int = 1024
    do_sample: bool = False
    top_p: Optional[float] = None
class LlavaMistralModel(LlavaMetaModel, MistralModel):
    """Mistral backbone augmented with the LLaVA multimodal meta-model."""

    config_class = LlavaMistralConfig

    def __init__(self, config: MistralConfig):
        # Zero-argument super() walks the same MRO as the explicit form.
        super().__init__(config)
class LlavaMistralForCausalLM(MistralForCausalLM, LlavaMetaForCausalLM):
    """Mistral causal-LM head with LLaVA multimodal input handling mixed in."""

    config_class = LlavaMistralConfig

    def __init__(self, config):
        # Deliberately skips MistralForCausalLM.__init__ (the backbone is
        # rebuilt below as a LlavaMistralModel) and runs the grandparent init.
        super(MistralForCausalLM, self).__init__(config)

        config.model_type = "llava_mistral"
        config.rope_scaling = None

        self.model = LlavaMistralModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        """Return the backbone model (hook used by the LLaVA meta mixin)."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Causal-LM forward; when no precomputed inputs_embeds are supplied,
        image features are first merged into the token embedding sequence."""
        if inputs_embeds is None:
            # Splices vision features into ids/mask/labels (mixin-provided).
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, image_sizes)

        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Generation entry point: converts (inputs, images) to inputs_embeds
        before delegating to HF generate.  Caller-supplied inputs_embeds are
        rejected because they would bypass the multimodal merge."""
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, image_sizes=image_sizes)
        else:
            # Text-only request: plain token embeddings.
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread images/image_sizes through HF's per-step generation inputs."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs
# Expose the custom config/model through the HF Auto* factory machinery.
AutoConfig.register("llava_mistral", LlavaMistralConfig)
AutoModelForCausalLM.register(LlavaMistralConfig, LlavaMistralForCausalLM)
VLMEvalKit-sudoku/llava/model/language_model/llava_mpt.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 Haotian Liu
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from typing import Optional, Tuple
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
|
| 20 |
+
from transformers import AutoConfig, AutoModelForCausalLM, MptConfig, MptForCausalLM, MptModel, GenerationConfig
|
| 21 |
+
from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class LlavaMptConfig(MptConfig):
    """MPT config tagged with the LLaVA model_type for Auto* registration."""

    model_type = "llava_mpt"
class LlavaMptModel(LlavaMetaModel, MptModel):
    """MPT backbone combined with the LLaVA multimodal meta-model."""

    config_class = LlavaMptConfig

    def __init__(self, config: MptConfig):
        # MPT names its width d_model; downstream LLaVA code reads hidden_size.
        config.hidden_size = config.d_model
        super().__init__(config)

    def embed_tokens(self, x):
        """Token-embedding lookup (MPT stores the table as ``wte``)."""
        return self.wte(x)
class LlavaMptForCausalLM(MptForCausalLM, LlavaMetaForCausalLM):
    """MPT causal-LM head with LLaVA multimodal input handling mixed in."""

    config_class = LlavaMptConfig
    supports_gradient_checkpointing = True

    def __init__(self, config):
        # Deliberately skips MptForCausalLM.__init__ (the transformer is
        # rebuilt below as a LlavaMptModel) and runs the grandparent init.
        super(MptForCausalLM, self).__init__(config)

        config.model_type = "llava_mpt"
        config.rope_scaling = None
        # Greedy-by-default generation settings.
        self.generation_config = GenerationConfig(
            temperature=0.0,
            max_new_tokens=1024,
            do_sample=False,
            top_p=None,
        )

        self.transformer = LlavaMptModel(config)
        self.lm_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        """Return the backbone (MPT calls it ``transformer``)."""
        return self.transformer

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle checkpointing only on the wrapped backbone.
        if isinstance(module, LlavaMptModel):
            module.gradient_checkpointing = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position=None,
        images=None,
    ):
        """Merge image features into the inputs, then run the MPT forward.

        NOTE(review): this variant calls prepare_inputs_labels_for_multimodal
        with a shorter argument list (no position_ids/image_sizes) than the
        Mistral counterpart — presumably matching an older mixin signature;
        confirm against llava_arch.
        """
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)

        return super().forward(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the images tensor through HF's per-step generation inputs."""
        images = kwargs.pop("images", None)
        _inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        _inputs["images"] = images
        return _inputs
|
| 104 |
+
# Expose the custom config/model through the HF Auto* factory machinery.
AutoConfig.register("llava_mpt", LlavaMptConfig)
AutoModelForCausalLM.register(LlavaMptConfig, LlavaMptForCausalLM)
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/hf_vision.cpython-310.pyc
ADDED
|
Binary file (3.93 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_siglip2_ps8.cpython-310.pyc
ADDED
|
Binary file (54.9 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 2 |
+
from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer
|
| 3 |
+
from .factory import list_models, add_model_config, get_model_config, load_checkpoint
|
| 4 |
+
from .loss import ClipLoss
|
| 5 |
+
from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg, convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
|
| 6 |
+
from .openai import load_openai_model, list_openai_models
|
| 7 |
+
from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model, get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
|
| 8 |
+
from .tokenizer import SimpleTokenizer, tokenize
|
| 9 |
+
from .transform import image_transform
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/eva_vit_model.py
ADDED
|
@@ -0,0 +1,571 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# Adapted from https://github.com/microsoft/unilm/tree/master/beit
|
| 3 |
+
# --------------------------------------------------------
|
| 4 |
+
import math
|
| 5 |
+
import os
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
|
| 12 |
+
except:
|
| 13 |
+
from timm.layers import drop_path, to_2tuple, trunc_normal_
|
| 14 |
+
|
| 15 |
+
from .transformer import PatchDropout
|
| 16 |
+
from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
|
| 17 |
+
|
| 18 |
+
if os.getenv("ENV_TYPE") == "deepspeed":
|
| 19 |
+
try:
|
| 20 |
+
from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
|
| 21 |
+
except:
|
| 22 |
+
from torch.utils.checkpoint import checkpoint
|
| 23 |
+
else:
|
| 24 |
+
from torch.utils.checkpoint import checkpoint
|
| 25 |
+
|
| 26 |
+
try:
|
| 27 |
+
import xformers.ops as xops
|
| 28 |
+
except ImportError:
|
| 29 |
+
xops = None
|
| 30 |
+
# print("Please 'pip install xformers'")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class DropPath(nn.Module):
    """Stochastic depth: randomly drop the residual path per sample.

    Thin module wrapper around timm's functional ``drop_path``; dropping is
    active only while the module is in training mode.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in repr() so stochastic-depth rates are visible when printing the model.
        return f"p={self.drop_prob}"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class Mlp(nn.Module):
    """Transformer feed-forward network: Linear -> activation -> Linear.

    When ``subln`` is set, a LayerNorm (``ffn_ln``) is inserted before the
    output projection (EVA-02's sub-LN variant); otherwise that slot is an
    ``nn.Identity``. Dropout is applied only after the output projection.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        drop=0.0,
        subln=False,
    ):
        super().__init__()
        # Unspecified widths default to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.act(self.fc1(x))
        return self.drop(self.fc2(self.ffn_ln(hidden)))
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class SwiGLU(nn.Module):
    """Gated feed-forward network: ``act(w1(x)) * w2(x)`` -> (optional LN) -> ``w3``.

    ``w1`` produces the gate branch (passed through ``act_layer``, SiLU by
    default) and ``w2`` the value branch; their elementwise product feeds the
    output projection ``w3``. With ``subln`` a LayerNorm (``ffn_ln``) sits
    between the gate and the output projection; dropout follows ``w3``.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.0, norm_layer=nn.LayerNorm, subln=False):
        super().__init__()
        # Unspecified widths default to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features

        self.w1 = nn.Linear(in_features, hidden_features)
        self.w2 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.w3 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        gated = self.act(self.w1(x)) * self.w2(x)
        return self.drop(self.w3(self.ffn_ln(gated)))
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class Attention(nn.Module):
    """Multi-head self-attention with optional EVA/BEiT extensions.

    Optional features, all selected at construction time:
      * ``subln``: separate q/k/v projections (k stays bias-free) plus a
        LayerNorm (``inner_attn_ln``) on the attention output, as in
        EVA-02's sub-LN design; otherwise a single fused ``qkv`` projection.
      * ``window_size``: BEiT-style learnable relative position bias over a
        (Wh, Ww) patch grid, with 3 extra table slots reserved for
        cls->patch, patch->cls and cls->cls interactions.
      * ``rope``: rotary position embedding module applied to the patch
        tokens of q/k; token 0 (the cls token) is left unrotated.
      * ``xattn``: use xformers' memory-efficient attention kernel.
        NOTE(review): the module-level ``xops`` falls back to None when
        xformers is absent, so forward would fail with xattn=True — the
        caller is presumably responsible for only enabling it when installed.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            # Explicit per-head width overrides the dim/num_heads split.
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        # Default logit scaling is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim**-0.5

        self.subln = subln
        if self.subln:
            # Separate projections; biases (q/v only) are applied manually in forward.
            self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
        else:
            self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)

        if qkv_bias:
            # Only q and v get learnable biases; k's bias is fixed at zero.
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        if window_size:
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # last 3 table rows: cls to token & token 2 cls & cls to cls

            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            relative_position_index = torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1

            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
        # self.proj = nn.Linear(all_head_dim, all_head_dim)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

        self.rope = rope

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        """Attend over ``x`` of shape (B, N, C); token 0 is treated as cls.

        ``rel_pos_bias``: optional shared bias added to the attention logits.
        ``attn_mask``: optional (B, N) mask; falsy positions get -inf logits.
        NOTE(review): neither rel_pos_bias nor attn_mask is applied on the
        xformers (``xattn``) path — confirm callers never combine them.
        """
        B, N, C = x.shape
        if self.subln:
            q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
            k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
            v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)

            q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)  # B, num_heads, N, C
            k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
            v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
        else:

            qkv_bias = None
            if self.q_bias is not None:
                # Fused bias: [q_bias | zeros (k) | v_bias] matches the fused qkv weight layout.
                qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))

            qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
            qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)  # 3, B, num_heads, N, C
            q, k, v = qkv[0], qkv[1], qkv[2]

        if self.rope:
            # slightly fast impl: rotate patch tokens only, keep cls (index 0) unrotated.
            q_t = q[:, :, 1:, :]
            ro_q_t = self.rope(q_t)
            q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)

            k_t = k[:, :, 1:, :]
            ro_k_t = self.rope(k_t)
            k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)

        if self.xattn:
            q = q.permute(0, 2, 1, 3)  # B, num_heads, N, C -> B, N, num_heads, C
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)

            x = xops.memory_efficient_attention(
                q,
                k,
                v,
                p=self.xattn_drop,
                scale=self.scale,
            )
            x = x.reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)

            if self.relative_position_bias_table is not None:
                # Gather the per-pair learnable bias via the precomputed index buffer.
                relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
                attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)

            if rel_pos_bias is not None:
                attn = attn + rel_pos_bias.type_as(attn)

            if attn_mask is not None:
                attn_mask = attn_mask.bool()
                attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        return x
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
class Block(nn.Module):
    """Transformer block: attention + FFN, each with a residual connection.

    Construction-time variants:
      * ``postnorm``: norm is applied to the sublayer *output* (post-norm)
        instead of its input (pre-norm, the default);
      * LayerScale: per-channel residual scales ``gamma_1``/``gamma_2`` are
        enabled when ``init_values`` is a positive number;
      * ``naiveswiglu``: use the SwiGLU FFN instead of the plain MLP
        (note: SwiGLU ignores ``act_layer`` and uses its own SiLU default).
    """

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        init_values=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        window_size=None,
        attn_head_dim=None,
        xattn=False,
        rope=None,
        postnorm=False,
        subln=False,
        naiveswiglu=False,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim, xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)

        if naiveswiglu:
            self.mlp = SwiGLU(
                in_features=dim,
                hidden_features=mlp_hidden_dim,
                subln=subln,
                norm_layer=norm_layer,
            )
        else:
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, subln=subln, drop=drop)

        if init_values is not None and init_values > 0:
            # LayerScale: small learnable residual gains, initialized at init_values.
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

        self.postnorm = postnorm

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        """Apply attention then FFN with residuals; ``rel_pos_bias``/``attn_mask``
        are forwarded to the attention sublayer unchanged."""
        if self.gamma_1 is None:
            # No LayerScale.
            if self.postnorm:
                x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
                x = x + self.drop_path(self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
                x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            # LayerScale: residual branches are scaled by gamma_1/gamma_2.
            if self.postnorm:
                x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
                x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
                x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and linearly embed them.

    Implemented as a single Conv2d whose kernel size and stride both equal
    the patch size; the conv output is flattened and transposed to
    (B, num_patches, embed_dim).
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid_h = img_size[0] // patch_size[0]
        grid_w = img_size[1] // patch_size[1]

        self.img_size = img_size
        self.patch_size = patch_size
        self.patch_shape = (grid_h, grid_w)
        self.num_patches = grid_h * grid_w

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        return self.proj(x).flatten(2).transpose(1, 2)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
class RelativePositionBias(nn.Module):
    """BEiT-style shared relative position bias for windowed attention.

    Holds a learnable table of shape ((2*Wh-1)*(2*Ww-1) + 3, num_heads):
    one row per in-window relative offset plus 3 rows for cls->patch,
    patch->cls and cls->cls. A precomputed integer index buffer maps every
    (token, token) pair — including the cls token at index 0 — to its table
    row; ``forward`` gathers the table into an (nH, N, N) bias tensor.
    """

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        num_tokens = window_size[0] * window_size[1]
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, num_heads))

        # Pairwise relative offsets between all grid positions, shifted to be
        # non-negative and collapsed into a single scalar index per pair.
        ys = torch.arange(window_size[0])
        xs = torch.arange(window_size[1])
        grid = torch.stack(torch.meshgrid([ys, xs]))  # 2, Wh, Ww
        flat = torch.flatten(grid, 1)  # 2, Wh*Ww
        offsets = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        offsets[:, :, 0] += window_size[0] - 1  # shift rows to start from 0
        offsets[:, :, 1] += window_size[1] - 1  # shift cols to start from 0
        offsets[:, :, 0] *= 2 * window_size[1] - 1  # row-major flattening

        index = torch.zeros(size=(num_tokens + 1,) * 2, dtype=offsets.dtype)
        index[1:, 1:] = offsets.sum(-1)  # patch <-> patch pairs
        index[0, 0:] = self.num_relative_distance - 3  # cls -> patch
        index[0:, 0] = self.num_relative_distance - 2  # patch -> cls
        index[0, 0] = self.num_relative_distance - 1  # cls -> cls
        self.register_buffer("relative_position_index", index)

    def forward(self):
        tokens = self.window_size[0] * self.window_size[1] + 1
        bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        bias = bias.view(tokens, tokens, -1)  # N, N, nH
        return bias.permute(2, 0, 1).contiguous()  # nH, N, N
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class EVAVisionTransformer(nn.Module):
    """EVA Vision Transformer with support for patch or hybrid CNN input stage.

    BEiT/EVA-style ViT: patch embedding, a prepended cls token, optional
    absolute position embedding, optional (shared or per-block) relative
    position bias, optional rotary embedding (RoPE), a stack of ``Block``s
    with linearly-decayed stochastic depth, and a classification head fed by
    either mean pooling over tokens (``use_mean_pooling``) or the cls token.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_values=None,
        patch_dropout=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        rope=False,
        use_mean_pooling=True,
        init_scale=0.001,
        grad_checkpointing=False,
        xattn=False,
        postnorm=False,
        pt_hw_seq_len=16,
        intp_freq=False,
        naiveswiglu=False,
        subln=False,
    ):
        super().__init__()
        self.image_size = img_size
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models

        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            # +1 for the cls token prepended in forward_features.
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)

        if use_shared_rel_pos_bias:
            # One bias module shared by every block (passed in at forward time).
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None

        if rope:
            half_head_dim = embed_dim // num_heads // 2
            hw_seq_len = img_size // patch_size
            self.rope = VisionRotaryEmbeddingFast(
                dim=half_head_dim,
                pt_seq_len=pt_hw_seq_len,
                # Interpolate rotary frequencies to the fine-tune grid only when requested.
                ft_seq_len=hw_seq_len if intp_freq else None,
                # patch_dropout=patch_dropout
            )
        else:
            self.rope = None

        self.naiveswiglu = naiveswiglu

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    init_values=init_values,
                    # Per-block relative bias only when not using the shared module.
                    window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
                    xattn=xattn,
                    rope=self.rope,
                    postnorm=postnorm,
                    subln=subln,
                    naiveswiglu=naiveswiglu,
                )
                for i in range(depth)
            ]
        )
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        # NOTE(review): head bias is gated on qkv_bias — looks unintentional
        # (an unrelated flag); confirm against upstream EVA before changing.
        self.head = nn.Linear(embed_dim, num_classes, bias=qkv_bias) if num_classes > 0 else nn.Identity()

        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)

        trunc_normal_(self.cls_token, std=0.02)

        self.apply(self._init_weights)
        self.fix_init_weight()

        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=0.02)
            self.head.weight.data.mul_(init_scale)
            if self.head.bias is not None:
                self.head.bias.data.mul_(init_scale)

        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0.0 else nn.Identity()

        self.grad_checkpointing = grad_checkpointing

    def fix_init_weight(self):
        """Depth-dependent rescale of residual-branch output projections
        (attention proj and FFN output), as in the BEiT initialization."""
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            if self.naiveswiglu:
                rescale(layer.mlp.w3.weight.data, layer_id + 1)
            else:
                rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def get_cast_dtype(self) -> torch.dtype:
        # Dtype of a representative weight; assumes the whole model shares it.
        return self.blocks[0].mlp.fc2.weight.dtype

    def _init_weights(self, m):
        """Module-wise init applied via self.apply(): trunc-normal Linear
        weights, zero biases, unit LayerNorm weights."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters. NOTE(review): freeze_bn_stats is accepted
        for interface compatibility but currently ignored."""
        assert unlocked_groups == 0, "partial locking not currently supported for this model"
        for param in self.parameters():
            param.requires_grad = False

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by optimizer builders.
        return {"pos_embed", "cls_token"}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=""):
        """Replace the classification head in place (global_pool is unused)."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x, return_all_features=False):
        """Embed, run the transformer stack, and pool.

        Returns the full (B, N, D) token sequence when return_all_features
        is True; otherwise a pooled (B, D) feature (mean over tokens or the
        cls token, depending on use_mean_pooling).
        """

        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
        # if os.getenv("RoPE") == "1":
        #     if self.training and not isinstance(self.patch_dropout, nn.Identity):
        #         x, patch_indices_keep = self.patch_dropout(x)
        #         self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
        #     else:
        #         self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
        #         x = self.patch_dropout(x)
        # else:
        x = self.patch_dropout(x)

        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for blk in self.blocks:
            if self.grad_checkpointing:
                # NOTE(review): passes (rel_pos_bias,) as a positional tuple, so the
                # block would receive rel_pos_bias=(bias,) — inconsistent with the
                # non-checkpointed call below; verify before relying on
                # grad_checkpointing together with a shared rel_pos_bias.
                x = checkpoint(blk, x, (rel_pos_bias,))
            else:
                x = blk(x, rel_pos_bias=rel_pos_bias)

        if not return_all_features:
            x = self.norm(x)
            if self.fc_norm is not None:
                # Mean-pooling path: normalize the token average.
                return self.fc_norm(x.mean(1))
            else:
                # Cls-token path.
                return x[:, 0]
        return x

    def forward(self, x, return_all_features=False):
        """Full forward: features then the classification head; with
        return_all_features=True the head is skipped and all tokens returned."""
        if return_all_features:
            return self.forward_features(x, return_all_features)
        x = self.forward_features(x)
        x = self.head(x)
        return x
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/hf_configs.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HF architecture dict: per model family, the attribute names its HF config
# uses for common hyper-parameters, plus the pooling strategy to apply.


def _bert_family_entry():
    """Return a fresh (unshared) entry for BERT-style encoders.

    roberta, xlm-roberta and bert all expose identical config attribute
    names; each call returns new dicts so callers may mutate one family's
    entry without affecting the others.
    """
    return {
        "config_names": {
            "context_length": "max_position_embeddings",
            "vocab_size": "vocab_size",
            "width": "hidden_size",
            "heads": "num_attention_heads",
            "layers": "num_hidden_layers",
            "layer_attr": "layer",
            "token_embeddings_attr": "embeddings",
        },
        "pooler": "mean_pooler",
    }


arch_dict = {
    # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
    "roberta": _bert_family_entry(),
    # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
    "xlm-roberta": _bert_family_entry(),
    # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
    "mt5": {
        "config_names": {
            # unlimited seqlen
            # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
            # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
            "context_length": "",
            "vocab_size": "vocab_size",
            "width": "d_model",
            "heads": "num_heads",
            "layers": "num_layers",
            "layer_attr": "block",
            "token_embeddings_attr": "embed_tokens",
        },
        "pooler": "mean_pooler",
    },
    "bert": _bert_family_entry(),
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model.py
ADDED
|
@@ -0,0 +1,429 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" CLIP Model
|
| 2 |
+
|
| 3 |
+
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from typing import Optional, Tuple, Union
|
| 9 |
+
from functools import partial
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from torch import nn
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from .hf_model import HFTextEncoder
|
| 18 |
+
except:
|
| 19 |
+
HFTextEncoder = None
|
| 20 |
+
from .modified_resnet import ModifiedResNet
|
| 21 |
+
from .timm_model import TimmModel
|
| 22 |
+
from .eva_vit_model import EVAVisionTransformer
|
| 23 |
+
from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
|
| 24 |
+
|
| 25 |
+
try:
|
| 26 |
+
from apex.normalization import FusedLayerNorm
|
| 27 |
+
except:
|
| 28 |
+
FusedLayerNorm = LayerNorm
|
| 29 |
+
# print("Please 'pip install apex'")
|
| 30 |
+
|
| 31 |
+
try:
|
| 32 |
+
import xformers.ops as xops
|
| 33 |
+
except ImportError:
|
| 34 |
+
xops = None
|
| 35 |
+
# print("Please 'pip install xformers'")
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class RMSnorm(nn.Module):
    """Root-mean-square layer norm: scale-only, no mean subtraction, no bias.

    Adapted from the transformers T5LayerNorm implementation
    (RMSNorm, https://arxiv.org/abs/1910.07467).
    """

    def __init__(self, hidden_size, eps=1e-6):
        """Create an RMS norm over the last dimension of size ``hidden_size``."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Accumulate the mean-square in fp32 so half-precision inputs stay
        # numerically stable.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        normed = hidden_states * torch.rsqrt(mean_square + self.variance_epsilon)

        # Cast back down when the module itself runs in half precision.
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            normed = normed.to(self.weight.dtype)

        return self.weight * normed
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@dataclass
class CLIPVisionCfg:
    """Configuration for the CLIP vision tower.

    Exactly one tower type is selected by ``_build_vision_tower``:
    ``eva_model_name`` -> EVAVisionTransformer, ``timm_model_name`` -> TimmModel,
    tuple ``layers`` -> ModifiedResNet, otherwise the stock VisionTransformer.
    """

    layers: Union[Tuple[int, int, int, int], int] = 12  # ViT depth, or the 4 ResNet stage block counts
    width: int = 768  # transformer embedding width
    head_width: int = 64  # per-head dim; heads = width // head_width
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.0  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
    global_average_pool: bool = False  # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
    drop_path_rate: Optional[float] = None  # drop path rate
    timm_model_name: Optional[str] = None  # a valid model name overrides layers, width, patch_size
    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
    timm_pool: str = "avg"  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    timm_proj: str = "linear"  # linear projection for timm model output ('linear', 'mlp', '')
    timm_proj_bias: bool = False  # enable bias final projection
    eva_model_name: Optional[str] = None  # a valid eva model name overrides layers, width, patch_size
    qkv_bias: bool = True
    fusedLN: bool = False  # NOTE(review): not consumed by _build_vision_tower in this module — confirm downstream use
    xattn: bool = False  # forwarded to EVAVisionTransformer
    postnorm: bool = False
    rope: bool = False  # forwarded to EVAVisionTransformer
    pt_hw_seq_len: int = 16  # 224/14
    intp_freq: bool = False
    naiveswiglu: bool = False
    subln: bool = False
    use_rms_norm: bool = False  # use RMSnorm instead of LayerNorm for the EVA tower
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclass
class CLIPTextCfg:
    """Configuration for the CLIP text tower.

    When ``hf_model_name`` is set, ``_build_text_tower`` creates a HuggingFace
    encoder; otherwise a TextTransformer is built from the fields below.
    """

    context_length: int = 77
    vocab_size: int = 49408
    width: int = 512
    heads: int = 8
    layers: int = 12
    ls_init_value: Optional[float] = None  # layer scale initial value
    hf_model_name: Optional[str] = None  # HuggingFace model id; enables the HF text tower
    hf_tokenizer_name: Optional[str] = None
    hf_model_pretrained: bool = True
    proj: str = "mlp"  # projection head type passed to HFTextEncoder
    pooler_type: str = "mean_pooler"
    masked_language_modeling: bool = False
    fusedLN: bool = False  # use apex FusedLayerNorm for the TextTransformer when available
    xattn: bool = False
    attn_mask: bool = True
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def get_cast_dtype(precision: str):
    """Map a precision tag to a torch dtype.

    "bf16" -> torch.bfloat16, "fp16" -> torch.float16, anything else -> None.
    """
    dtype_by_tag = {"bf16": torch.bfloat16, "fp16": torch.float16}
    return dtype_by_tag.get(precision)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _build_vision_tower(embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None):
    """Construct the image encoder selected by ``vision_cfg``.

    Dispatch order: EVA ViT (``eva_model_name``), timm backbone
    (``timm_model_name``), ModifiedResNet (``layers`` is a tuple/list), else
    the stock CLIP VisionTransformer.

    Args:
        embed_dim: dimensionality of the joint image/text embedding space.
        vision_cfg: tower configuration; a plain dict is coerced to CLIPVisionCfg.
        quick_gelu: use QuickGELU activation (OpenAI-pretrained checkpoints).
        cast_dtype: low-precision compute dtype; only consulted on the stock
            VisionTransformer path.

    Returns:
        An nn.Module mapping images to ``embed_dim``-dimensional features.
    """
    if isinstance(vision_cfg, dict):
        vision_cfg = CLIPVisionCfg(**vision_cfg)

    # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
    # memory efficient in recent PyTorch releases (>= 1.10).
    # NOTE: timm models always use native GELU regardless of quick_gelu flag.
    act_layer = QuickGELU if quick_gelu else nn.GELU

    if vision_cfg.eva_model_name:
        vision_heads = vision_cfg.width // vision_cfg.head_width

        norm_layer = RMSnorm if vision_cfg.use_rms_norm else LayerNorm

        visual = EVAVisionTransformer(
            img_size=vision_cfg.image_size,
            patch_size=vision_cfg.patch_size,
            num_classes=embed_dim,
            use_mean_pooling=vision_cfg.global_average_pool,  # False
            init_values=vision_cfg.ls_init_value,
            patch_dropout=vision_cfg.patch_dropout,
            embed_dim=vision_cfg.width,
            depth=vision_cfg.layers,
            num_heads=vision_heads,
            mlp_ratio=vision_cfg.mlp_ratio,
            qkv_bias=vision_cfg.qkv_bias,
            drop_path_rate=vision_cfg.drop_path_rate,
            norm_layer=partial(norm_layer, eps=1e-6),
            xattn=vision_cfg.xattn,
            rope=vision_cfg.rope,
            postnorm=vision_cfg.postnorm,
            pt_hw_seq_len=vision_cfg.pt_hw_seq_len,  # 224/14
            intp_freq=vision_cfg.intp_freq,
            naiveswiglu=vision_cfg.naiveswiglu,
            subln=vision_cfg.subln,
        )
    elif vision_cfg.timm_model_name:
        visual = TimmModel(
            vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, embed_dim=embed_dim, image_size=vision_cfg.image_size
        )
        act_layer = nn.GELU  # so that text transformer doesn't use QuickGELU w/ timm models
    elif isinstance(vision_cfg.layers, (tuple, list)):
        # ResNet feature width is 32x the stem width, hence the scaling here.
        vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
        visual = ModifiedResNet(layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width)
    else:
        vision_heads = vision_cfg.width // vision_cfg.head_width
        # BUG FIX: the original selected LayerNormFp32 for fp16/bf16 cast
        # dtypes, but that name is never imported in this module, so the
        # half-precision path raised NameError. Fall back to the imported
        # LayerNorm; restore the fp32-upcasting variant if it is ever added
        # to the .transformer imports above.
        norm_layer = LayerNorm
        visual = VisionTransformer(
            image_size=vision_cfg.image_size,
            patch_size=vision_cfg.patch_size,
            width=vision_cfg.width,
            layers=vision_cfg.layers,
            heads=vision_heads,
            mlp_ratio=vision_cfg.mlp_ratio,
            ls_init_value=vision_cfg.ls_init_value,
            patch_dropout=vision_cfg.patch_dropout,
            global_average_pool=vision_cfg.global_average_pool,
            output_dim=embed_dim,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )

    return visual
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def _build_text_tower(
    embed_dim: int,
    text_cfg: CLIPTextCfg,
    quick_gelu: bool = False,
    cast_dtype: Optional[torch.dtype] = None,
):
    """Construct the text encoder selected by ``text_cfg``.

    A HuggingFace encoder is used when ``text_cfg.hf_model_name`` is set,
    otherwise a TextTransformer is built from the config fields.

    Args:
        embed_dim: dimensionality of the joint image/text embedding space.
        text_cfg: tower configuration; a plain dict is coerced to CLIPTextCfg.
        quick_gelu: use QuickGELU activation on the TextTransformer path.
        cast_dtype: accepted for interface symmetry with the vision builder;
            not consulted by either path here.
    """
    if isinstance(text_cfg, dict):
        text_cfg = CLIPTextCfg(**text_cfg)

    if text_cfg.hf_model_name:
        # ROBUSTNESS FIX: the module-level `from .hf_model import HFTextEncoder`
        # is wrapped in try/except and leaves HFTextEncoder = None on failure;
        # calling None raised an opaque TypeError here. Fail with a clear error.
        if HFTextEncoder is None:
            raise RuntimeError("text_cfg.hf_model_name is set but HFTextEncoder could not be imported (missing optional dependency?)")
        text = HFTextEncoder(text_cfg.hf_model_name, output_dim=embed_dim, tokenizer_name=text_cfg.hf_tokenizer_name, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, masked_language_modeling=text_cfg.masked_language_modeling)
    else:
        act_layer = QuickGELU if quick_gelu else nn.GELU
        norm_layer = LayerNorm

        text = TextTransformer(
            context_length=text_cfg.context_length,
            vocab_size=text_cfg.vocab_size,
            width=text_cfg.width,
            heads=text_cfg.heads,
            layers=text_cfg.layers,
            ls_init_value=text_cfg.ls_init_value,
            output_dim=embed_dim,
            act_layer=act_layer,
            # apex FusedLayerNorm only when explicitly requested by the config
            norm_layer=FusedLayerNorm if text_cfg.fusedLN else norm_layer,
            xattn=text_cfg.xattn,
            attn_mask=text_cfg.attn_mask,
        )
    return text
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
class CLIP(nn.Module):
    """CLIP model with the text tower's submodules flattened onto the model
    itself, matching the original OpenAI checkpoint layout (``transformer``,
    ``token_embedding``, ``ln_final`` etc. at the top level)."""

    def __init__(
        self,
        embed_dim: int,
        vision_cfg: CLIPVisionCfg,
        text_cfg: CLIPTextCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()
        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)

        # Build a full text tower, then adopt its parts directly so state-dict
        # keys stay compatible with OpenAI-format checkpoints.
        text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
        self.transformer = text.transformer
        self.vocab_size = text.vocab_size
        self.token_embedding = text.token_embedding
        self.positional_embedding = text.positional_embedding
        self.ln_final = text.ln_final
        self.text_projection = text.text_projection
        # Non-persistent: the causal mask is reconstructible, keep it out of checkpoints.
        self.register_buffer("attn_mask", text.attn_mask, persistent=False)

        # Learnable softmax temperature, initialised to 1/0.07 as in CLIP.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze the image tower (delegates to ``self.visual.lock``)."""
        # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        """Toggle gradient checkpointing on both towers."""
        self.visual.set_grad_checkpointing(enable)
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay."""
        return {"logit_scale"}

    def encode_image(self, image, normalize: bool = False):
        """Encode images; L2-normalize the features when ``normalize`` is True."""
        features = self.visual(image)
        return F.normalize(features, dim=-1) if normalize else features

    def encode_text(self, text, normalize: bool = False):
        """Encode token ids ``text`` [batch, n_ctx] into text embeddings."""
        cast_dtype = self.transformer.get_cast_dtype()

        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.to(cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)  # [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return F.normalize(x, dim=-1) if normalize else x

    def forward(self, image, text):
        """Return (image_features, text_features, logit_scale.exp()), features L2-normalized."""
        image_features = self.encode_image(image, normalize=True)
        text_features = self.encode_text(text, normalize=True)
        return image_features, text_features, self.logit_scale.exp()
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class CustomCLIP(nn.Module):
    """CLIP variant that keeps the text tower as one ``self.text`` submodule
    (such as an HFTextEncoder) instead of flattening its parts like ``CLIP``."""

    def __init__(
        self,
        embed_dim: int,
        vision_cfg: CLIPVisionCfg,
        text_cfg: CLIPTextCfg,
        quick_gelu: bool = False,
        cast_dtype: Optional[torch.dtype] = None,
        itm_task: bool = False,  # accepted for interface compatibility; unused in this class
    ):
        super().__init__()
        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
        self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
        # Learnable softmax temperature, initialised to 1/0.07 as in CLIP.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze the image tower (delegates to ``self.visual.lock``)."""
        # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)

    def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
        """Freeze the text tower (delegates to ``self.text.lock``)."""
        self.text.lock(unlocked_layers, freeze_layer_norm)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        """Toggle gradient checkpointing on both towers."""
        self.visual.set_grad_checkpointing(enable)
        self.text.set_grad_checkpointing(enable)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay."""
        return {"logit_scale"}

    def encode_image(self, image, normalize: bool = False):
        """Encode images; L2-normalize the features when ``normalize`` is True."""
        features = self.visual(image)
        return F.normalize(features, dim=-1) if normalize else features

    def encode_text(self, text, normalize: bool = False):
        """Encode text; L2-normalize the features when ``normalize`` is True."""
        features = self.text(text)
        return F.normalize(features, dim=-1) if normalize else features

    def forward(self, image, text):
        """Return (image_features, text_features, logit_scale.exp()), features L2-normalized."""
        image_features = self.encode_image(image, normalize=True)
        text_features = self.encode_text(text, normalize=True)
        return image_features, text_features, self.logit_scale.exp()
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
    """Convert applicable model parameters to low-precision (bf16 or fp16).

    Walks every submodule via ``nn.Module.apply`` and casts, in place: conv and
    linear weights/biases, attention projection tensors, and any
    ``text_projection`` / ``proj`` parameter attributes.
    """

    def _convert_weights(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.to(dtype)
            if l.bias is not None:
                l.bias.data = l.bias.data.to(dtype)

        if isinstance(l, (nn.MultiheadAttention, Attention)):
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr, None)
                if tensor is not None:
                    tensor.data = tensor.data.to(dtype)

        # BUG FIX: the original guarded this loop (and a standalone branch) with
        # `isinstance(l, nn.Parameter)`, which is never true for the modules
        # that Module.apply visits — so text_projection / proj were silently
        # never converted, unlike OpenAI's reference convert_weights. Check the
        # attribute itself instead.
        for name in ["text_projection", "proj"]:
            attr = getattr(l, name, None)
            if isinstance(attr, nn.Parameter):
                attr.data = attr.data.to(dtype)

    model.apply(_convert_weights)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
# Alias retained for callers of the pre-bf16 API name.
convert_weights_to_fp16 = convert_weights_to_lp  # backwards compat
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
# used to maintain checkpoint compatibility
def convert_to_custom_text_state_dict(state_dict: dict):
    """Re-key an old-format (flattened) CLIP state dict for CustomCLIP.

    Old checkpoints keep text-tower tensors at the top level; CustomCLIP nests
    them under a ``text.`` submodule. The old layout is detected by the
    presence of a top-level ``text_projection`` key; otherwise the input is
    returned unchanged.
    """
    if "text_projection" not in state_dict:
        return state_dict  # already in the new layout

    text_prefixes = ("text_projection", "positional_embedding", "token_embedding", "transformer", "ln_final", "logit_scale")
    remapped = {}
    for key, value in state_dict.items():
        new_key = "text." + key if key.startswith(text_prefixes) else key
        remapped[new_key] = value
    return remapped
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def build_model_from_openai_state_dict(
    state_dict: dict,
    quick_gelu=True,
    cast_dtype=torch.float16,
):
    """Instantiate a ``CLIP`` from an OpenAI-format state dict and load it.

    All architecture hyper-parameters (widths, depths, patch/image size, text
    config) are inferred from tensor shapes and key names in ``state_dict``.
    Returns the loaded model in eval mode with weights converted to fp16, as
    OpenAI checkpoints are partially half precision.
    """
    # ViT checkpoints carry a `visual.proj` tensor; ResNet checkpoints do not.
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # one `.attn.in_proj_weight` per transformer block
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # positional embedding rows = grid*grid + 1 (CLS), so invert to get the grid
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_size = vision_patch_size * grid_size
    else:
        # ResNet: count distinct block indices per stage from the key names
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width**2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # total downsampling of the modified ResNet is 32x
        image_size = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64  # OpenAI text towers use 64-dim heads
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    vision_cfg = CLIPVisionCfg(
        layers=vision_layers,
        width=vision_width,
        patch_size=vision_patch_size,
        image_size=image_size,
    )
    text_cfg = CLIPTextCfg(context_length=context_length, vocab_size=vocab_size, width=transformer_width, heads=transformer_heads, layers=transformer_layers)
    model = CLIP(
        embed_dim,
        vision_cfg=vision_cfg,
        text_cfg=text_cfg,
        quick_gelu=quick_gelu,  # OpenAI models were trained with QuickGELU
        cast_dtype=cast_dtype,
    )

    # Metadata entries that are not parameters of this model.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        state_dict.pop(key, None)

    convert_weights_to_fp16(model)  # OpenAI state dicts are partially converted to float16
    model.load_state_dict(state_dict)
    return model.eval()
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
def trace_model(model, batch_size=256, device=torch.device("cpu")):
    """TorchScript-trace ``forward``, ``encode_text`` and ``encode_image``.

    NOTE(review): relies on ``model.visual.image_size`` and
    ``model.context_length`` being set on the instance — confirm for the
    model variant being traced.
    """
    model.eval()
    resolution = model.visual.image_size
    dummy_images = torch.ones((batch_size, 3, resolution, resolution), device=device)
    dummy_tokens = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
    traced = torch.jit.trace_module(
        model,
        inputs={
            "forward": (dummy_images, dummy_tokens),
            "encode_text": (dummy_tokens,),
            "encode_image": (dummy_images,),
        },
    )
    # Re-attach image_size on the traced module, as downstream code reads it.
    traced.visual.image_size = resolution
    return traced
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1024,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 40,
|
| 6 |
+
"width": 1408,
|
| 7 |
+
"head_width": 88,
|
| 8 |
+
"mlp_ratio": 4.3637,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-g-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"fusedLN": true
|
| 14 |
+
},
|
| 15 |
+
"text_cfg": {
|
| 16 |
+
"context_length": 77,
|
| 17 |
+
"vocab_size": 49408,
|
| 18 |
+
"width": 1024,
|
| 19 |
+
"heads": 16,
|
| 20 |
+
"layers": 24,
|
| 21 |
+
"xattn": false,
|
| 22 |
+
"fusedLN": true
|
| 23 |
+
}
|
| 24 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-B-16.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 512,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 12,
|
| 6 |
+
"width": 768,
|
| 7 |
+
"head_width": 64,
|
| 8 |
+
"patch_size": 16,
|
| 9 |
+
"mlp_ratio": 2.6667,
|
| 10 |
+
"eva_model_name": "eva-clip-b-16-X",
|
| 11 |
+
"drop_path_rate": 0.0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"fusedLN": true,
|
| 14 |
+
"rope": true,
|
| 15 |
+
"pt_hw_seq_len": 16,
|
| 16 |
+
"intp_freq": true,
|
| 17 |
+
"naiveswiglu": true,
|
| 18 |
+
"subln": true
|
| 19 |
+
},
|
| 20 |
+
"text_cfg": {
|
| 21 |
+
"context_length": 77,
|
| 22 |
+
"vocab_size": 49408,
|
| 23 |
+
"width": 512,
|
| 24 |
+
"heads": 8,
|
| 25 |
+
"layers": 12,
|
| 26 |
+
"xattn": true,
|
| 27 |
+
"fusedLN": true
|
| 28 |
+
}
|
| 29 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-L-14.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 768,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 24,
|
| 6 |
+
"width": 1024,
|
| 7 |
+
"drop_path_rate": 0,
|
| 8 |
+
"head_width": 64,
|
| 9 |
+
"mlp_ratio": 2.6667,
|
| 10 |
+
"patch_size": 14,
|
| 11 |
+
"eva_model_name": "eva-clip-l-14",
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"fusedLN": true,
|
| 14 |
+
"rope": true,
|
| 15 |
+
"pt_hw_seq_len": 16,
|
| 16 |
+
"intp_freq": true,
|
| 17 |
+
"naiveswiglu": true,
|
| 18 |
+
"subln": true
|
| 19 |
+
},
|
| 20 |
+
"text_cfg": {
|
| 21 |
+
"context_length": 77,
|
| 22 |
+
"vocab_size": 49408,
|
| 23 |
+
"width": 768,
|
| 24 |
+
"heads": 12,
|
| 25 |
+
"layers": 12,
|
| 26 |
+
"xattn": false,
|
| 27 |
+
"fusedLN": true
|
| 28 |
+
}
|
| 29 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1024,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 64,
|
| 6 |
+
"width": 1792,
|
| 7 |
+
"head_width": 112,
|
| 8 |
+
"mlp_ratio": 8.571428571428571,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-4b-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"postnorm": true,
|
| 14 |
+
"fusedLN": true
|
| 15 |
+
},
|
| 16 |
+
"text_cfg": {
|
| 17 |
+
"context_length": 77,
|
| 18 |
+
"vocab_size": 49408,
|
| 19 |
+
"width": 1280,
|
| 20 |
+
"heads": 20,
|
| 21 |
+
"layers": 32,
|
| 22 |
+
"xattn": false,
|
| 23 |
+
"fusedLN": true
|
| 24 |
+
}
|
| 25 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/modified_resnet.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
from .utils import freeze_batch_norm_2d
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Bottleneck(nn.Module):
    """CLIP-ResNet bottleneck block.

    Every convolution has stride 1; when ``stride > 1`` the downsampling is
    done by an AvgPool2d after the second conv (and on the shortcut path),
    per the anti-aliasing scheme of the modified ResNet.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # 1x1 reduction
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.act1 = nn.ReLU(inplace=True)

        # 3x3 spatial conv
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.act2 = nn.ReLU(inplace=True)

        # downsampling happens here rather than via a strided conv
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        # 1x1 expansion
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.act3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # shortcut: avgpool first, then a stride-1 projection conv + BN
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: torch.Tensor):
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.act1(self.bn1(self.conv1(x)))
        y = self.act2(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(self.avgpool(y)))

        return self.act3(y + shortcut)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class AttentionPool2d(nn.Module):
    """Pool an NCHW feature map to a single vector via multi-head attention.

    The mean over all spatial positions is prepended as an extra token;
    attention runs over (mean + HW) tokens and the output at the mean token
    is returned as the pooled embedding.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # Learnable positions for spacial_dim^2 locations plus the mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Separate q/k/v projection weights are passed explicitly rather than
        # using nn.MultiheadAttention, so the checkpoint keys stay *_proj.*.
        x, _ = F.multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0.0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )

        # Return only the pooled (mean) token's output.
        return x[0]
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, image_size=224, width=64):
        # layers: per-stage Bottleneck block counts (4 stages).
        # output_dim: channel size of the pooled embedding produced by attnpool.
        # heads: number of attention heads in the final attention pool.
        super().__init__()
        self.output_dim = output_dim
        self.image_size = image_size

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.act1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.act2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.act3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        # Stem + 3 strided stages give a total downsample of 32, hence image_size // 32.
        self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)

        self.init_parameters()

    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample (stride); the rest keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def init_parameters(self):
        # Scale the attention-pool projections by 1/sqrt(embed_dim).
        if self.attnpool is not None:
            std = self.attnpool.c_proj.in_features**-0.5
            nn.init.normal_(self.attnpool.q_proj.weight, std=std)
            nn.init.normal_(self.attnpool.k_proj.weight, std=std)
            nn.init.normal_(self.attnpool.v_proj.weight, std=std)
            nn.init.normal_(self.attnpool.c_proj.weight, std=std)

        # Zero the final BN gamma in each residual block so blocks start as identity.
        for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
            for name, param in resnet_block.named_parameters():
                if name.endswith("bn3.weight"):
                    nn.init.zeros_(param)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        # Freeze all weights (and optionally BN running stats) for fine-tuning.
        assert unlocked_groups == 0, "partial locking not currently supported for this model"
        for param in self.parameters():
            param.requires_grad = False
        if freeze_bn_stats:
            freeze_batch_norm_2d(self)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # FIXME support for non-transformer
        pass

    def stem(self, x):
        # 3 conv-bn-relu layers followed by a 2x2 average pool.
        x = self.act1(self.bn1(self.conv1(x)))
        x = self.act2(self.bn2(self.conv2(x)))
        x = self.act3(self.bn3(self.conv3(x)))
        x = self.avgpool(x)
        return x

    def forward(self, x):
        # NCHW image -> stem -> 4 residual stages -> attention pooling.
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/pretrained.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import os
|
| 3 |
+
import urllib
|
| 4 |
+
import warnings
|
| 5 |
+
from typing import Dict, Union
|
| 6 |
+
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
from huggingface_hub import hf_hub_download
|
| 11 |
+
|
| 12 |
+
_has_hf_hub = True
|
| 13 |
+
except ImportError:
|
| 14 |
+
hf_hub_download = None
|
| 15 |
+
_has_hf_hub = False
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _pcfg(url="", hf_hub="", filename="", mean=None, std=None):
|
| 19 |
+
return dict(
|
| 20 |
+
url=url,
|
| 21 |
+
hf_hub=hf_hub,
|
| 22 |
+
mean=mean,
|
| 23 |
+
std=std,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ---------------------------------------------------------------------------
# Pretrained checkpoint registries. Each entry maps a pretrain tag to a
# config with either a direct download `url` or an HF Hub `hf_hub` spec
# ('org/repo/filename', or 'org/repo/' for the default filename).
# ---------------------------------------------------------------------------

# ViT-B/32 (OpenAI + OpenCLIP LAION checkpoints).
_VITB32 = dict(
    openai=_pcfg("https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
    laion400m_e31=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
    laion400m_e32=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
    laion2b_e16=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
    laion2b_s34b_b79k=_pcfg(hf_hub="laion/CLIP-ViT-B-32-laion2B-s34B-b79K/"),
)

# ViT-B/32 with QuickGELU activation.
_VITB32_quickgelu = dict(
    openai=_pcfg("https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
    laion400m_e31=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
    laion400m_e32=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
)

# ViT-B/16.
_VITB16 = dict(
    openai=_pcfg("https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
    laion400m_e31=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
    laion400m_e32=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
    laion2b_s34b_b88k=_pcfg(hf_hub="laion/CLIP-ViT-B-16-laion2B-s34B-b88K/"),
)

# EVA-02 B/16 (vision-only 'eva*' weights and full CLIP '*_clip' weights).
_EVAB16 = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt"),
    eva02=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt"),
    eva02_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt"),
)

# ViT-B/16+ at 240px.
_VITB16_PLUS_240 = dict(
    laion400m_e31=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
    laion400m_e32=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
)

# ViT-L/14 (note: the laion2b checkpoint uses 0.5 mean/std normalization).
_VITL14 = dict(
    openai=_pcfg("https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
    laion400m_e31=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
    laion400m_e32=_pcfg("https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
    laion2b_s32b_b82k=_pcfg(hf_hub="laion/CLIP-ViT-L-14-laion2B-s32B-b82K/", mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
)

# EVA-02 L/14.
_EVAL14 = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_L_psz14.pt"),
    eva02=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_L_psz14.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt"),
    eva02_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt"),
)

# ViT-L/14 at 336px.
_VITL14_336 = dict(
    openai=_pcfg("https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
)

# EVA-02 L/14 at 336px (incl. 224->336 interpolated variants).
_EVAL14_336 = dict(
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt"),
    eva02_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt"),
    eva_clip_224to336=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt"),
    eva02_clip_224to336=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt"),
)

# ViT-H/14.
_VITH14 = dict(
    laion2b_s32b_b79k=_pcfg(hf_hub="laion/CLIP-ViT-H-14-laion2B-s32B-b79K/"),
)

# ViT-g/14.
_VITg14 = dict(
    laion2b_s12b_b42k=_pcfg(hf_hub="laion/CLIP-ViT-g-14-laion2B-s12B-b42K/"),
    laion2b_s34b_b88k=_pcfg(hf_hub="laion/CLIP-ViT-g-14-laion2B-s34B-b88K/"),
)

# EVA-01 g/14.
_EVAg14 = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/"),
    eva01=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_g_psz14.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt"),
    eva01_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt"),
)

# EVA-01 g/14+ ("plus" text tower).
_EVAg14_PLUS = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/"),
    eva01=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_g_psz14.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt"),
    eva01_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt"),
)

# ViT-bigG/14.
_VITbigG14 = dict(
    laion2b_s39b_b160k=_pcfg(hf_hub="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/"),
)

# EVA-02 E/14.
_EVAbigE14 = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_E_psz14.pt"),
    eva02=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_E_psz14.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt"),
    eva02_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt"),
)

# EVA-02 E/14+.
_EVAbigE14_PLUS = dict(
    eva=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_E_psz14.pt"),
    eva02=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_E_psz14.pt"),
    eva_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt"),
    eva02_clip=_pcfg(hf_hub="QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt"),
)

# EVA-CLIP 8B.
_EVA_8B = dict(
    eva=_pcfg(hf_hub="BAAI/EVA-CLIP-8B/EVA_8B_psz14.bin"),
    eva_clip=_pcfg(hf_hub="BAAI/EVA-CLIP-8B/EVA_CLIP_8B_psz14_s9B.pt"),
)

# EVA-CLIP 8B at 448px.
_EVA_8B_PLUS = dict(
    eva_clip=_pcfg(hf_hub="BAAI/EVA-CLIP-8B-448/EVA_CLIP_8B_psz14_plus_s0.6B.pt"),
)


# Master registry: model architecture name -> {pretrain tag -> config}.
# Several names alias the same table (e.g. OpenaiCLIP-* / OpenCLIP-*).
_PRETRAINED = {
    # "ViT-B-32": _VITB32,
    "OpenaiCLIP-B-32": _VITB32,
    "OpenCLIP-B-32": _VITB32,
    # "ViT-B-32-quickgelu": _VITB32_quickgelu,
    "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu,
    "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu,
    # "ViT-B-16": _VITB16,
    "OpenaiCLIP-B-16": _VITB16,
    "OpenCLIP-B-16": _VITB16,
    "EVA02-B-16": _EVAB16,
    "EVA02-CLIP-B-16": _EVAB16,
    # "ViT-B-16-plus-240": _VITB16_PLUS_240,
    "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240,
    # "ViT-L-14": _VITL14,
    "OpenaiCLIP-L-14": _VITL14,
    "OpenCLIP-L-14": _VITL14,
    "EVA02-L-14": _EVAL14,
    "EVA02-CLIP-L-14": _EVAL14,
    # "ViT-L-14-336": _VITL14_336,
    "OpenaiCLIP-L-14-336": _VITL14_336,
    "EVA02-CLIP-L-14-336": _EVAL14_336,
    # "ViT-H-14": _VITH14,
    # "ViT-g-14": _VITg14,
    "OpenCLIP-H-14": _VITH14,
    "OpenCLIP-g-14": _VITg14,
    "EVA01-CLIP-g-14": _EVAg14,
    "EVA01-CLIP-g-14-plus": _EVAg14_PLUS,
    # "ViT-bigG-14": _VITbigG14,
    "OpenCLIP-bigG-14": _VITbigG14,
    "EVA02-CLIP-bigE-14": _EVAbigE14,
    "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS,
    "EVA-CLIP-8B": _EVA_8B,
    "EVA-CLIP-8B-448": _EVA_8B_PLUS,
    "EVA-CLIP-8B-plus": _EVA_8B_PLUS,
}
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def _clean_tag(tag: str):
|
| 175 |
+
# normalize pretrained tags
|
| 176 |
+
return tag.lower().replace("-", "_")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def list_pretrained(as_str: bool = False):
    """returns list of pretrained models
    Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
    """
    entries = []
    for model_name, tag_cfgs in _PRETRAINED.items():
        for tag in tag_cfgs:
            entries.append(f"{model_name}:{tag}" if as_str else (model_name, tag))
    return entries
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def list_pretrained_models_by_tag(tag: str):
    """return all models having the specified pretrain tag"""
    wanted = _clean_tag(tag)
    return [model for model, cfgs in _PRETRAINED.items() if wanted in cfgs]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def list_pretrained_tags_by_model(model: str):
    """return all pretrain tags for the specified model architecture"""
    cfgs = _PRETRAINED.get(model)
    if cfgs is None:
        return []
    return list(cfgs.keys())
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def is_pretrained_cfg(model: str, tag: str):
    """Whether (model, tag) names a registered pretrained config."""
    model_cfgs = _PRETRAINED.get(model)
    return model_cfgs is not None and _clean_tag(tag) in model_cfgs
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def get_pretrained_cfg(model: str, tag: str):
    """Look up the config dict for (model, tag); returns {} when unknown."""
    try:
        model_cfgs = _PRETRAINED[model]
    except KeyError:
        return {}
    return model_cfgs.get(_clean_tag(tag), {})
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def get_pretrained_url(model: str, tag: str):
    """Return the direct-download URL for (model, tag), or '' if none is registered."""
    # _clean_tag is idempotent, so cleaning here and inside get_pretrained_cfg is safe.
    cfg = get_pretrained_cfg(model, _clean_tag(tag))
    return cfg.get("url", "")
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _file_sha256(path):
    """SHA256 hex digest of a file's contents (read fully; checkpoints are read once)."""
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()


def download_pretrained_from_url(
    url: str,
    cache_dir: Union[str, None] = None,
):
    """Download a checkpoint from *url* into *cache_dir* (default ~/.cache/clip).

    For openai/mlfoundations URLs the expected SHA256 (prefix) is embedded in
    the URL or filename and is verified; an existing cached file with a good
    checksum is reused without re-downloading.

    Returns:
        The local file path of the downloaded (or cached) checkpoint.
    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the downloaded file fails the checksum check.
    """
    if not cache_dir:
        cache_dir = os.path.expanduser("~/.cache/clip")
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url)

    if "openaipublic" in url:
        # openai URLs embed the full sha256 as the second-to-last path segment.
        expected_sha256 = url.split("/")[-2]
    elif "mlfoundations" in url:
        # mlfoundations filenames end in '-<sha256 prefix>.pt'.
        expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
    else:
        expected_sha256 = ""

    download_target = os.path.join(cache_dir, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if expected_sha256:
            if _file_sha256(download_target).startswith(expected_sha256):
                return download_target
            else:
                warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
        else:
            # No checksum available for this source; trust the cached file.
            return download_target

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        # Content-Length may be absent (e.g. chunked responses); tqdm accepts total=None.
        content_length = source.headers.get("Content-Length")
        total = int(content_length) if content_length is not None else None
        with tqdm(total=total, ncols=80, unit="iB", unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if expected_sha256 and not _file_sha256(download_target).startswith(expected_sha256):
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def has_hf_hub(necessary=False):
    """Return whether huggingface_hub is importable; raise when it is required but absent."""
    if necessary and not _has_hf_hub:
        # if no HF Hub module installed, and it is necessary to continue, raise error
        raise RuntimeError("Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.")
    return _has_hf_hub
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def download_pretrained_from_hf(
    model_id: str,
    filename: str = "open_clip_pytorch_model.bin",
    revision=None,
    cache_dir: Union[str, None] = None,
):
    """Fetch *filename* from HF Hub repo *model_id*; returns the cached local path."""
    has_hf_hub(True)
    return hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def download_pretrained(
    cfg: Dict,
    force_hf_hub: bool = False,
    cache_dir: Union[str, None] = None,
):
    """Resolve a pretrained config (url / hf_hub entry) to a local file path.

    Returns "" when *cfg* is empty or names no download source.
    """
    if not cfg:
        return ""

    download_url = cfg.get("url", "")
    download_hf_hub = cfg.get("hf_hub", "")
    if force_hf_hub and download_hf_hub:
        # use HF hub even if url exists
        download_url = ""

    if download_url:
        return download_pretrained_from_url(download_url, cache_dir=cache_dir)

    if download_hf_hub:
        has_hf_hub(True)
        # we assume the hf_hub entries in pretrained config combine model_id + filename in
        # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
        # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
        model_id, filename = os.path.split(download_hf_hub)
        if filename:
            return download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
        return download_pretrained_from_hf(model_id, cache_dir=cache_dir)

    return ""
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/rope.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from math import pi
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
from einops import rearrange, repeat
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def broadcat(tensors, dim=-1):
    """Concatenate *tensors* along *dim*, broadcasting every other axis to its max size."""
    num_tensors = len(tensors)
    ranks = set(len(t.shape) for t in tensors)
    assert len(ranks) == 1, "tensors must all have the same number of dimensions"
    rank = ranks.pop()
    if dim < 0:
        dim = dim + rank

    # Per-axis tuple of every tensor's size on that axis.
    sizes_by_axis = list(zip(*(list(t.shape) for t in tensors)))
    # Axes other than the concat axis must be broadcast-compatible
    # (at most two distinct sizes, the smaller one being 1 for expand() to work).
    other_axes = [(axis, sizes) for axis, sizes in enumerate(sizes_by_axis) if axis != dim]
    assert all(len(set(sizes)) <= 2 for _, sizes in other_axes), "invalid dimensions for broadcastable concatentation"
    # Target size of each broadcast axis, replicated once per tensor.
    per_tensor_sizes = [(axis, (max(sizes),) * num_tensors) for axis, sizes in other_axes]
    # The concat axis keeps each tensor's own size.
    per_tensor_sizes.insert(dim, (dim, sizes_by_axis[dim]))
    target_shapes = list(zip(*(sizes for _, sizes in per_tensor_sizes)))
    expanded = [t.expand(*shape) for t, shape in zip(tensors, target_shapes)]
    return torch.cat(expanded, dim=dim)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def rotate_half(x):
    """Rotate adjacent feature pairs along the last dim: (x1, x2) -> (-x2, x1).

    Pure-torch equivalent of the einops split/stack/merge formulation: the last
    dimension is treated as interleaved (even, odd) pairs.
    """
    even = x[..., 0::2]
    odd = x[..., 1::2]
    rotated_pairs = torch.stack((-odd, even), dim=-1)
    return rotated_pairs.flatten(-2)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class VisionRotaryEmbedding(nn.Module):
    """2D rotary position embedding (RoPE) for vision transformers.

    Builds cos/sin rotation tables over an ft_seq_len x ft_seq_len grid
    (positions rescaled to the pretraining grid pt_seq_len) and applies them
    to a slice of the feature dimension in forward().
    """

    def __init__(
        self,
        dim,
        pt_seq_len,
        ft_seq_len=None,
        custom_freqs=None,
        freqs_for="lang",
        theta=10000,
        max_freq=10,
        num_freqs=1,
    ):
        super().__init__()
        # Frequency schedule: explicit override, or one of three presets.
        if custom_freqs:
            freqs = custom_freqs
        elif freqs_for == "lang":
            # Standard RoPE inverse-frequency spectrum: theta^(-2i/dim).
            freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
        elif freqs_for == "pixel":
            freqs = torch.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")

        if ft_seq_len is None:
            ft_seq_len = pt_seq_len
        # Rescale finetune-grid positions into the pretraining position range.
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        # Outer product position x frequency; each value repeated so adjacent
        # feature pairs share a rotation angle (matches rotate_half pairing).
        freqs_h = torch.einsum("..., f -> ... f", t, freqs)
        freqs_h = repeat(freqs_h, "... n -> ... (n r)", r=2)

        freqs_w = torch.einsum("..., f -> ... f", t, freqs)
        freqs_w = repeat(freqs_w, "... n -> ... (n r)", r=2)

        # Combine the row and column tables into one per-grid-position table.
        freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim=-1)

        # Non-trainable cos/sin lookup tables (track device/dtype moves).
        self.register_buffer("freqs_cos", freqs.cos())
        self.register_buffer("freqs_sin", freqs.sin())

        logging.info(f"Shape of rope freq: {self.freqs_cos.shape}")

    def forward(self, t, start_index=0):
        # Rotate t[..., start_index:start_index+rot_dim]; pass the rest through unchanged.
        rot_dim = self.freqs_cos.shape[-1]
        end_index = start_index + rot_dim
        assert rot_dim <= t.shape[-1], f"feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}"
        # Split into (untouched prefix, rotated middle, untouched suffix).
        t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
        # Classic RoPE rotation: t*cos + rotate_half(t)*sin.
        t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)

        return torch.cat((t_left, t, t_right), dim=-1)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class VisionRotaryEmbeddingFast(nn.Module):
    """Faster 2D RoPE variant.

    The cos/sin tables are pre-flattened to (num_positions, rot_dim) at init so
    forward() reduces to elementwise multiply-adds; an optional
    patch_indices_keep path gathers per-sample kept positions (patch dropout).
    """

    def __init__(self, dim, pt_seq_len, ft_seq_len=None, custom_freqs=None, freqs_for="lang", theta=10000, max_freq=10, num_freqs=1, patch_dropout=0.0):
        super().__init__()
        # Frequency schedule (same presets as VisionRotaryEmbedding).
        if custom_freqs:
            freqs = custom_freqs
        elif freqs_for == "lang":
            freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
        elif freqs_for == "pixel":
            freqs = torch.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")

        if ft_seq_len is None:
            ft_seq_len = pt_seq_len
        # Rescale finetune-grid positions into the pretraining position range.
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        # Position x frequency table, paired for rotate_half, combined over (h, w).
        freqs = torch.einsum("..., f -> ... f", t, freqs)
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim=-1)

        # Flatten the 2D grid to a single token axis so forward() can broadcast.
        freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
        freqs_sin = freqs.sin().view(-1, freqs.shape[-1])

        # Stored for reference only here; the dropout gather is driven by the
        # patch_indices_keep argument passed to forward().
        self.patch_dropout = patch_dropout

        self.register_buffer("freqs_cos", freqs_cos)
        self.register_buffer("freqs_sin", freqs_sin)

        logging.info(f"Shape of rope freq: {self.freqs_cos.shape}")

    def forward(self, t, patch_indices_keep=None):
        if patch_indices_keep is not None:
            # Patch-dropout path: expand the tables over batch/head dims, then
            # gather only the kept positions per sample.
            batch = t.size()[0]
            batch_indices = torch.arange(batch)
            batch_indices = batch_indices[..., None]

            freqs_cos = repeat(self.freqs_cos, "i j -> n i m j", n=t.shape[0], m=t.shape[1])
            freqs_sin = repeat(self.freqs_sin, "i j -> n i m j", n=t.shape[0], m=t.shape[1])

            freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
            freqs_cos = rearrange(freqs_cos, "n i m j -> n m i j")
            freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
            freqs_sin = rearrange(freqs_sin, "n i m j -> n m i j")

            return t * freqs_cos + rotate_half(t) * freqs_sin

        # Full-sequence path: tables broadcast over the leading dims of t.
        return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/transform.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Sequence, Tuple
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torchvision.transforms.functional as F
|
| 6 |
+
|
| 7 |
+
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, CenterCrop
|
| 8 |
+
|
| 9 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ResizeMaxSize(nn.Module):
    """Resize so the longest image side equals *max_size* (aspect preserved),
    then pad symmetrically with *fill* to a square max_size x max_size output.

    Accepts either a PIL image or a CHW torch.Tensor.
    """

    def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn="max", fill=0):
        super().__init__()
        if not isinstance(max_size, int):
            raise TypeError(f"Size should be int. Got {type(max_size)}")
        self.max_size = max_size
        self.interpolation = interpolation
        # BUGFIX: both branches previously selected `min`, so fn="max" (the
        # default) could never resolve to `max`. Note self.fn is currently not
        # used by forward(), which scales by the longest side directly.
        self.fn = max if fn == "max" else min
        self.fill = fill

    def forward(self, img):
        if isinstance(img, torch.Tensor):
            # BUGFIX: tensors are CHW, so (H, W) are the *last* two dims;
            # shape[:2] would have read (C, H).
            height, width = img.shape[-2:]
        else:
            # PIL .size is (width, height).
            width, height = img.size
        scale = self.max_size / float(max(height, width))
        if scale != 1.0:
            # Aspect-preserving resize, then center the result with padding.
            new_size = tuple(round(dim * scale) for dim in (height, width))
            img = F.resize(img, new_size, self.interpolation)
            pad_h = self.max_size - new_size[0]
            pad_w = self.max_size - new_size[1]
            img = F.pad(img, padding=[pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2], fill=self.fill)
        return img
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _convert_to_rgb(image):
|
| 39 |
+
return image.convert("RGB")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# class CatGen(nn.Module):
|
| 43 |
+
# def __init__(self, num=4):
|
| 44 |
+
# self.num = num
|
| 45 |
+
# def mixgen_batch(image, text):
|
| 46 |
+
# batch_size = image.shape[0]
|
| 47 |
+
# index = np.random.permutation(batch_size)
|
| 48 |
+
|
| 49 |
+
# cat_images = []
|
| 50 |
+
# for i in range(batch_size):
|
| 51 |
+
# # image mixup
|
| 52 |
+
# image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:]
|
| 53 |
+
# # text concat
|
| 54 |
+
# text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0]
|
| 55 |
+
# text = torch.stack(text)
|
| 56 |
+
# return image, text
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def image_transform(
    image_size: int,
    is_train: bool,
    mean: Optional[Tuple[float, ...]] = None,
    std: Optional[Tuple[float, ...]] = None,
    resize_longest_max: bool = False,
    fill_color: int = 0,
):
    """Build the torchvision preprocessing pipeline for CLIP-style inputs.

    Training: RandomResizedCrop -> RGB -> tensor -> normalize.
    Eval: Resize + CenterCrop (or aspect-preserving ResizeMaxSize with padding
    when resize_longest_max is set) -> RGB -> tensor -> normalize.
    """
    mean = mean or OPENAI_DATASET_MEAN
    if not isinstance(mean, (list, tuple)):
        mean = (mean,) * 3

    std = std or OPENAI_DATASET_STD
    if not isinstance(std, (list, tuple)):
        std = (std,) * 3

    if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
        # for square size, pass size as int so that Resize() uses aspect preserving shortest edge
        image_size = image_size[0]

    normalize = Normalize(mean=mean, std=std)

    if is_train:
        return Compose(
            [
                RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
                _convert_to_rgb,
                ToTensor(),
                normalize,
            ]
        )

    if resize_longest_max:
        eval_transforms = [ResizeMaxSize(image_size, fill=fill_color)]
    else:
        eval_transforms = [
            Resize(image_size, interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
        ]
    eval_transforms += [
        _convert_to_rgb,
        ToTensor(),
        normalize,
    ]
    return Compose(eval_transforms)
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/transformer.py
ADDED
|
@@ -0,0 +1,683 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from collections import OrderedDict
|
| 4 |
+
import math
|
| 5 |
+
from typing import Callable, Optional, Sequence
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn
|
| 9 |
+
from torch.nn import functional as F
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
from timm.models.layers import trunc_normal_
|
| 13 |
+
except:
|
| 14 |
+
from timm.layers import trunc_normal_
|
| 15 |
+
|
| 16 |
+
from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
|
| 17 |
+
from .utils import to_2tuple
|
| 18 |
+
|
| 19 |
+
if os.getenv("ENV_TYPE") == "deepspeed":
|
| 20 |
+
try:
|
| 21 |
+
import deepspeed
|
| 22 |
+
from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
|
| 23 |
+
except:
|
| 24 |
+
print("Please 'pip install deepspeed'")
|
| 25 |
+
deepspeed = None
|
| 26 |
+
from torch.utils.checkpoint import checkpoint
|
| 27 |
+
else:
|
| 28 |
+
from torch.utils.checkpoint import checkpoint
|
| 29 |
+
|
| 30 |
+
try:
|
| 31 |
+
import xformers.ops as xops
|
| 32 |
+
except ImportError:
|
| 33 |
+
xops = None
|
| 34 |
+
# print("Please 'pip install xformers'")
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class LayerNormFp32(nn.LayerNorm):
    """LayerNorm that computes in float32 and casts back to the input dtype.

    Running the mean/variance reduction in fp32 avoids precision loss when the
    model itself runs in fp16/bf16. The affine weight/bias are also up-cast so
    the whole normalization happens in fp32.
    """

    # NOTE: the original defined a redundant __init__ that only forwarded to
    # super(); it has been removed — construction is inherited from nn.LayerNorm.

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        output = F.layer_norm(
            x.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        # restore the caller's dtype (e.g. fp16) on the way out
        return output.type_as(x)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class LayerNorm(nn.LayerNorm):
    """Standard LayerNorm that restores the caller's dtype on the output."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        input_dtype = x.dtype
        normalized = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # cast back so an autocast-widened result does not leak a wider dtype
        return normalized.to(input_dtype)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: ``x * sigmoid(1.702 * x)``.

    NOTE: slower than nn.GELU or nn.SiLU and uses more GPU memory; kept for
    parity with the original CLIP weights.
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = torch.sigmoid(1.702 * x)
        return x * gate
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class LayerScale(nn.Module):
    """Learnable per-channel scaling of the residual branch (CaiT-style).

    `gamma` is initialized to a small constant so each block starts close to
    the identity. With ``inplace=True`` the input tensor is scaled in place.
    """

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class PatchDropout(nn.Module):
    """Randomly keep only a subset of patch tokens during training.

    Implements patch dropout from https://arxiv.org/abs/2212.00794: each
    sample independently keeps ``ceil`` of ``num_tokens * (1 - prob)`` tokens
    (at least one), chosen uniformly at random. A no-op in eval mode or when
    ``prob == 0``.
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.0
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude CLS token
        logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")

    def forward(self, x):
        # x: (batch, num_tokens, dim); identity outside training / with prob 0
        if not self.training or self.prob == 0.0:
            return x

        if self.exclude_first_token:
            # split off the CLS token so it is always kept
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        keep_prob = 1 - self.prob
        # keep at least one token even for very high drop probabilities
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        # top-k over gaussian noise == uniform random subset (per sample)
        rand = torch.randn(batch, num_tokens)
        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        if self.training and os.getenv("RoPE") == "1":
            # rotary embeddings need the kept indices to re-index positions
            return x, patch_indices_keep

        return x
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def _in_projection_packed(
|
| 124 |
+
q: torch.Tensor,
|
| 125 |
+
k: torch.Tensor,
|
| 126 |
+
v: torch.Tensor,
|
| 127 |
+
w: torch.Tensor,
|
| 128 |
+
b: Optional[torch.Tensor] = None,
|
| 129 |
+
):
|
| 130 |
+
"""
|
| 131 |
+
https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726
|
| 132 |
+
"""
|
| 133 |
+
E = q.size(-1)
|
| 134 |
+
if k is v:
|
| 135 |
+
if q is k:
|
| 136 |
+
# self-attention
|
| 137 |
+
return F.linear(q, w, b).chunk(3, dim=-1)
|
| 138 |
+
else:
|
| 139 |
+
# encoder-decoder attention
|
| 140 |
+
w_q, w_kv = w.split([E, E * 2])
|
| 141 |
+
if b is None:
|
| 142 |
+
b_q = b_kv = None
|
| 143 |
+
else:
|
| 144 |
+
b_q, b_kv = b.split([E, E * 2])
|
| 145 |
+
return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
|
| 146 |
+
else:
|
| 147 |
+
w_q, w_k, w_v = w.chunk(3)
|
| 148 |
+
if b is None:
|
| 149 |
+
b_q = b_k = b_v = None
|
| 150 |
+
else:
|
| 151 |
+
b_q, b_k, b_v = b.chunk(3)
|
| 152 |
+
return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class Attention(nn.Module):
    """Multi-head self-attention with optional scaled-cosine logits, per-head
    output scaling, and an xformers memory-efficient path.

    Input/output layout is sequence-first (L, N, C), matching
    nn.MultiheadAttention's default.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=True, scaled_cosine=False, scale_heads=False, logit_scale_max=math.log(1.0 / 0.01), attn_drop=0.0, proj_drop=0.0, xattn=False, rope=False):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            # learnable per-head temperature for cosine-similarity attention,
            # clamped at exp(logit_scale_max) in forward
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop
        # NOTE(review): `rope` is stored but never used in forward — confirm
        # whether rotary embeddings were meant to be applied here.
        self.rope = rope

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        # x: (L, N, C)
        L, N, C = x.shape
        q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
        if self.xattn:
            # xformers path expects (batch, seq, heads, head_dim)
            q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)

            # NOTE(review): any non-None attn_mask is treated as a causal
            # (lower-triangular) mask here — the mask values themselves are
            # ignored. Confirm callers only pass causal masks on this path.
            x = xops.memory_efficient_attention(
                q,
                k,
                v,
                p=self.xattn_drop,
                scale=self.scale if self.logit_scale is None else None,
                attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
            )
        else:
            # fold heads into the batch dim: (N*H, L, head_dim)
            q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                # scaled-cosine attention: cosine similarity * learned temperature
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(N, self.num_heads, L, L) * logit_scale
                attn = attn.view(-1, L, L)
            else:
                # standard scaled dot-product
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    # convert boolean mask to additive -inf mask
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            # learned per-head gain on the attention output
            x = x.view(N, self.num_heads, L, C) * self.head_scale
            x = x.view(-1, L, C)
        x = x.transpose(0, 1).reshape(L, N, C)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
class CustomAttention(nn.Module):
    """Multi-head attention with separate query/key/value inputs (supports
    cross-attention), optional scaled-cosine logits, per-head scaling, and an
    xformers memory-efficient path.

    Inputs are sequence-first (L, N, C); q/k/v may have different lengths.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=True, scaled_cosine=True, scale_heads=False, logit_scale_max=math.log(1.0 / 0.01), attn_drop=0.0, proj_drop=0.0, xattn=False):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            # learnable per-head temperature for cosine-similarity attention
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        # packed projection picks the cheapest path based on tensor identity
        q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
        N_q, B_q, C_q = q.shape
        N_k, B_k, C_k = k.shape
        N_v, B_v, C_v = v.shape
        if self.xattn:
            # B, N, C -> B, N, num_heads, C
            q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1)
            k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1)
            v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1)

            # NOTE(review): a non-None attn_mask is replaced by a causal mask
            # on this path; the mask values are ignored — confirm intent.
            x = xops.memory_efficient_attention(q, k, v, p=self.xattn_drop, scale=self.scale if self.logit_scale is None else None, attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None)
        else:
            # B*H, L, C
            q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                # B*H, N_q, N_k — cosine-similarity logits * learned temperature
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale
                attn = attn.view(-1, N_q, N_k)
            else:
                # standard scaled dot-product
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    # convert boolean mask to additive -inf mask
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            # learned per-head gain on the attention output
            x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale
            x = x.view(-1, N_q, C_q)
        x = x.transpose(0, 1).reshape(N_q, B_q, C_q)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class CustomResidualAttentionBlock(nn.Module):
    """Pre-norm residual block built on CustomAttention.

    Supports cross-attention (separate LayerNorms for key/value inputs when
    ``cross_attn`` is set; otherwise k/v share the query's norm so self-attention
    stays a single-input path), optional LayerScale on both residual branches,
    and optional extra norms after attention / inside the MLP.
    """

    def __init__(
        self,
        d_model: int,
        n_head: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = LayerNorm,
        scale_cosine_attn: bool = False,
        scale_heads: bool = False,
        scale_attn: bool = False,
        scale_fc: bool = False,
        cross_attn: bool = False,
        xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        # for self-attention, k/v reuse the query norm (same module object)
        self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1
        self.ln_1_v = norm_layer(d_model) if cross_attn else self.ln_1
        self.attn = CustomAttention(d_model, n_head, qkv_bias=True, attn_drop=0.0, proj_drop=0.0, scaled_cosine=scale_cosine_attn, scale_heads=scale_heads, xattn=xattn)

        # optional extra normalization of the attention output
        self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        # "ln" inside the MLP is only active when scale_fc is set
        self.mlp = nn.Sequential(OrderedDict([("c_fc", nn.Linear(d_model, mlp_width)), ("ln", norm_layer(mlp_width) if scale_fc else nn.Identity()), ("gelu", act_layer()), ("c_proj", nn.Linear(mlp_width, d_model))]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        # attention branch: pre-norm -> attention -> optional norm/scale -> residual
        q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask)))
        # MLP branch
        q = q + self.ls_2(self.mlp(self.ln_2(q)))
        return q
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
class CustomTransformer(nn.Module):
    """Stack of CustomResidualAttentionBlock layers with optional
    cross-attention and activation checkpointing."""

    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = LayerNorm,
        scale_cosine_attn: bool = True,
        scale_heads: bool = False,
        scale_attn: bool = False,
        scale_fc: bool = False,
        cross_attn: bool = False,
        xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        # toggled externally to trade compute for activation memory
        self.grad_checkpointing = False
        self.xattn = xattn

        self.resblocks = nn.ModuleList(
            [
                CustomResidualAttentionBlock(
                    width,
                    heads,
                    mlp_ratio,
                    ls_init_value=ls_init_value,
                    act_layer=act_layer,
                    norm_layer=norm_layer,
                    scale_cosine_attn=scale_cosine_attn,
                    scale_heads=scale_heads,
                    scale_attn=scale_attn,
                    scale_fc=scale_fc,
                    cross_attn=cross_attn,
                    xattn=xattn,
                )
                for _ in range(layers)
            ]
        )

    def get_cast_dtype(self) -> torch.dtype:
        # dtype of the first block's MLP weight is representative of the stack
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None):
        # omitting k/v collapses to plain self-attention
        if k is None and v is None:
            k = v = q
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                # recompute activations in backward to save memory
                q = checkpoint(r, q, k, v, attn_mask)
            else:
                q = r(q, k, v, attn_mask=attn_mask)
        return q
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class ResidualAttentionBlock(nn.Module):
    """Standard pre-norm Transformer block (self-attention + MLP), with
    optional LayerScale and an optional xformers-backed attention module.

    Input is sequence-first (L, N, C).
    """

    def __init__(
        self,
        d_model: int,
        n_head: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = LayerNorm,
        xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        if xattn:
            # memory-efficient attention (xformers) variant
            self.attn = Attention(d_model, n_head, xattn=True)
        else:
            self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([("c_fc", nn.Linear(d_model, mlp_width)), ("gelu", act_layer()), ("c_proj", nn.Linear(mlp_width, d_model))]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
        self.xattn = xattn

    def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        # cast the additive mask to the activation dtype (fp16 safety)
        attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
        if self.xattn:
            return self.attn(x, attn_mask=attn_mask)
        # nn.MultiheadAttention returns (output, weights); keep only the output
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
class Transformer(nn.Module):
    """Stack of ``layers`` pre-norm ResidualAttentionBlock layers, shared by
    the vision and text towers; supports activation checkpointing."""

    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = LayerNorm,
        xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        # toggled externally (set_grad_checkpointing) to save activation memory
        self.grad_checkpointing = False

        blocks = [
            ResidualAttentionBlock(width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
            for _ in range(layers)
        ]
        self.resblocks = nn.ModuleList(blocks)

    def get_cast_dtype(self) -> torch.dtype:
        # dtype of the first block's MLP weight is representative of the stack
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        use_checkpoint = self.grad_checkpointing and not torch.jit.is_scripting()
        for block in self.resblocks:
            if use_checkpoint:
                # recompute activations during backward to save memory
                x = checkpoint(block, x, attn_mask)
            else:
                x = block(x, attn_mask=attn_mask)
        return x
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
class VisionTransformer(nn.Module):
    """CLIP-style ViT image encoder.

    Conv patch embedding + class token + learned positional embedding, a
    pre-norm Transformer trunk, then class-token (or mean) pooling and a
    linear projection into the shared embedding space.
    """

    def __init__(
        self,
        image_size: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        mlp_ratio: float,
        ls_init_value: float = None,
        patch_dropout: float = 0.0,
        global_average_pool: bool = False,
        output_dim: int = 512,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = LayerNorm,
        xattn: bool = False,
    ):
        super().__init__()
        self.image_size = to_2tuple(image_size)
        self.patch_size = to_2tuple(patch_size)
        # number of patches along (height, width)
        self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
        self.output_dim = output_dim
        # non-overlapping patch embedding: stride == kernel == patch_size
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width**-0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # +1 position for the class token
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))

        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0.0 else nn.Identity()
        self.ln_pre = norm_layer(width)

        self.transformer = Transformer(width, layers, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)

        self.global_average_pool = global_average_pool
        self.ln_post = norm_layer(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters; optionally leave the last ``unlocked_groups``
        parameter groups (counted from the output end) trainable.

        NOTE(review): ``freeze_bn_stats`` is accepted but unused here —
        presumably kept for interface parity with CNN towers; confirm.
        """
        for param in self.parameters():
            param.requires_grad = False

        if unlocked_groups != 0:
            # groups ordered input -> output, so the negative slice below
            # unlocks the groups closest to the output first
            groups = [
                [
                    self.conv1,
                    self.class_embedding,
                    self.positional_embedding,
                    self.ln_pre,
                ],
                *self.transformer.resblocks[:-1],
                [
                    self.transformer.resblocks[-1],
                    self.ln_post,
                ],
                self.proj,
            ]

            def _unlock(x):
                # recursively re-enable grads on nested sequences, modules, and parameters
                if isinstance(x, Sequence):
                    for g in x:
                        _unlock(g)
                else:
                    if isinstance(x, torch.nn.Parameter):
                        x.requires_grad = True
                    else:
                        for p in x.parameters():
                            p.requires_grad = True

            _unlock(groups[-unlocked_groups:])

    def get_num_layers(self):
        # depth of the transformer trunk
        return self.transformer.layers

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        # parameter names excluded from weight decay by the optimizer setup
        return {"positional_embedding", "class_embedding"}

    def forward(self, x: torch.Tensor, return_all_features: bool = False):
        """Encode images; with ``return_all_features`` skip pooling/projection
        and return the full (batch, tokens, width) sequence."""
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # prepend the class token (broadcast across the batch)
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)

        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
        x = self.patch_dropout(x)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        if not return_all_features:
            if self.global_average_pool:
                x = x.mean(dim=1)  # x = x[:,1:,:].mean(dim=1)
            else:
                # class-token pooling
                x = x[:, 0]

            x = self.ln_post(x)

            if self.proj is not None:
                x = x @ self.proj

        return x
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class TextTransformer(nn.Module):
|
| 597 |
+
def __init__(
|
| 598 |
+
self,
|
| 599 |
+
context_length: int = 77,
|
| 600 |
+
vocab_size: int = 49408,
|
| 601 |
+
width: int = 512,
|
| 602 |
+
heads: int = 8,
|
| 603 |
+
layers: int = 12,
|
| 604 |
+
ls_init_value: float = None,
|
| 605 |
+
output_dim: int = 512,
|
| 606 |
+
act_layer: Callable = nn.GELU,
|
| 607 |
+
norm_layer: Callable = LayerNorm,
|
| 608 |
+
xattn: bool = False,
|
| 609 |
+
attn_mask: bool = True,
|
| 610 |
+
):
|
| 611 |
+
super().__init__()
|
| 612 |
+
self.context_length = context_length
|
| 613 |
+
self.vocab_size = vocab_size
|
| 614 |
+
self.width = width
|
| 615 |
+
self.output_dim = output_dim
|
| 616 |
+
|
| 617 |
+
self.token_embedding = nn.Embedding(vocab_size, width)
|
| 618 |
+
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
|
| 619 |
+
self.transformer = Transformer(width=width, layers=layers, heads=heads, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
|
| 620 |
+
|
| 621 |
+
self.xattn = xattn
|
| 622 |
+
self.ln_final = norm_layer(width)
|
| 623 |
+
self.text_projection = nn.Parameter(torch.empty(width, output_dim))
|
| 624 |
+
|
| 625 |
+
if attn_mask:
|
| 626 |
+
self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
|
| 627 |
+
else:
|
| 628 |
+
self.attn_mask = None
|
| 629 |
+
|
| 630 |
+
self.init_parameters()
|
| 631 |
+
|
| 632 |
+
def init_parameters(self):
|
| 633 |
+
nn.init.normal_(self.token_embedding.weight, std=0.02)
|
| 634 |
+
nn.init.normal_(self.positional_embedding, std=0.01)
|
| 635 |
+
|
| 636 |
+
proj_std = (self.transformer.width**-0.5) * ((2 * self.transformer.layers) ** -0.5)
|
| 637 |
+
attn_std = self.transformer.width**-0.5
|
| 638 |
+
fc_std = (2 * self.transformer.width) ** -0.5
|
| 639 |
+
for block in self.transformer.resblocks:
|
| 640 |
+
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
|
| 641 |
+
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
|
| 642 |
+
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
|
| 643 |
+
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
|
| 644 |
+
|
| 645 |
+
if self.text_projection is not None:
|
| 646 |
+
nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
|
| 647 |
+
|
| 648 |
+
@torch.jit.ignore
|
| 649 |
+
def set_grad_checkpointing(self, enable=True):
|
| 650 |
+
self.transformer.grad_checkpointing = enable
|
| 651 |
+
|
| 652 |
+
@torch.jit.ignore
|
| 653 |
+
def no_weight_decay(self):
|
| 654 |
+
# return {'positional_embedding', 'token_embedding'}
|
| 655 |
+
return {"positional_embedding"}
|
| 656 |
+
|
| 657 |
+
def get_num_layers(self):
|
| 658 |
+
return self.transformer.layers
|
| 659 |
+
|
| 660 |
+
def build_attention_mask(self):
|
| 661 |
+
# lazily create causal attention mask, with full attention between the vision tokens
|
| 662 |
+
# pytorch uses additive attention mask; fill with -inf
|
| 663 |
+
mask = torch.empty(self.context_length, self.context_length)
|
| 664 |
+
mask.fill_(float("-inf"))
|
| 665 |
+
mask.triu_(1) # zero out the lower diagonal
|
| 666 |
+
return mask
|
| 667 |
+
|
| 668 |
+
def forward(self, text, return_all_features: bool = False):
    """Encode token ids into text features.

    Args:
        text: integer token tensor, shape [batch_size, n_ctx].
        return_all_features: when True, return the full per-token feature
            sequence after the final LayerNorm; when False, pool the
            EOT-token feature and project it into the embedding space.
    """
    cast_dtype = self.transformer.get_cast_dtype()

    hidden = self.token_embedding(text).to(cast_dtype)  # [batch, n_ctx, width]
    hidden = hidden + self.positional_embedding.to(cast_dtype)

    # Transformer expects sequence-first layout: NLD -> LND, then back.
    hidden = hidden.permute(1, 0, 2)
    hidden = self.transformer(hidden, attn_mask=self.attn_mask)
    hidden = hidden.permute(1, 0, 2)
    hidden = self.ln_final(hidden)

    if return_all_features:
        return hidden

    # Pool at the EOT embedding (the EOT token has the highest id in each
    # sequence), then project into the shared embedding space.
    eot_indices = text.argmax(dim=-1)
    return hidden[torch.arange(hidden.shape[0]), eot_indices] @ self.text_projection
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/utils.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import repeat
|
| 2 |
+
import collections.abc
|
| 3 |
+
import logging
|
| 4 |
+
import math
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn as nn
|
| 9 |
+
from torchvision.ops.misc import FrozenBatchNorm2d
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# open CLIP
|
| 14 |
+
# open CLIP
def resize_clip_pos_embed(state_dict, model, interpolation: str = "bicubic", seq_dim=1):
    """Resize `visual.positional_embedding` in *state_dict* to fit *model*.

    Extra (class) token rows are kept untouched; the grid portion is
    reshaped to 2D and resampled to the model's visual grid size. No-op
    when the key is absent, the model has no `grid_size`, or the sequence
    lengths already match.
    """
    old_pos_embed = state_dict.get("visual.positional_embedding", None)
    if old_pos_embed is None or not hasattr(model.visual, "grid_size"):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        tok_part = old_pos_embed[:extra_tokens]
        img_part = old_pos_embed[extra_tokens:]
    else:
        tok_part, img_part = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(img_part))))

    logging.info("Resizing position embedding grid-size from %s to %s", old_grid_size, grid_size)
    # (seq, dim) -> (1, dim, H, W) so F.interpolate can resample spatially.
    img_part = img_part.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    img_part = F.interpolate(
        img_part,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    img_part = img_part.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if tok_part is not None:
        new_pos_embed = torch.cat([tok_part, img_part], dim=0)
    else:
        new_pos_embed = img_part
    state_dict["visual.positional_embedding"] = new_pos_embed
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def resize_visual_pos_embed(state_dict, model, interpolation: str = "bicubic", seq_dim=1):
    """Resize a bare `positional_embedding` in *state_dict* to fit *model*.

    Same procedure as `resize_clip_pos_embed` but for checkpoints whose
    visual positional embedding is stored without the `visual.` prefix.
    """
    old_pos_embed = state_dict.get("positional_embedding", None)
    if old_pos_embed is None or not hasattr(model.visual, "grid_size"):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        tok_part = old_pos_embed[:extra_tokens]
        img_part = old_pos_embed[extra_tokens:]
    else:
        tok_part, img_part = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(img_part))))

    logging.info("Resizing position embedding grid-size from %s to %s", old_grid_size, grid_size)
    # (seq, dim) -> (1, dim, H, W) for spatial resampling.
    img_part = img_part.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    img_part = F.interpolate(
        img_part,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    img_part = img_part.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if tok_part is not None:
        new_pos_embed = torch.cat([tok_part, img_part], dim=0)
    else:
        new_pos_embed = img_part
    state_dict["positional_embedding"] = new_pos_embed
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def resize_evaclip_pos_embed(state_dict, model, interpolation: str = "bicubic", seq_dim=1):
    """Interpolate `visual.pos_embed` and the patch projection to the model's grid.

    For EVA-CLIP checkpoints: the class-token row is preserved, the grid
    portion of the positional embedding is bicubically resampled, and the
    patch-embedding convolution kernel is resampled to the model's patch
    size. No-op when `visual.pos_embed` is absent.
    """
    if "visual.pos_embed" in state_dict:
        checkpoint_embed = state_dict["visual.pos_embed"]
        embed_dim = checkpoint_embed.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
        # Source and target grids are assumed square (height == width).
        src_size = int((checkpoint_embed.shape[-2] - num_extra_tokens) ** 0.5)
        dst_size = int(num_patches**0.5)
        # class_token and dist_token are kept unchanged
        if src_size != dst_size:
            print("Position interpolate from %dx%d to %dx%d" % (src_size, src_size, dst_size, dst_size))
            cls_part = checkpoint_embed[:, :num_extra_tokens]
            grid_part = checkpoint_embed[:, num_extra_tokens:]
            grid_part = grid_part.reshape(-1, src_size, src_size, embed_dim).permute(0, 3, 1, 2)
            grid_part = torch.nn.functional.interpolate(grid_part, size=(dst_size, dst_size), mode="bicubic", align_corners=False)
            grid_part = grid_part.permute(0, 2, 3, 1).flatten(1, 2)
            state_dict["visual.pos_embed"] = torch.cat((cls_part, grid_part), dim=1)

        proj_weight = state_dict["visual.patch_embed.proj.weight"]
        patch_size = model.visual.patch_embed.patch_size
        state_dict["visual.patch_embed.proj.weight"] = torch.nn.functional.interpolate(proj_weight.float(), size=patch_size, mode="bicubic", align_corners=False)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def resize_eva_pos_embed(state_dict, model, interpolation: str = "bicubic", seq_dim=1):
    """Interpolate a bare `pos_embed` and patch projection to the model's grid.

    Same procedure as `resize_evaclip_pos_embed` but for checkpoints whose
    keys are stored without the `visual.` prefix.
    """
    if "pos_embed" in state_dict:
        checkpoint_embed = state_dict["pos_embed"]
        embed_dim = checkpoint_embed.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
        # Source and target grids are assumed square (height == width).
        src_size = int((checkpoint_embed.shape[-2] - num_extra_tokens) ** 0.5)
        dst_size = int(num_patches**0.5)
        # class_token and dist_token are kept unchanged
        if src_size != dst_size:
            print("Position interpolate from %dx%d to %dx%d" % (src_size, src_size, dst_size, dst_size))
            cls_part = checkpoint_embed[:, :num_extra_tokens]
            grid_part = checkpoint_embed[:, num_extra_tokens:]
            grid_part = grid_part.reshape(-1, src_size, src_size, embed_dim).permute(0, 3, 1, 2)
            grid_part = torch.nn.functional.interpolate(grid_part, size=(dst_size, dst_size), mode="bicubic", align_corners=False)
            grid_part = grid_part.permute(0, 2, 3, 1).flatten(1, 2)
            state_dict["pos_embed"] = torch.cat((cls_part, grid_part), dim=1)

        proj_weight = state_dict["patch_embed.proj.weight"]
        patch_size = model.visual.patch_embed.patch_size
        state_dict["patch_embed.proj.weight"] = torch.nn.functional.interpolate(proj_weight.float(), size=patch_size, mode="bicubic", align_corners=False)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def resize_rel_pos_embed(state_dict, model, interpolation: str = "bicubic", seq_dim=1):
    """Adapt relative-position tables and absolute pos_embed in *state_dict* to *model*.

    Three transformations (BEiT/EVA checkpoint surgery):
      1. Drop cached `relative_position_index` buffers (rebuilt by the model).
      2. Resample each `relative_position_bias_table` from the checkpoint's
         window size to the model's, using geometric-progression coordinates
         and cubic spline interpolation.
      3. Interpolate `pos_embed` and the patch-projection kernel, as in
         `resize_eva_pos_embed`.

    Raises:
        NotImplementedError: if the model's patch grid is not square.
    """
    all_keys = list(state_dict.keys())
    for key in all_keys:
        if "relative_position_index" in key:
            state_dict.pop(key)

        if "relative_position_bias_table" in key:
            rel_pos_bias = state_dict[key]
            src_num_pos, num_attn_heads = rel_pos_bias.size()
            dst_num_pos, _ = model.visual.state_dict()[key].size()
            dst_patch_shape = model.visual.patch_embed.patch_shape
            if dst_patch_shape[0] != dst_patch_shape[1]:
                raise NotImplementedError()
            num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
            src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
            dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
            if src_size != dst_size:
                print("Position interpolate for %s from %dx%d to %dx%d" % (key, src_size, src_size, dst_size, dst_size))
                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]

                def geometric_progression(a, r, n):
                    return a * (1.0 - r**n) / (1.0 - r)

                # Binary-search a ratio q so that the geometric coordinate
                # spacing spans the target half-width.
                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, src_size // 2)
                    if gp > dst_size // 2:
                        right = q
                    else:
                        left = q

                dis = []
                cur = 1
                for i in range(src_size // 2):
                    dis.append(cur)
                    cur += q ** (i + 1)

                r_ids = [-_ for _ in reversed(dis)]

                # Strictly increasing source coordinates, symmetric about 0.
                x = r_ids + [0] + dis
                y = r_ids + [0] + dis

                t = dst_size // 2.0
                dx = np.arange(-t, t + 0.1, 1.0)
                dy = np.arange(-t, t + 0.1, 1.0)

                print("Original positions = %s" % str(x))
                print("Target positions = %s" % str(dx))

                all_rel_pos_bias = []

                # FIX: the previous code called `F.interpolate.interp2d`, which
                # does not exist (torch.nn.functional.interpolate has no
                # `interp2d` attribute) and raised AttributeError. The upstream
                # BEiT code used scipy.interpolate.interp2d; since interp2d was
                # removed in SciPy >= 1.14, use the documented
                # RectBivariateSpline equivalent:
                #   interp2d(x, y, z, kind="cubic")(dx, dy)
                #   == RectBivariateSpline(x, y, z.T)(dx, dy).T
                from scipy.interpolate import RectBivariateSpline

                for i in range(num_attn_heads):
                    z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
                    spline = RectBivariateSpline(np.array(x), np.array(y), z.T, kx=3, ky=3)
                    resampled = spline(dx, dy).T
                    all_rel_pos_bias.append(torch.Tensor(resampled).contiguous().view(-1, 1).to(rel_pos_bias.device))

                rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)

                new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
                state_dict[key] = new_rel_pos_bias

    # interpolate position embedding
    if "pos_embed" in state_dict:
        pos_embed_checkpoint = state_dict["pos_embed"]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches**0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict["pos_embed"] = new_pos_embed

            patch_embed_proj = state_dict["patch_embed.proj.weight"]
            patch_size = model.visual.patch_embed.patch_size
            state_dict["patch_embed.proj.weight"] = torch.nn.functional.interpolate(patch_embed_proj.float(), size=patch_size, mode="bicubic", align_corners=False)
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def freeze_batch_norm_2d(module, module_match=None, name=""):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty or None)
        name (str): Full module name (prefix)

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    # FIX: the default was a mutable dict literal (`module_match={}`); use the
    # None-sentinel idiom instead. Behavior is identical since the dict was
    # never mutated, but this removes the shared-mutable-default hazard.
    if module_match is None:
        module_match = {}
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            # Detach copies so the frozen layer shares no autograd history.
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for child_name, child in module.named_children():
            full_child_name = ".".join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# From PyTorch internals
|
| 273 |
+
def _ntuple(n):
|
| 274 |
+
def parse(x):
|
| 275 |
+
if isinstance(x, collections.abc.Iterable):
|
| 276 |
+
return x
|
| 277 |
+
return tuple(repeat(x, n))
|
| 278 |
+
|
| 279 |
+
return parse
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
# Pre-built converters for the common arities (matches timm's helpers).
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
# Generic form: broadcast x to an n-tuple chosen at call time.
to_ntuple = lambda n, x: _ntuple(n)(x)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def is_logging(args):
    """Build and return a master-rank predicate for logging decisions.

    Note: the `is_master` function itself is returned (it is not called
    here); callers invoke it as ``is_logging(cfg)(cfg, local=...)``.
    """

    def is_global_master(args):
        return args.rank == 0

    def is_local_master(args):
        return args.local_rank == 0

    def is_master(args, local=False):
        # Local master = rank 0 on this node; global master = rank 0 overall.
        if local:
            return is_local_master(args)
        return is_global_master(args)

    return is_master
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
class AllGather(torch.autograd.Function):
    """An autograd function that performs allgather on a tensor.
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """

    @staticmethod
    def forward(ctx, tensor, rank, world_size):
        # Gather a copy of `tensor` from every rank and concatenate along dim 0.
        tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)]
        torch.distributed.all_gather(tensors_gather, tensor)
        # Remember where this rank's slice lives so backward can route the
        # gradient back to only the locally-produced rows.
        ctx.rank = rank
        ctx.batch_size = tensor.shape[0]
        return torch.cat(tensors_gather, 0)

    @staticmethod
    def backward(ctx, grad_output):
        # Only this rank's slice of the gathered gradient flows back to the
        # input tensor; the rank/world_size arguments receive no gradient.
        return (grad_output[ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1)], None, None)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
# Functional alias for the differentiable all-gather op defined above.
allgather = AllGather.apply
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_vit.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Based on EVA, BEIT, timm and DeiT code bases
|
| 2 |
+
# https://github.com/baaivision/EVA
|
| 3 |
+
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
| 4 |
+
# https://github.com/microsoft/unilm/tree/master/beit
|
| 5 |
+
# https://github.com/facebookresearch/deit/
|
| 6 |
+
# https://github.com/facebookresearch/dino
|
| 7 |
+
# --------------------------------------------------------'
|
| 8 |
+
# not tested yet
|
| 9 |
+
import math
|
| 10 |
+
from transformers import CLIPImageProcessor
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
import torch.utils.checkpoint as checkpoint
|
| 16 |
+
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
|
| 17 |
+
from .eva_clip import create_model_and_transforms, get_model_config
|
| 18 |
+
import torch
|
| 19 |
+
import torchvision
|
| 20 |
+
import time
|
| 21 |
+
|
| 22 |
+
from llava.utils import rank0_print
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class EvaViTWrapper(nn.Module):
|
| 26 |
+
def __init__(self, vision_tower, args, delay_load=False):
|
| 27 |
+
super().__init__()
|
| 28 |
+
|
| 29 |
+
self.is_loaded = False
|
| 30 |
+
self.vision_tower_name = vision_tower
|
| 31 |
+
self.pretrained = args.vision_tower_pretrained
|
| 32 |
+
self.args = args
|
| 33 |
+
|
| 34 |
+
self.select_layer = args.mm_vision_select_layer
|
| 35 |
+
if self.select_layer < -1:
|
| 36 |
+
self.select_layer += 1
|
| 37 |
+
self.select_feature = getattr(args, "mm_vision_select_feature", "patch")
|
| 38 |
+
|
| 39 |
+
self.model_config = get_model_config(self.vision_tower_name)
|
| 40 |
+
|
| 41 |
+
if not delay_load:
|
| 42 |
+
rank0_print(f"Loading vision tower: {vision_tower}")
|
| 43 |
+
self.load_model()
|
| 44 |
+
elif getattr(args, "unfreeze_mm_vision_tower", False):
|
| 45 |
+
# TODO: better detector is needed.
|
| 46 |
+
rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
|
| 47 |
+
self.load_model()
|
| 48 |
+
elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
|
| 49 |
+
rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
|
| 50 |
+
self.load_model()
|
| 51 |
+
|
| 52 |
+
def load_model(self):
|
| 53 |
+
rank0_print(f"Loading: {self.vision_tower_name}")
|
| 54 |
+
rank0_print(f"Pretrained: {self.pretrained}")
|
| 55 |
+
time_start = time.time()
|
| 56 |
+
model, _, image_processor = create_model_and_transforms(self.vision_tower_name, self.pretrained, force_custom_clip=True, precision="fp16")
|
| 57 |
+
time_end = time.time()
|
| 58 |
+
rank0_print(f"Loaded: {self.vision_tower_name} in {time_end - time_start:.2f}s")
|
| 59 |
+
self.device = next(model.parameters()).device
|
| 60 |
+
self.dtype = next(model.parameters()).dtype
|
| 61 |
+
if self.device.type != "meta":
|
| 62 |
+
model = model.to("cuda")
|
| 63 |
+
self.vision_tower = model.visual
|
| 64 |
+
resize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Resize)][0]
|
| 65 |
+
normalize_transform = [t for t in image_processor.transforms if isinstance(t, torchvision.transforms.Normalize)][0]
|
| 66 |
+
self.resize_transform_size = resize_transform.size
|
| 67 |
+
self.image_processor = CLIPImageProcessor.from_pretrained(
|
| 68 |
+
"openai/clip-vit-large-patch14",
|
| 69 |
+
crop_size=resize_transform.size,
|
| 70 |
+
size={"shortest_edge": resize_transform.size},
|
| 71 |
+
image_mean=list(normalize_transform.mean),
|
| 72 |
+
image_std=list(normalize_transform.std),
|
| 73 |
+
)
|
| 74 |
+
rank0_print(f"Loaded image processor: {self.image_processor}")
|
| 75 |
+
self.vision_tower.requires_grad_(False)
|
| 76 |
+
self.is_loaded = True
|
| 77 |
+
|
| 78 |
+
def feature_select(self, image_features):
|
| 79 |
+
select_feature_type = self.select_feature
|
| 80 |
+
|
| 81 |
+
# if self.select_feature in ["slicefour_patch", "slicefour_cls_patch"]:
|
| 82 |
+
# select_every_k_layer = len(image_features) // 4
|
| 83 |
+
# image_features = torch.cat([image_features[i] for i in range(select_every_k_layer + self.select_layer, len(image_features), select_every_k_layer)], dim=-1)
|
| 84 |
+
# select_feature_type = select_feature_type.replace("slicefour_", "")
|
| 85 |
+
# elif self.select_feature in ["slice_m25811_f6_patch", "slice_m25811_f6_cls_patch"]:
|
| 86 |
+
# select_layers = [-1, -4, -7, -10, 6]
|
| 87 |
+
# image_features = torch.cat([image_features[i] for i in select_layers], dim=-1)
|
| 88 |
+
# select_feature_type = select_feature_type.replace("slice_m25811_f6_", "")
|
| 89 |
+
# else:
|
| 90 |
+
# image_features = image_features[self.select_layer]
|
| 91 |
+
|
| 92 |
+
if select_feature_type == "patch":
|
| 93 |
+
image_features = image_features[:, 1:]
|
| 94 |
+
elif select_feature_type == "cls_patch":
|
| 95 |
+
image_features = image_features
|
| 96 |
+
else:
|
| 97 |
+
raise ValueError(f"Unexpected select feature: {select_feature_type}")
|
| 98 |
+
return image_features
|
| 99 |
+
|
| 100 |
+
def train(self, mode=True):
|
| 101 |
+
self.training = mode
|
| 102 |
+
|
| 103 |
+
if self.is_loaded:
|
| 104 |
+
self.vision_tower.eval()
|
| 105 |
+
|
| 106 |
+
def forward(self, images):
|
| 107 |
+
if type(images) is list:
|
| 108 |
+
image_features = []
|
| 109 |
+
for image in images:
|
| 110 |
+
image_features = self.vision_tower.forward_features(image.to(self.dtype), return_all_features=True)
|
| 111 |
+
image_features = self.feature_select(image_features).to(self.dtype)
|
| 112 |
+
image_features.append(image_features)
|
| 113 |
+
else:
|
| 114 |
+
image_features = self.vision_tower.forward_features(images.to(self.dtype), return_all_features=True)
|
| 115 |
+
image_features = self.feature_select(image_features).to(self.dtype)
|
| 116 |
+
|
| 117 |
+
return image_features
|
| 118 |
+
|
| 119 |
+
@property
|
| 120 |
+
def dummy_feature(self):
|
| 121 |
+
return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
|
| 122 |
+
|
| 123 |
+
@property
|
| 124 |
+
def hidden_size(self):
|
| 125 |
+
return self.model_config["vision_cfg"]["width"]
|
| 126 |
+
|
| 127 |
+
@property
|
| 128 |
+
def num_patches(self):
|
| 129 |
+
return (self.model_config["vision_cfg"]["image_size"] // self.model_config["vision_cfg"]["patch_size"]) ** 2
|
| 130 |
+
|
| 131 |
+
@property
|
| 132 |
+
def num_patches_per_side(self):
|
| 133 |
+
return self.model_config["vision_cfg"]["image_size"] // self.model_config["vision_cfg"]["patch_size"]
|
| 134 |
+
|
| 135 |
+
@property
|
| 136 |
+
def config(self):
|
| 137 |
+
return self.model_config
|
| 138 |
+
|
| 139 |
+
@property
|
| 140 |
+
def image_size(self):
|
| 141 |
+
return self.model_config["vision_cfg"]["image_size"]
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/factory.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import pathlib
|
| 5 |
+
import re
|
| 6 |
+
from copy import deepcopy
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Optional, Tuple, Union, Dict, Any
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
|
| 12 |
+
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _natural_key(string_):
|
| 16 |
+
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _rescan_model_configs():
    """Rebuild the `_MODEL_CONFIGS` registry from all registered paths.

    Scans every path in `_MODEL_CONFIG_PATHS` (single .json file or a
    directory of .json files), registers configs that look like complete
    CLIP model specs, and re-sorts the registry in natural order.
    """
    global _MODEL_CONFIGS

    config_ext = (".json",)
    candidates = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            candidates.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                candidates.extend(config_path.glob(f"*{ext}"))

    for candidate in candidates:
        with open(candidate, "r", encoding="utf8") as fh:
            model_cfg = json.load(fh)
            # Only register configs that define a full CLIP model spec.
            if all(required in model_cfg for required in ("embed_dim", "vision_cfg", "text_cfg")):
                _MODEL_CONFIGS[candidate.stem] = model_cfg

    _MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda kv: _natural_key(kv[0])))
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
_rescan_model_configs() # initial populate of model config registry
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def list_models():
    """Enumerate available model architectures based on config files."""
    return list(_MODEL_CONFIGS)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def add_model_config(path):
    """Register an extra config path (file or directory) and rescan the registry."""
    path = path if isinstance(path, Path) else Path(path)
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_model_config(model_name):
    """Return a deep copy of the registered config, or None when unknown."""
    # Deep-copy so callers can mutate the config without corrupting the registry.
    cfg = _MODEL_CONFIGS.get(model_name)
    return None if cfg is None else deepcopy(cfg)
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA-CLIP-8B-plus.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1280,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 448,
|
| 5 |
+
"layers": 32,
|
| 6 |
+
"width": 4096,
|
| 7 |
+
"head_width": 128,
|
| 8 |
+
"mlp_ratio": 5,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-8b-14-plus-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"qkv_bias": false,
|
| 13 |
+
"xattn": true,
|
| 14 |
+
"postnorm": false,
|
| 15 |
+
"fusedLN": false,
|
| 16 |
+
"use_rms_norm": true
|
| 17 |
+
},
|
| 18 |
+
"text_cfg": {
|
| 19 |
+
"context_length": 77,
|
| 20 |
+
"vocab_size": 49408,
|
| 21 |
+
"width": 1280,
|
| 22 |
+
"heads": 20,
|
| 23 |
+
"layers": 32,
|
| 24 |
+
"xattn": false,
|
| 25 |
+
"fusedLN": false
|
| 26 |
+
}
|
| 27 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA01-CLIP-B-16.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 512,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 12,
|
| 6 |
+
"width": 768,
|
| 7 |
+
"patch_size": 16,
|
| 8 |
+
"eva_model_name": "eva-clip-b-16",
|
| 9 |
+
"ls_init_value": 0.1,
|
| 10 |
+
"drop_path_rate": 0.0
|
| 11 |
+
},
|
| 12 |
+
"text_cfg": {
|
| 13 |
+
"context_length": 77,
|
| 14 |
+
"vocab_size": 49408,
|
| 15 |
+
"width": 512,
|
| 16 |
+
"heads": 8,
|
| 17 |
+
"layers": 12
|
| 18 |
+
}
|
| 19 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1024,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 40,
|
| 6 |
+
"width": 1408,
|
| 7 |
+
"head_width": 88,
|
| 8 |
+
"mlp_ratio": 4.3637,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-g-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"fusedLN": true
|
| 14 |
+
},
|
| 15 |
+
"text_cfg": {
|
| 16 |
+
"context_length": 77,
|
| 17 |
+
"vocab_size": 49408,
|
| 18 |
+
"width": 1024,
|
| 19 |
+
"heads": 16,
|
| 20 |
+
"layers": 24,
|
| 21 |
+
"xattn": false,
|
| 22 |
+
"fusedLN": true
|
| 23 |
+
}
|
| 24 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1024,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 224,
|
| 5 |
+
"layers": 64,
|
| 6 |
+
"width": 1792,
|
| 7 |
+
"head_width": 112,
|
| 8 |
+
"mlp_ratio": 8.571428571428571,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-4b-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"postnorm": true,
|
| 14 |
+
"fusedLN": true
|
| 15 |
+
},
|
| 16 |
+
"text_cfg": {
|
| 17 |
+
"context_length": 77,
|
| 18 |
+
"vocab_size": 49408,
|
| 19 |
+
"width": 1280,
|
| 20 |
+
"heads": 20,
|
| 21 |
+
"layers": 32,
|
| 22 |
+
"xattn": false,
|
| 23 |
+
"fusedLN": true
|
| 24 |
+
}
|
| 25 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14-448.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"embed_dim": 1024,
|
| 3 |
+
"vision_cfg": {
|
| 4 |
+
"image_size": 448,
|
| 5 |
+
"layers": 77,
|
| 6 |
+
"width": 2304,
|
| 7 |
+
"head_width": 144,
|
| 8 |
+
"mlp_ratio": 10.9722,
|
| 9 |
+
"patch_size": 14,
|
| 10 |
+
"eva_model_name": "eva-clip-10b-14-x",
|
| 11 |
+
"drop_path_rate": 0,
|
| 12 |
+
"xattn": true,
|
| 13 |
+
"postnorm": false,
|
| 14 |
+
"fusedLN": true
|
| 15 |
+
},
|
| 16 |
+
"text_cfg": {
|
| 17 |
+
"context_length": 77,
|
| 18 |
+
"vocab_size": 49408,
|
| 19 |
+
"width": 1280,
|
| 20 |
+
"heads": 20,
|
| 21 |
+
"layers": 32,
|
| 22 |
+
"xattn": false,
|
| 23 |
+
"fusedLN": true
|
| 24 |
+
}
|
| 25 |
+
}
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/__pycache__/builder.cpython-310.pyc
ADDED
|
Binary file (3.29 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_projector/resampler.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Alibaba Cloud.
|
| 2 |
+
#
|
| 3 |
+
# This source code is licensed under the license found in the
|
| 4 |
+
# LICENSE file in the root directory of this source tree.
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import math
|
| 8 |
+
import requests
|
| 9 |
+
from io import BytesIO
|
| 10 |
+
from functools import partial
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from typing import Callable, Optional, Sequence, Tuple, List, Union
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
from torch.nn import functional as F
|
| 18 |
+
from torch.nn.init import trunc_normal_
|
| 19 |
+
from torchvision import transforms
|
| 20 |
+
from torchvision.transforms import InterpolationMode
|
| 21 |
+
|
| 22 |
+
from llava.slice_process import slice_image_feature_minicpm
|
| 23 |
+
import torchvision.ops.roi_align as RoIAlign
|
| 24 |
+
|
| 25 |
+
def get_abs_pos(abs_pos, tgt_size):
    """Resize a flattened square positional-embedding table to a target grid.

    Args:
        abs_pos: (L, C) tensor of positional embeddings; L is assumed to be
            a perfect square (side ** 2).
        tgt_size: (H, W) target grid size.

    Returns:
        (H * W, C) tensor of bicubically interpolated embeddings, converted
        back to the input's original dtype.
    """
    side = int(math.sqrt(abs_pos.size(0)))
    orig_dtype = abs_pos.dtype

    # Interpolation runs in float32 for numerical stability, then the
    # result is cast back to the caller's dtype.
    grid = abs_pos.float().reshape(1, side, side, -1).permute(0, 3, 1, 2)
    resized = F.interpolate(
        grid,
        size=(tgt_size[0], tgt_size[1]),
        mode="bicubic",
        align_corners=False,
    )
    return resized.permute(0, 2, 3, 1).flatten(0, 2).to(dtype=orig_dtype)
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
|
| 41 |
+
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """Build a 2D sine-cosine positional embedding table.

    Args:
        embed_dim: embedding dimension (must be even).
        grid_size: height/width of the square grid.
        cls_token: if True, prepend an all-zero row for a [CLS] position.

    Returns:
        ndarray of shape (grid_size**2, embed_dim), or
        (1 + grid_size**2, embed_dim) when cls_token is True.
    """
    axis = np.arange(grid_size, dtype=np.float32)
    # meshgrid(w, h): width varies fastest, matching the MAE reference impl.
    mesh = np.stack(np.meshgrid(axis, axis), axis=0)
    mesh = mesh.reshape([2, 1, grid_size, grid_size])

    table = get_2d_sincos_pos_embed_from_grid(embed_dim, mesh)
    if cls_token:
        table = np.concatenate([np.zeros([1, embed_dim]), table], axis=0)
    return table
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode a 2D coordinate grid: half the channels for each axis.

    Args:
        embed_dim: total embedding dimension (must be even).
        grid: array of shape (2, ...) holding the two coordinate planes.

    Returns:
        ndarray of shape (H*W, embed_dim) with [axis-0 | axis-1] halves
        concatenated along the channel dimension.
    """
    assert embed_dim % 2 == 0

    half = embed_dim // 2
    part_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    part_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([part_h, part_w], axis=1)  # (H*W, D)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Encode scalar positions with sin/cos at geometrically spaced frequencies.

    Args:
        embed_dim: output dimension per position (must be even).
        pos: array of positions to encode; flattened to (M,).

    Returns:
        ndarray of shape (M, embed_dim): sin half then cos half.
    """
    assert embed_dim % 2 == 0

    half = embed_dim // 2
    freqs = np.arange(half, dtype=np.float32)
    freqs /= embed_dim / 2.
    freqs = 1. / 10000 ** freqs  # (D/2,)

    positions = pos.reshape(-1)  # (M,)
    angles = np.einsum('m,d->md', positions, freqs)  # (M, D/2) outer product

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class Resampler(nn.Module):
    """A 2D perceiver-style resampler.

    One cross-attention layer pools image features into (grid_size ** 2)
    learnable queries, using a fixed 2D sin-cos positional embedding on the
    keys.

    Output: a tensor of shape (batch, grid_size ** 2, embed_dim).
    """

    def __init__(
        self,
        grid_size,
        embed_dim,
        num_heads,
        kv_dim=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6)
    ):
        super().__init__()
        self.grid_size = grid_size
        self.num_queries = grid_size ** 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads

        # Fixed (non-trainable) 2D sin-cos positional table, one row per
        # grid cell; resized at forward time via get_abs_pos.
        self.pos_embed = nn.Parameter(
            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
        ).requires_grad_(False)

        # Learnable latent queries, one per output token.
        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)

        # Project keys/values into embed_dim only when their width differs.
        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)

        self.ln_post = norm_layer(embed_dim)
        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Standard truncated-normal init for linears, unit-scale for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, tgt_size=(24, 24), attn_mask=None):
        """Cross-attend the learnable queries over image features.

        Args:
            x: image features; the first element is taken and re-batched
               to batch size 1 (see NOTE below).
            tgt_size: (H, W) spatial size of the key feature grid.
            attn_mask: optional attention mask forwarded to MultiheadAttention.

        Returns:
            (1, num_queries, embed_dim) tensor of pooled features.
        """
        height, width = tgt_size
        # NOTE(review): only the first element of the input is used and
        # re-batched to size 1 — confirm callers always pass batch size 1
        # (or a list whose first entry is the intended feature map).
        x = x[0]
        x = x.unsqueeze(0)
        # Truncate any extra tokens beyond the H*W key grid.
        if height * width != x.shape[1]:
            x = x[:, :height * width]

        # NOTE(review): hard-coded cast to bfloat16 — assumes the module's
        # weights are bfloat16 too; TODO confirm against the training setup.
        x = x.to(torch.bfloat16)
        dtype = x.dtype
        bs = x.shape[0]
        key_height, key_width = tgt_size
        # Interpolate the fixed positional table to the key grid size.
        key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))

        x = self.ln_kv(self.kv_proj(x))
        q = self.ln_q(self.query)
        # Queries carry no positional embedding; only keys do.
        query = self._repeat(q, bs)
        key = x + key_pos_embed[None].to(dtype=dtype)
        value = x

        # nn.MultiheadAttention expects (seq, batch, dim).
        out, attn_weights = self.attn(
            query.permute(1, 0, 2),
            key.permute(1, 0, 2),
            value.permute(1, 0, 2),
            attn_mask=attn_mask
        )

        # Back to (batch, seq, dim), then post-norm and output projection.
        x = out.permute(1, 0, 2)
        x = self.ln_post(x)
        x = x @ self.proj
        return x

    def _repeat(self, query, N: int):
        # Tile the (num_queries, dim) query table across the batch.
        return query.unsqueeze(0).repeat(N, 1, 1)
|
| 184 |
+
|
| 185 |
+
class Resampler_ln(nn.Module):
    """A 2D perceiver-style resampler (variant of Resampler).

    One cross-attention layer pools image features into (grid_size ** 2)
    learnable queries with a fixed 2D sin-cos positional embedding on the
    keys. Differs from Resampler in its forward: the input batch is used
    as-is (no x[0] re-batching), and the output projection is applied
    BEFORE ln_post rather than after.

    Output: a tensor of shape (batch, grid_size ** 2, embed_dim).
    """

    def __init__(
        self,
        grid_size,
        embed_dim,
        num_heads,
        kv_dim=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6)
    ):
        super().__init__()
        self.grid_size = grid_size
        self.num_queries = grid_size ** 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads

        # Fixed (non-trainable) 2D sin-cos positional table.
        self.pos_embed = nn.Parameter(
            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
        ).requires_grad_(False)

        # Learnable latent queries, one per output token.
        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)

        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)

        self.ln_post = norm_layer(embed_dim)
        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Standard truncated-normal init for linears, unit-scale for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, tgt_size=(24, 24), attn_mask=None):
        """Cross-attend the learnable queries over image features.

        Args:
            x: (batch, seq, kv_dim) image features.
            tgt_size: (H, W) spatial size of the key feature grid.
            attn_mask: optional attention mask forwarded to MultiheadAttention.

        Returns:
            (batch, num_queries, embed_dim) tensor of pooled features.
        """
        height, width = tgt_size
        # Truncate any extra tokens beyond the H*W key grid.
        if height * width != x.shape[1]:
            x = x[:, :height * width]

        # NOTE(review): hard-coded cast to bfloat16 — assumes the module's
        # weights are bfloat16 too; TODO confirm against the training setup.
        x = x.to(torch.bfloat16)
        dtype = x.dtype
        bs = x.shape[0]
        key_height, key_width = tgt_size
        # Interpolate the fixed positional table to the key grid size.
        key_pos_embed = get_abs_pos(self.pos_embed, (key_height, key_width))

        x = self.ln_kv(self.kv_proj(x))
        q = self.ln_q(self.query)
        # Queries carry no positional embedding; only keys do.
        query = self._repeat(q, bs)
        key = x + key_pos_embed[None].to(dtype=dtype)
        value = x

        # nn.MultiheadAttention expects (seq, batch, dim).
        out, attn_weights = self.attn(
            query.permute(1, 0, 2),
            key.permute(1, 0, 2),
            value.permute(1, 0, 2),
            attn_mask=attn_mask
        )

        # Back to (batch, seq, dim); projection is applied before the final
        # LayerNorm (the opposite order of Resampler — presumably intended,
        # given the class name; verify with the training recipe).
        x = out.permute(1, 0, 2)
        x = x @ self.proj
        x = self.ln_post(x)
        return x

    def _repeat(self, query, N: int):
        # Tile the (num_queries, dim) query table across the batch.
        return query.unsqueeze(0).repeat(N, 1, 1)
|
| 273 |
+
|
VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/spatial_pool.cpython-310.pyc
ADDED
|
Binary file (1.87 kB). View file
|
|
|
VLMEvalKit-sudoku/llava/model/multimodal_resampler/builder.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from .masked_drop import MaskedDrop
|
| 4 |
+
from .spatial_pool import SpatialPool
|
| 5 |
+
from .perceiver import PerceiverResampler
|
| 6 |
+
from .qformer import Qformer
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class IdentityMap(torch.nn.Module):
    """Pass-through resampler used when no resampling is configured."""

    def __init__(self):
        super().__init__()

    def forward(self, x, *args, **kwargs):
        # Extra positional/keyword arguments are accepted for interface
        # compatibility with real resamplers, and ignored.
        return x

    @property
    def config(self):
        """Serializable description of this (null) resampler."""
        return {"mm_resampler_type": None}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def build_vision_resampler(model_args, delay_load=False, **kwargs):
    """Instantiate the resampler named by ``model_args.mm_resampler_type``.

    Returns a pass-through IdentityMap when no type is configured, and
    raises ValueError for an unrecognized type. ``delay_load`` is accepted
    for interface compatibility; it is not used here.
    """
    resampler_type = getattr(model_args, "mm_resampler_type", None)
    if resampler_type is None:
        return IdentityMap()
    if resampler_type == "masked_drop":
        return MaskedDrop(model_args)
    if resampler_type == "spatial_pool":
        return SpatialPool(model_args, **kwargs)
    if resampler_type == "perceiver":
        return PerceiverResampler(model_args, **kwargs)
    if resampler_type == "qformer":
        return Qformer(model_args, **kwargs)
    raise ValueError(f"Unknown resampler type: {resampler_type}")
|
VLMEvalKit-sudoku/llava/model/multimodal_resampler/masked_drop.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
import random
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class MaskedDrop(nn.Module):
    """Training-time token dropping: randomly keeps a subset of image
    tokens per sample; identity at eval time."""

    def __init__(self, model_args):
        super().__init__()

        # "fixed": keep a fixed ratio; "range": sample a ratio per image;
        # "cls_only": keep only the first token.
        self.mode = model_args.mm_mask_drop_mode
        # Probability of skipping the drop entirely for a forward pass.
        self.skip_percentage = model_args.mm_mask_drop_skip_percentage
        self.ratio = model_args.mm_mask_drop_ratio
        self.ratio_upper = model_args.mm_mask_drop_ratio_upper
        self.ratio_lower = model_args.mm_mask_drop_ratio_lower

    def forward(self, image_features, *args, **kwargs):
        """Drop tokens from each feature tensor in *image_features*."""

        # Dropping is a training-time augmentation only.
        if not self.training:
            return image_features

        # With probability skip_percentage, leave the features untouched.
        if self.skip_percentage > random.random():
            return image_features

        masked_features = []

        for image_feature in image_features:
            num_tokens = image_feature.shape[0]
            if self.mode == "fixed":
                num_keep = int(num_tokens * self.ratio)
                # random_masking returns (x_masked, mask, ids_restore);
                # take x_masked and strip the singleton batch dim.
                masked_features.append(self.random_masking(image_feature.unsqueeze(0), num_keep)[0][0])
            elif self.mode == "range":
                num_keep = int(num_tokens * random.uniform(self.ratio_lower, self.ratio_upper))
                # NOTE(review): unlike "fixed", this keeps the leading
                # batch dim of 1 ([0], not [0][0]) — confirm the asymmetry
                # is intended by downstream consumers.
                masked_features.append(self.random_masking(image_feature.unsqueeze(0), num_keep)[0])
            elif self.mode == "cls_only":
                # Keep only the first token.
                masked_features.append(image_feature[0:1])
            else:
                raise ValueError(f"Unexpected masked drop mode: {self.mode}")

        # Stack back into one tensor when per-image lengths are equal
        # (everything except "range", which produces ragged lengths).
        if self.mode not in ["range"] and (type(image_features) is not list or self.mode in ["cls_only"]):
            masked_features = torch.stack(masked_features, dim=0)

        return masked_features

    @property
    def config(self):
        """Serializable description of this resampler's settings."""
        return {
            "mm_resampler_type": "masked_drop",
            "mm_mask_drop_mode": self.mode,
            "mm_mask_drop_skip_percentage": self.skip_percentage,
            "mm_mask_drop_ratio": self.ratio,
            "mm_mask_drop_ratio_upper": self.ratio_upper,
            "mm_mask_drop_ratio_lower": self.ratio_lower,
        }

    def random_masking(self, x, len_keep):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence

        Returns (x_masked, mask, ids_restore) where mask is 0 for kept
        and 1 for removed tokens, in the original token order.
        """
        N, L, D = x.shape  # batch, length, dim

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore
|
VLMEvalKit-sudoku/llava/slice_process.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from PIL import Image
|
| 3 |
+
from torchvision.transforms import ToTensor, ToPILImage
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
import random
|
| 8 |
+
from imgaug import augmenters as iaa
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
# Hard-coded token ids used as patch-layout separators; values match the
# LLaMA tokenizer — TODO confirm against the tokenizer actually in use.
NEWLINE_TOKEN = 13  # '\n'
DOT_TOKEN = 29892  # ','
|
| 13 |
+
|
| 14 |
+
def split_to_patches(image, grid):
    """Crop *image* into a grid of equal-sized patches.

    Args:
        image: PIL image whose size is divisible by *grid* (the caller
            snaps it via get_refine_size).
        grid: (grid_x, grid_y) patch counts along width and height.

    Returns:
        Nested list: one inner list per row (top to bottom), patches
        ordered left to right inside each row.
    """
    width, height = image.size
    step_x = int(width / grid[0])
    step_y = int(height / grid[1])

    rows = []
    for top in range(0, height, step_y):
        row = [
            image.crop((left, top, left + step_x, top + step_y))
            for left in range(0, width, step_x)
        ]
        rows.append(row)
    return rows
|
| 29 |
+
|
| 30 |
+
def get_refine_size(
    original_size, grid, scale_resolution, patch_size, allow_upscale=False
):
    """Compute the size an image should be resized to before grid slicing.

    The image is first snapped to dimensions divisible by the grid; each
    grid cell is then sized via find_best_resize, and the refined size is
    that cell size multiplied back up by the grid.
    """
    width, height = original_size
    grid_x, grid_y = grid

    snapped_w = ensure_divide(width, grid_x)
    snapped_h = ensure_divide(height, grid_y)

    cell_size = (snapped_w / grid_x, snapped_h / grid_y)
    best_cell = find_best_resize(
        cell_size,
        scale_resolution,
        patch_size,
        allow_upscale=allow_upscale,
    )

    return (best_cell[0] * grid_x, best_cell[1] * grid_y)
|
| 52 |
+
|
| 53 |
+
def ensure_divide(length, patch_size):
    """Round *length* down to a whole multiple of *patch_size* (min one patch).

    Floor (not round) is used, so the result never exceeds *length* unless
    *length* is below a single patch.
    """
    whole_patches = math.floor(length / patch_size)
    return max(whole_patches * patch_size, patch_size)
|
| 56 |
+
|
| 57 |
+
def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False, any_res=False):
    """Pick a patch-aligned size near scale_resolution², preserving aspect ratio.

    With any_res=True the area is clamped into [256², scale_resolution²];
    otherwise the image is rescaled only when it exceeds the area budget
    (or unconditionally when allow_upscale is set). Both dimensions are
    floored to multiples of patch_size.
    """
    width, height = original_size
    aspect = width / height
    if any_res:
        if width * height > scale_resolution * scale_resolution:
            # Shrink to the area budget.
            height = int(scale_resolution / math.sqrt(aspect))
            width = int(height * aspect)
        elif width * height < 256 * 256:
            # Grow tiny inputs; the lower area bound is hard-coded at 256x256.
            height = int(256 / math.sqrt(aspect))
            width = int(height * aspect)
    else:
        if allow_upscale or width * height > scale_resolution * scale_resolution:
            # Rescale so that width * height ≈ scale_resolution ** 2.
            height = int(scale_resolution / math.sqrt(aspect))
            width = int(height * aspect)
    return (ensure_divide(width, patch_size), ensure_divide(height, patch_size))
|
| 75 |
+
|
| 76 |
+
def slice_image_minicpm(
    image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False, any_res=False
):
    """Adaptively slice an image into a grid of patches (MiniCPM-V scheme).

    Decides, from the image area relative to scale_resolution², whether to
    keep a single resized image or also cut a higher-resolution version
    into a best-fitting grid of patches.

    Args:
        image: input PIL image.
        max_slice_nums: maximum number of grid cells.
        scale_resolution: target resolution budget per view.
        patch_size: ViT patch size; all sizes are snapped to multiples of it.
        never_split: force the single-image path.
        any_res: forwarded to find_best_resize (area-clamped resizing).

    Returns:
        (source_image, patches, best_grid, ind_tokens) where patches is a
        flat list (empty when not sliced), best_grid is [cols, rows] or
        None, and ind_tokens are separator token ids laying out the grid.
    """
    original_size = image.size
    original_width, original_height = original_size
    log_ratio = math.log(original_width / original_height)
    # How many scale_resolution² tiles the image area amounts to.
    ratio = original_width * original_height / (scale_resolution * scale_resolution)
    multiple = min(math.ceil(ratio), max_slice_nums)

    source_image = None
    best_grid = None
    patches = []

    if multiple <= 1 or never_split:
        # No slicing needed: resize (possibly upsample) the whole image.
        best_size = find_best_resize(
            original_size, scale_resolution, patch_size, allow_upscale=True, any_res=any_res
        )
        source_image = image.resize(best_size, Image.Resampling.BICUBIC)
    else:
        # Consider grids of multiple±1 cells (skipping 1 and anything over
        # the cap).
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        # Low-resolution overview image, patch-size aligned.
        best_resize = find_best_resize(original_size, scale_resolution, patch_size, any_res=any_res)
        source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        candidate_grids = []

        # Enumerate all factorizations [m, n] of each candidate cell count.
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        # Pick the grid whose aspect ratio is closest (in log space) to the
        # image's aspect ratio.
        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        refine_size = get_refine_size(
            original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
        )

        refine_image = image.resize(refine_size, Image.Resampling.BICUBIC)
        patches = split_to_patches(refine_image, best_grid)

    ind_tokens = []
    if best_grid is None:
        return source_image, patches, best_grid, ind_tokens
    else:
        # Flatten the row-major patch lists into one list.
        patches = [item for sublist in patches for item in sublist]
        # Separator layout: ',' between patches in a row, '\n' at row ends.
        for j in range(best_grid[1]):
            for i in range(best_grid[0]):
                if i != best_grid[0] - 1:
                    ind_tokens.append(DOT_TOKEN)
                else:
                    ind_tokens.append(NEWLINE_TOKEN)

    return source_image, patches, best_grid, ind_tokens
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def split_image(image, scale=672, grid=(2, 2)):
    """Resize *image* to scale x scale and cut it into grid[0] x grid[1] tiles.

    Tiles are returned column-major: all tiles of the first column (top to
    bottom), then the next column, and so on.
    """
    resized = image.resize((scale, scale))
    width, height = resized.size
    tile_w = width // grid[0]
    tile_h = height // grid[1]

    tiles = []
    for col in range(grid[0]):
        for row in range(grid[1]):
            left = col * tile_w
            upper = row * tile_h
            tiles.append(resized.crop((left, upper, left + tile_w, upper + tile_h)))
    return tiles
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def generate_subimage_coordinates(H, W, h, w, num_windows):
    """Tile an H x W feature map into windows of size h x w.

    Args:
        H: height of the full feature map.
        W: width of the full feature map.
        h: window height (may be fractional; row count is round(H / h)).
        w: window width (may be fractional; column count is round(W / w)).
        num_windows: expected window count; asserted equal to rows * cols.

    Returns:
        (num_windows, 4) float32 tensor of (x1, y1, x2, y2) boxes, row-major.
    """
    rows = int(round(H / h))
    cols = int(round(W / w))
    assert rows * cols == num_windows, f'H:{H}, W:{W}, h:{h}, w:{w}, rows:{H/h}, cols:{W/w}'

    boxes = []
    for r in range(rows):
        for c in range(cols):
            x1, y1 = c * w, r * h
            boxes.append([x1, y1, x1 + w, y1 + h])
    return torch.tensor(boxes, dtype=torch.float32)
|
| 196 |
+
|
| 197 |
+
def slice_image_feature_minicpm(
    image_feature, num_windows=144, max_slice_nums=1000, num_ratio=1):
    """Compute RoI boxes that tile a feature map into ~num_windows windows.

    Mirrors the grid search of slice_image_minicpm but operates on feature
    maps: windows preserve the map's aspect ratio and cover it exactly.

    Args:
        image_feature: (b, c, h, w) feature tensor.
        num_windows: target number of windows per image.
        max_slice_nums: cap on candidate grid-cell counts.
        num_ratio: unused here — presumably kept for interface
            compatibility; TODO confirm before removing.

    Returns:
        (batch_region_boxes, best_grid, aspect) where batch_region_boxes is
        a (b * num_windows, 5) tensor of (batch_idx, x1, y1, x2, y2) rows
        suitable for RoIAlign, best_grid is the aspect-matched [cols, rows]
        grid, and aspect is width / height.
    """
    bs = image_feature.shape[0]
    dtype, device = image_feature.dtype, image_feature.device
    feature_size = image_feature.shape[-2:]
    feature_height, feature_width = feature_size
    log_ratio = math.log(feature_width / feature_height)
    # Average number of feature cells per window.
    ratio = feature_height * feature_width / num_windows
    multiple = min(math.ceil(ratio), max_slice_nums)

    # Candidate cell counts: multiple±1, skipping 1 and anything over cap.
    candidate_split_grids_nums = []
    for i in [multiple - 1, multiple, multiple + 1]:
        if i == 1 or i > max_slice_nums:
            continue
        candidate_split_grids_nums.append(i)

    candidate_grids = []
    # Enumerate all factorizations [m, n] of each candidate count.
    for split_grids_nums in candidate_split_grids_nums:
        m = 1
        while m <= split_grids_nums:
            if split_grids_nums % m == 0:
                candidate_grids.append([m, split_grids_nums // m])
            m += 1

    # Pick the grid whose aspect ratio is closest (in log space) to the
    # feature map's aspect ratio.
    best_grid = [1, 1]
    min_error = float("inf")
    for grid in candidate_grids:
        error = abs(log_ratio - math.log(grid[0] / grid[1]))
        if error < min_error:
            best_grid = grid
            min_error = error

    # Window size solving (Iw * Ih) / n = (Iw / Ih) * h^2 for h, so windows
    # share the map's aspect ratio and tile its area into n pieces.
    float_crop_height = math.sqrt(ratio / (feature_width / feature_height))
    float_crop_width = float_crop_height * (feature_width / feature_height)

    region_boxes = generate_subimage_coordinates(feature_height, feature_width,
                                                 float_crop_height, float_crop_width, num_windows)

    # Boxes carry no gradient; match the feature's dtype/device.
    region_boxes = region_boxes.to(dtype=dtype, device=device).detach()
    # Prefix each box with its batch index (RoIAlign format).
    batch_region_boxes = []
    for i in range(bs):
        batch_id = torch.ones_like(region_boxes)[:, :1] * i
        batch_region_boxes.append(torch.cat([batch_id, region_boxes], dim=1))
    batch_region_boxes = torch.cat(batch_region_boxes)

    return batch_region_boxes, best_grid, feature_width / feature_height
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def resize_image_keep_ratio(image, max_size=1024):
    """Rescale *image* so its longer side equals *max_size*, keeping the
    aspect ratio (bicubic resampling)."""
    width, height = image.size
    if width > height:
        new_w = max_size
        new_h = int((max_size / width) * height)
    else:
        new_h = max_size
        new_w = int((max_size / height) * width)
    return image.resize((new_w, new_h), Image.Resampling.BICUBIC)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def aug_image(image):
    """Randomly augment a PIL image for training-time diversity.

    Each augmentation fires independently with its own probability:
    50% downscale (longer side to 1024 px), 10% linear contrast,
    10% sharpen, 20% hue shift, 10% JPEG compression.

    Args:
        image: PIL image to augment.

    Returns:
        The (possibly) augmented PIL image.
    """
    if random.random() < 0.5:
        image = resize_image_keep_ratio(image, max_size=1024)
    # (probability, augmenter factory) pairs. The augmenter is only built
    # when its coin flip succeeds, matching the original control flow and
    # preserving the sequence of random.random() calls.
    pipeline = [
        (0.1, lambda: iaa.contrast.LinearContrast((0.5, 2.0), per_channel=False)),
        (0.1, lambda: iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.75, 1.5))),
        (0.2, lambda: iaa.AddToHue((-50, 50))),
        (0.1, lambda: iaa.JpegCompression(compression=(75, 95))),
    ]
    for prob, make_aug in pipeline:
        if random.random() < prob:
            augmenter = make_aug()
            image = Image.fromarray(augmenter(image=np.array(image)))
    return image
|
| 281 |
+
|
VLMEvalKit-sudoku/scripts/srun.sh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Launch a VLMEvalKit evaluation on a Slurm cluster.
# Usage: srun.sh <partition> [run.py args...]
set -x
# Quote "$1" and "${@:2}" so the partition name and forwarded arguments
# survive word splitting / globbing (unquoted, args with spaces would break).
srun -n1 --ntasks-per-node=1 --partition "$1" --gres=gpu:8 --quotatype=reserved --job-name vlmeval --cpus-per-task=64 torchrun --nproc-per-node=8 run.py "${@:2}"
|
VLMEvalKit-sudoku/setup.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import sys
|
| 3 |
+
from os.path import exists
|
| 4 |
+
from setuptools import find_packages, setup
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Supports ``-r`` includes, ``-e``/VCS entries, version specifiers and
    platform markers (``pkg>=1.0; sys_platform == 'win32'``).

    Args:
        fname (str): path to requirements file.
        with_version (bool): if True, keep version specifiers in the
            returned items; otherwise only package names are returned.

    Returns:
        List[str]: list of requirements items. Empty if *fname* does not exist.

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """

    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                info['package'] = line
            else:
                # Strip the version specifier from the package name. Covers
                # all PEP 440 comparison operators (the original handled only
                # '>=', '==', '>'). Longer operators must precede their
                # prefixes ('>=' before '>', '<=' before '<') so re.split
                # matches the full operator.
                ops = ['>=', '==', '>', '<=', '<', '~=', '!=']
                pat = '(' + '|'.join(ops) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]

                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed entries for every non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        # Reassemble each parsed entry back into a requirement string,
        # optionally keeping the version and platform marker.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# Long package description for PyPI, taken verbatim from the README.
with open('README.md', encoding="utf-8") as f:
    readme = f.read()
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def do_setup():
    """Configure and run setuptools for the ``vlmeval`` package."""
    # Assemble all setup() keyword arguments in a single mapping so the
    # metadata is easy to scan and the call site stays compact.
    metadata = dict(
        name='vlmeval',
        version='0.1.0',
        description='OpenCompass VLM Evaluation Kit',
        author='Haodong Duan',
        author_email='dhd.efz@gmail.com',
        maintainer='Haodong Duan',
        maintainer_email='dhd.efz@gmail.com',
        long_description=readme,
        long_description_content_type='text/markdown',
        cmdclass={},
        # Runtime dependencies come straight from requirements.txt.
        install_requires=parse_requirements('requirements.txt'),
        setup_requires=[],
        python_requires='>=3.7.0',
        packages=find_packages(exclude=[
            'test*',
            'paper_test*',
        ]),
        keywords=['AI', 'NLP', 'in-context learning'],
        entry_points={
            'console_scripts': ['vlmutil = vlmeval:cli']
        },
        classifiers=[
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: 3.10',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
        ],
    )
    setup(**metadata)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# Script entry point: build/install the package when executed directly.
if __name__ == '__main__':
    do_setup()
|
VLMEvalKit-sudoku/vlmeval/api/__pycache__/bailingmm.cpython-310.pyc
ADDED
|
Binary file (3.36 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/api/__pycache__/jt_vl_chat.cpython-310.pyc
ADDED
|
Binary file (9.05 kB). View file
|
|
|
VLMEvalKit-sudoku/vlmeval/api/__pycache__/kimivl_api.cpython-310.pyc
ADDED
|
Binary file (5.55 kB). View file
|
|
|