ZzzHelloWorld committed on
Commit
eb282ac
·
verified ·
1 Parent(s): a5eb8a5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. VLMEvalKit-sudoku/docs/en/Quickstart.md +221 -0
  2. VLMEvalKit-sudoku/docs/en/_static/image/logo.svg +24 -0
  3. VLMEvalKit-sudoku/docs/en/_templates/404.html +18 -0
  4. VLMEvalKit-sudoku/docs/en/_templates/callable.rst +14 -0
  5. VLMEvalKit-sudoku/docs/zh-CN/Makefile +20 -0
  6. VLMEvalKit-sudoku/docs/zh-CN/_static/css/readthedocs.css +63 -0
  7. VLMEvalKit-sudoku/docs/zh-CN/_static/js/custom.js +10 -0
  8. VLMEvalKit-sudoku/llava/__pycache__/mm_utils.cpython-310.pyc +0 -0
  9. VLMEvalKit-sudoku/llava/constants.py +12 -0
  10. VLMEvalKit-sudoku/llava/conversation.py +596 -0
  11. VLMEvalKit-sudoku/llava/eval/eval_docvqa.py +63 -0
  12. VLMEvalKit-sudoku/llava/eval/eval_gpt_review_bench.py +121 -0
  13. VLMEvalKit-sudoku/llava/eval/eval_gpt_review_visual.py +118 -0
  14. VLMEvalKit-sudoku/llava/eval/eval_science_qa.py +114 -0
  15. VLMEvalKit-sudoku/llava/eval/model_vqa_loader.py +256 -0
  16. VLMEvalKit-sudoku/llava/eval/qa_baseline_gpt35.py +74 -0
  17. VLMEvalKit-sudoku/llava/model/__init__.py +17 -0
  18. VLMEvalKit-sudoku/llava/model/__pycache__/__init__.cpython-310.pyc +0 -0
  19. VLMEvalKit-sudoku/llava/model/__pycache__/builder.cpython-310.pyc +0 -0
  20. VLMEvalKit-sudoku/llava/model/builder.py +211 -0
  21. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/attn_res.cpython-310.pyc +0 -0
  22. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-310.pyc +0 -0
  23. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_moonvit.cpython-310.pyc +0 -0
  24. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_siglip2.cpython-310.pyc +0 -0
  25. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2_zyc.cpython-310.pyc +0 -0
  26. VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/open_clip_encoder.cpython-310.pyc +0 -0
  27. VLMEvalKit-sudoku/llava/model/multimodal_encoder/attn_res.py +103 -0
  28. VLMEvalKit-sudoku/llava/model/multimodal_encoder/clip_encoder.py +223 -0
  29. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/factory.py +528 -0
  30. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/loss.py +123 -0
  31. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-18B.json +27 -0
  32. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B-plus.json +27 -0
  33. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-g-14.json +24 -0
  34. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-L-14-336.json +29 -0
  35. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-bigE-14.json +25 -0
  36. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14-448.json +25 -0
  37. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14.json +25 -0
  38. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/openai.py +144 -0
  39. VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/timm_model.py +114 -0
  40. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_encoder.py +74 -0
  41. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_vit.py +856 -0
  42. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA-CLIP-18B.json +27 -0
  43. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-B-16.json +29 -0
  44. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-bigE-14.json +25 -0
  45. VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14.json +25 -0
  46. VLMEvalKit-sudoku/llava/model/multimodal_encoder/hf_vision.py +111 -0
  47. VLMEvalKit-sudoku/llava/model/multimodal_encoder/hubconf.py +213 -0
  48. VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_siglip2.py +1386 -0
  49. VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_siglip2_cjm.py +1187 -0
  50. VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_swin_siglip2.py +1319 -0
VLMEvalKit-sudoku/docs/en/Quickstart.md ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Quickstart
2
+
3
+ Before running the evaluation script, you need to **configure** the VLMs and set the model_paths properly.
4
+
5
 + After that, you can use a single script `run.py` to run inference on and evaluate multiple VLMs and benchmarks at the same time.
6
+
7
+ ## Step 0. Installation & Setup essential keys
8
+
9
+ **Installation.**
10
+
11
+ ```bash
12
+ git clone https://github.com/open-compass/VLMEvalKit.git
13
+ cd VLMEvalKit
14
+ pip install -e .
15
+ ```
16
+
17
+ **Setup Keys.**
18
+
19
 + To infer with API models (GPT-4v, Gemini-Pro-V, etc.) or use LLM APIs as the **judge or choice extractor**, you need to first set up API keys. VLMEvalKit will use a judge **LLM** to extract answers from the output if you set the key, otherwise it uses the **exact matching** mode (find "Yes", "No", "A", "B", "C"... in the output strings). **The exact matching can only be applied to the Yes-or-No tasks and the Multi-choice tasks.**
20
+ - You can place the required keys in `$VLMEvalKit/.env` or directly set them as the environment variable. If you choose to create a `.env` file, its content will look like:
21
+
22
+ ```bash
23
+ # The .env file, place it under $VLMEvalKit
24
+ # API Keys of Proprietary VLMs
25
+ # QwenVL APIs
26
+ DASHSCOPE_API_KEY=
27
+ # Gemini w. Google Cloud Backends
28
+ GOOGLE_API_KEY=
29
+ # OpenAI API
30
+ OPENAI_API_KEY=
31
+ OPENAI_API_BASE=
32
+ # StepAI API
33
+ STEPAI_API_KEY=
34
+ # REKA API
35
+ REKA_API_KEY=
36
+ # GLMV API
37
+ GLMV_API_KEY=
38
+ # CongRong API
39
+ CW_API_BASE=
40
+ CW_API_KEY=
41
+ # SenseNova API
42
+ SENSENOVA_API_KEY=
43
+ # Hunyuan-Vision API
44
+ HUNYUAN_SECRET_KEY=
45
+ HUNYUAN_SECRET_ID=
46
+ # LMDeploy API
47
+ LMDEPLOY_API_BASE=
48
+ # You can also set a proxy for calling api models during the evaluation stage
49
+ EVAL_PROXY=
50
+ ```
51
+
52
+ - Fill the blanks with your API keys (if necessary). Those API keys will be automatically loaded when doing the inference and evaluation.
53
+ ## Step 1. Configuration
54
+
55
 + **VLM Configuration**: All VLMs are configured in `vlmeval/config.py`. A few legacy VLMs (like MiniGPT-4, LLaVA-v1-7B) require additional configuration (configuring the code / model_weight root in the config file). During evaluation, you should use the model name specified in `supported_VLM` in `vlmeval/config.py` to select the VLM. Make sure you can successfully infer with the VLM before starting the evaluation with the following command `vlmutil check {MODEL_NAME}`.
56
+
57
+ ## Step 2. Evaluation
58
+
59
+ **New!!!** We integrated a new config system to enable more flexible evaluation settings. Check the [Document](/docs/en/ConfigSystem.md) or run `python run.py --help` for more details 🔥🔥🔥
60
+
61
+ We use `run.py` for evaluation. To use the script, you can use `$VLMEvalKit/run.py` or create a soft-link of the script (to use the script anywhere):
62
+
63
+ **Arguments**
64
+
65
+ - `--data (list[str])`: Set the dataset names that are supported in VLMEvalKit (names can be found in the codebase README).
66
+ - `--model (list[str])`: Set the VLM names that are supported in VLMEvalKit (defined in `supported_VLM` in `vlmeval/config.py`).
67
+ - `--mode (str, default to 'all', choices are ['all', 'infer'])`: When `mode` set to "all", will perform both inference and evaluation; when set to "infer", will only perform the inference.
68
+ - `--api-nproc (int, default to 4)`: The number of threads for OpenAI API calling.
69
+ - `--work-dir (str, default to '.')`: The directory to save evaluation results.
70
+
71
 + **Command for Evaluating Image Benchmarks**
72
+
73
+ You can run the script with `python` or `torchrun`:
74
+
75
+ ```bash
76
+ # When running with `python`, only one VLM instance is instantiated, and it might use multiple GPUs (depending on its default behavior).
77
+ # That is recommended for evaluating very large VLMs (like IDEFICS-80B-Instruct).
78
+
79
 + # IDEFICS-80B-Instruct on MMBench_DEV_EN, MME, and SEEDBench_IMG, Inference and Evaluation
80
+ python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose
81
+ # IDEFICS-80B-Instruct on MMBench_DEV_EN, MME, and SEEDBench_IMG, Inference only
82
+ python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose --mode infer
83
+
84
+ # When running with `torchrun`, one VLM instance is instantiated on each GPU. It can speed up the inference.
85
+ # However, that is only suitable for VLMs that consume small amounts of GPU memory.
86
+
87
+ # IDEFICS-9B-Instruct, Qwen-VL-Chat, mPLUG-Owl2 on MMBench_DEV_EN, MME, and SEEDBench_IMG. On a node with 8 GPU. Inference and Evaluation.
88
+ torchrun --nproc-per-node=8 run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct qwen_chat mPLUG-Owl2 --verbose
89
+ # Qwen-VL-Chat on MME. On a node with 2 GPU. Inference and Evaluation.
90
+ torchrun --nproc-per-node=2 run.py --data MME --model qwen_chat --verbose
91
+ ```
92
+
93
+ **Command for Evaluating Video Benchmarks**
94
+
95
+ ```bash
96
+ # When running with `python`, only one VLM instance is instantiated, and it might use multiple GPUs (depending on its default behavior).
97
+ # That is recommended for evaluating very large VLMs (like IDEFICS-80B-Instruct).
98
+
99
+ # IDEFICS2-8B on MMBench-Video, with 8 frames as inputs and vanilla evaluation. On a node with 8 GPUs. MMBench_Video_8frame_nopack is a defined dataset setting in `vlmeval/dataset/video_dataset_config.py`.
100
+ torchrun --nproc-per-node=8 run.py --data MMBench_Video_8frame_nopack --model idefics2_8
101
+ # GPT-4o (API model) on MMBench-Video, with 1 frame per second as inputs and pack evaluation (all questions of a video in a single query).
102
+ python run.py --data MMBench_Video_1fps_pack --model GPT4o
103
+ ```
104
+
105
 + The evaluation results will be printed as logs. Besides, **Result Files** will also be generated in the directory `$YOUR_WORKING_DIRECTORY/{model_name}`. Files ending with `.csv` contain the evaluated metrics.
106
+
107
+ ### Frequently Asked Questions
108
+
109
+ #### Constructing Input Prompt: The `build_prompt()` Function
110
+ If you find that the model's output does not match the expected results when evaluating a specific benchmark, it could be due to the model not constructing the input prompt correctly.
111
+
112
+ In VLMEvalKit, each `dataset` class includes a function named `build_prompt()`, which is responsible for formatting input questions. Different benchmarks can either customize their own `build_prompt()` function or use the default implementation.
113
+
114
+ For instance, when handling the default [Multiple-Choice QA](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/dataset/image_mcq.py#L164), the `ImageMCQDataset.build_prompt()` method combines elements such as `hint`, `question`, and `options` (if present in the dataset) into a complete question format, as shown below:
115
+
116
+ ```
117
+ HINT
118
+ QUESTION
119
+ Options:
120
+ A. Option A
121
+ B. Option B
122
+ ···
123
+ Please select the correct answer from the options above.
124
+ ```
125
+
126
+ Additionally, since different models may have varying evaluation requirements, VLMEvalKit also supports customizing the prompt construction method at the model level through `model.build_prompt()`. For an example, you can refer to [InternVL](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/vlm/internvl_chat.py#L324).
127
+
128
+ **Note: If both `model.build_prompt()` and `dataset.build_prompt()` are defined, `model.build_prompt()` will take precedence over `dataset.build_prompt()`, effectively overriding it.**
129
+
130
+ Some models, such as Qwen2VL and InternVL, define extensive prompt-building methods for various types of benchmarks. To provide more flexibility in adapting to different benchmarks, VLMEvalKit allows users to customize the `model.use_custom_prompt()` function within the model. By adding or modifying the `use_custom_prompt()` function, you can decide which benchmarks should utilize the model's custom prompt logic. Below is an example:
131
+
132
+ ```python
133
+ def use_custom_prompt(self, dataset: str) -> bool:
134
+ from vlmeval.dataset import DATASET_TYPE, DATASET_MODALITY
135
+ dataset_type = DATASET_TYPE(dataset, default=None)
136
+ if not self._use_custom_prompt:
137
+ return False
138
+ if listinstr(['MMVet'], dataset):
139
+ return True
140
+ if dataset_type == 'MCQ':
141
+ return True
142
+ if DATASET_MODALITY(dataset) == 'VIDEO':
143
+ return False
144
+ return False
145
+ ```
146
+ Only when the `use_custom_prompt()` function returns `True` will VLMEvalKit call the model's `build_prompt()` function for the current benchmark.
147
+ With this approach, you can flexibly control which benchmarks use the model's custom prompt logic based on your specific needs, thereby better adapting to different models and tasks.
148
+
149
+ #### Model Splitting
150
+
151
+ Currently, VLMEvalKit automatically supports GPU resource allocation and model splitting between processes on the same machine. This feature is supported when the inference backend is `lmdeploy` or `transformers`, with the following behaviors:
152
+
153
+ - When launching with `python` command, the model is by default allocated to all available GPUs. If you want to specify which GPUs to use, you can use `CUDA_VISIBLE_DEVICES` environment variable.
154
+ - When starting with `torchrun` command, each model instance will be allocated to `N_GPU // N_PROC` GPUs, where `N_PROC` is the number of processes specified by the `--nproc-per-node` parameter in the torchrun command. The value of `N_GPU` is determined as follows:
155
+ - If `CUDA_VISIBLE_DEVICES` environment variable is not set, `N_GPU` will be the total number of available GPUs.
156
+ - If `CUDA_VISIBLE_DEVICES` environment variable is set, `N_GPU` will be the number of GPUs specified by the `CUDA_VISIBLE_DEVICES` environment variable, and only the specified GPUs will be utilized.
157
+ Below are specific examples of running evaluation tasks on a machine equipped with 8 GPUs:
158
+
159
+ ```bash
160
 + # Launch two model instances in data parallel, each instance using 4 GPUs
161
+ torchrun --nproc-per-node=2 run.py --data MMBench_DEV_EN --model InternVL3-78B
162
 + # Launch one model instance, using all 8 GPUs
163
+ python run.py --data MMBench_DEV_EN --model InternVL3-78B
164
 + # Launch three model instances, each instance using 2 GPUs, GPU 0 and 7 are not used
165
+ CUDA_VISIBLE_DEVICES=1,2,3,4,5,6 torchrun --nproc-per-node=3 run.py --data MMBench_DEV_EN --model InternVL3-38B
166
+ ```
167
+
168
+ PS: The feature is not compatible with `vllm` backend. When you evaluate a model with `vllm` backend, please use `python` to launch, and all visible GPU devices will be used.
169
+
170
+ #### Performance Discrepancies
171
+
172
+ Model performance may vary across different environments. As a result, you might observe discrepancies between your evaluation results and those listed on the official VLMEvalKit leaderboard. These differences could be attributed to variations in versions of libraries such as `transformers`, `cuda`, and `torch`.
173
+
174
+ Besides, if you encounter unexpected performance, we recommend first reviewing the local generation records (`{model}_{dataset}.xlsx`) or the evaluation records (`{model}_{dataset}_{judge_model}.xlsx`). This may help you better understand the evaluation outcomes and identify potential issues.
175
+
176
+ ## Deploy a local language model as the judge / choice extractor
177
+ The default setting mentioned above uses OpenAI's GPT as the judge LLM. However, you can also deploy a local judge LLM with [LMDeploy](https://github.com/InternLM/lmdeploy).
178
+
179
+ First install:
180
+ ```
181
+ pip install lmdeploy openai
182
+ ```
183
+
184
 + And then deploy a local judge LLM with a single line of code. LMDeploy will automatically download the model from Huggingface. Assuming we use internlm2-chat-1_8b as the judge, port 23333, and the key sk-123456 (the key must start with "sk-" and be followed by any number you like):
185
+ ```
186
+ lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333
187
+ ```
188
+
189
+ You need to get the model name registered by LMDeploy with the following python code:
190
+ ```
191
+ from openai import OpenAI
192
+ client = OpenAI(
193
+ api_key='sk-123456',
194
+ base_url="http://0.0.0.0:23333/v1"
195
+ )
196
+ model_name = client.models.list().data[0].id
197
+ ```
198
+
199
+ Now set some environment variables to tell VLMEvalKit how to use the local judge LLM. As mentioned above, you can also set them in `$VLMEvalKit/.env` file:
200
+ ```
201
+ OPENAI_API_KEY=sk-123456
202
+ OPENAI_API_BASE=http://0.0.0.0:23333/v1/chat/completions
203
+ LOCAL_LLM=<model_name you get>
204
+ ```
205
+
206
+ Finally, you can run the commands in step 2 to evaluate your VLM with the local judge LLM.
207
+
208
+ Note that
209
+
210
+ - If you hope to deploy the judge LLM in a single GPU and evaluate your VLM on other GPUs because of limited GPU memory, try `CUDA_VISIBLE_DEVICES=x` like
211
+ ```
212
+ CUDA_VISIBLE_DEVICES=0 lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333
213
+ CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc-per-node=3 run.py --data HallusionBench --model qwen_chat --verbose
214
+ ```
215
+ - If the local judge LLM is not good enough in following the instructions, the evaluation may fail. Please report such failures (e.g., by issues).
216
+ - It's possible to deploy the judge LLM in different ways, e.g., use a private LLM (not from HuggingFace) or use a quantized LLM. Please refer to the [LMDeploy doc](https://lmdeploy.readthedocs.io/en/latest/serving/api_server.html). You can use any other deployment framework if they support OpenAI API.
217
+
218
+
219
+ ### Using LMDeploy to Accelerate Evaluation and Inference
220
+
221
 + You can refer to this [doc](/docs/en/EvalByLMDeploy.md)
VLMEvalKit-sudoku/docs/en/_static/image/logo.svg ADDED
VLMEvalKit-sudoku/docs/en/_templates/404.html ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% extends "layout.html" %}
2
+
3
+ {% block body %}
4
+
5
+ <h1>Page Not Found</h1>
6
+ <p>
7
+ The page you are looking for cannot be found.
8
+ </p>
9
+ <p>
10
+ If you just switched documentation versions, it is likely that the page you were on is moved. You can look for it in
11
+ the content table left, or go to <a href="{{ pathto(root_doc) }}">the homepage</a>.
12
+ </p>
13
+ <!-- <p>
14
+ If you cannot find documentation you want, please <a
15
+ href="">open an issue</a> to tell us!
16
+ </p> -->
17
+
18
+ {% endblock %}
VLMEvalKit-sudoku/docs/en/_templates/callable.rst ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. role:: hidden
2
+ :class: hidden-section
3
+ .. currentmodule:: {{ module }}
4
+
5
+
6
+ {{ name | underline}}
7
+
8
+ .. autoclass:: {{ name }}
9
+ :members:
10
+ :special-members: __call__
11
+
12
+ ..
13
+ autogenerated from _templates/callable.rst
14
+ note it does not have :inherited-members:
VLMEvalKit-sudoku/docs/zh-CN/Makefile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS ?=
7
+ SPHINXBUILD ?= sphinx-build
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
VLMEvalKit-sudoku/docs/zh-CN/_static/css/readthedocs.css ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .header-logo {
2
+ background-image: url("../image/logo.svg");
3
+ background-size: 275px 80px;
4
+ height: 80px;
5
+ width: 275px;
6
+ }
7
+
8
+
9
+ @media screen and (min-width: 1100px) {
10
+ .header-logo {
11
+ top: -25px;
12
+ }
13
+ }
14
+
15
+ pre {
16
+ white-space: pre;
17
+ }
18
+
19
+ @media screen and (min-width: 2000px) {
20
+ .pytorch-content-left {
21
+ width: 1200px;
22
+ margin-left: 30px;
23
+ }
24
+ article.pytorch-article {
25
+ max-width: 1200px;
26
+ }
27
+ .pytorch-breadcrumbs-wrapper {
28
+ width: 1200px;
29
+ }
30
+ .pytorch-right-menu.scrolling-fixed {
31
+ position: fixed;
32
+ top: 45px;
33
+ left: 1580px;
34
+ }
35
+ }
36
+
37
+
38
+ article.pytorch-article section code {
39
+ padding: .2em .4em;
40
+ background-color: #f3f4f7;
41
+ border-radius: 5px;
42
+ }
43
+
44
+ /* Disable the change in tables */
45
+ article.pytorch-article section table code {
46
+ padding: unset;
47
+ background-color: unset;
48
+ border-radius: unset;
49
+ }
50
+
51
+ table.autosummary td {
52
+ width: 50%
53
+ }
54
+
55
+ img.align-center {
56
+ display: block;
57
+ margin-left: auto;
58
+ margin-right: auto;
59
+ }
60
+
61
+ article.pytorch-article p.rubric {
62
+ font-weight: bold;
63
+ }
VLMEvalKit-sudoku/docs/zh-CN/_static/js/custom.js ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ var collapsedSections = [];
2
+
3
+ $(document).ready(function () {
4
+ $('.model-summary').DataTable({
5
+ "stateSave": false,
6
+ "lengthChange": false,
7
+ "pageLength": 20,
8
+ "order": []
9
+ });
10
+ });
VLMEvalKit-sudoku/llava/__pycache__/mm_utils.cpython-310.pyc ADDED
Binary file (13.9 kB). View file
 
VLMEvalKit-sudoku/llava/constants.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
2
+ WORKER_HEART_BEAT_INTERVAL = 15
3
+
4
+ LOGDIR = "."
5
+
6
+ # Model Constants
7
+ IGNORE_INDEX = -100
8
+ IMAGE_TOKEN_INDEX = -200
9
+ DEFAULT_IMAGE_TOKEN = "<image>"
10
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
11
+ DEFAULT_IM_START_TOKEN = "<im_start>"
12
+ DEFAULT_IM_END_TOKEN = "<im_end>"
VLMEvalKit-sudoku/llava/conversation.py ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from enum import auto, Enum
3
+ from typing import List, Any, Dict, Union, Tuple
4
+ import re
5
+ import base64
6
+ from io import BytesIO
7
+ from PIL import Image
8
+ from transformers import AutoTokenizer
9
+
10
+
11
+ class SeparatorStyle(Enum):
12
+ """Different separator style."""
13
+
14
+ SINGLE = auto()
15
+ TWO = auto()
16
+ MPT = auto()
17
+ PLAIN = auto()
18
+ CHATML = auto()
19
+ LLAMA_2 = auto()
20
+ LLAMA_3 = auto()
21
+ QWEN = auto()
22
+ GEMMA = auto()
23
+
24
+
25
+ @dataclasses.dataclass
26
+ class Conversation:
27
+ """A class that keeps all conversation history."""
28
+
29
+ system: str
30
+ roles: List[str]
31
+ messages: List[List[str]]
32
+ offset: int
33
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
34
+ sep: str = "###"
35
+ sep2: str = None
36
+ version: str = "Unknown"
37
+
38
+ tokenizer_id: str = ""
39
+ tokenizer: Any = None
40
+ # Stop criteria (the default one is EOS token)
41
+ stop_str: Union[str, List[str]] = None
42
+ # Stops generation if meeting any token in this list
43
+ stop_token_ids: List[int] = None
44
+
45
+ skip_next: bool = False
46
+
47
+ def get_prompt(self):
48
+ messages = self.messages
49
+ if len(messages) > 0 and type(messages[0][1]) is tuple:
50
+ messages = self.messages.copy()
51
+ init_role, init_msg = messages[0].copy()
52
+ init_msg = init_msg[0]
53
+ if "mmtag" in self.version:
54
+ init_msg = init_msg.replace("<image>", "").strip()
55
+ messages[0] = (init_role, init_msg)
56
+ messages.insert(0, (self.roles[0], "<Image><image></Image>"))
57
+ messages.insert(1, (self.roles[1], "Received."))
58
+ elif not init_msg.startswith("<image>"):
59
+ init_msg = init_msg.replace("<image>", "").strip()
60
+ messages[0] = (init_role, "<image>\n" + init_msg)
61
+ else:
62
+ messages[0] = (init_role, init_msg)
63
+
64
+ if self.sep_style == SeparatorStyle.SINGLE:
65
+ ret = self.system + self.sep
66
+ for role, message in messages:
67
+ if message:
68
+ if type(message) is tuple:
69
+ message, _, _ = message
70
+ ret += role + ": " + message + self.sep
71
+ else:
72
+ ret += role + ":"
73
+
74
+ elif self.sep_style == SeparatorStyle.TWO:
75
+ seps = [self.sep, self.sep2]
76
+ ret = self.system + seps[0]
77
+ for i, (role, message) in enumerate(messages):
78
+ if message:
79
+ if type(message) is tuple:
80
+ message, _, _ = message
81
+ ret += role + ": " + message + seps[i % 2]
82
+ else:
83
+ ret += role + ":"
84
+
85
+ elif self.sep_style == SeparatorStyle.CHATML:
86
+ ret = "" if self.system == "" else self.system + self.sep + "\n"
87
+ for role, message in messages:
88
+ if message:
89
+ if type(message) is tuple:
90
+ message, images, _ = message
91
+ message = "<image>" * len(images) + message
92
+ ret += role + "\n" + message + self.sep + "\n"
93
+ else:
94
+ ret += role + "\n"
95
+ return ret
96
+
97
+ elif self.sep_style == SeparatorStyle.LLAMA_3:
98
+ if self.tokenizer is None:
99
+ raise ValueError("Llama 3 tokenizer is not available. Make sure you have the necessary permissions.")
100
+ chat_template_messages = [{"role": "system", "content": self.system}]
101
+ for role, message in messages:
102
+ if message:
103
+ if type(message) is tuple:
104
+ message, images = message
105
+ message = "<image>" * len(images) + message
106
+ chat_template_messages.append({"role": role, "content": message})
107
+
108
+ # print(chat_template_messages)
109
+ return self.tokenizer.apply_chat_template(chat_template_messages, tokenize=False, add_generation_prompt=True)
110
+ # ret = "" if self.system == "" else self.system + self.sep + "\n"
111
+ # for role, message in messages:
112
+ # if message:
113
+ # if type(message) is tuple:
114
+ # message, images = message
115
+ # message = "<image>" * len(images) + message
116
+ # ret += role + "\n" + message + self.sep + "\n"
117
+ # else:
118
+ # ret += role + "\n"
119
+ # return ret
120
+
121
+ elif self.sep_style == SeparatorStyle.MPT:
122
+ ret = self.system + self.sep
123
+ for role, message in messages:
124
+ if message:
125
+ if type(message) is tuple:
126
+ message, _, _ = message
127
+ ret += role + message + self.sep
128
+ else:
129
+ ret += role
130
+
131
+ elif self.sep_style == SeparatorStyle.GEMMA:
132
+ ret = ""
133
+ for i, (role, message) in enumerate(messages):
134
+ assert role == self.roles[i % 2], "Conversation should alternate user/assistant/user/assistant/..."
135
+ if message:
136
+ if type(message) is tuple:
137
+ message, _, _ = message
138
+ ret += role + message + self.sep
139
+ else:
140
+ ret += role
141
+
142
+ elif self.sep_style == SeparatorStyle.LLAMA_2:
143
+ wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
144
+ wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
145
+ ret = ""
146
+
147
+ for i, (role, message) in enumerate(messages):
148
+ if i == 0:
149
+ assert message, "first message should not be none"
150
+ assert role == self.roles[0], "first message should come from user"
151
+ if message:
152
+ if type(message) is tuple:
153
+ message, _, _ = message
154
+ if i == 0:
155
+ message = wrap_sys(self.system) + message
156
+ if i % 2 == 0:
157
+ message = wrap_inst(message)
158
+ ret += self.sep + message
159
+ else:
160
+ ret += " " + message + " " + self.sep2
161
+ else:
162
+ ret += ""
163
+ ret = ret.lstrip(self.sep)
164
+
165
+ elif self.sep_style == SeparatorStyle.PLAIN:
166
+ seps = [self.sep, self.sep2]
167
+ ret = self.system
168
+ for i, (role, message) in enumerate(messages):
169
+ if message:
170
+ if type(message) is tuple:
171
+ message, _, _ = message
172
+ ret += message + seps[i % 2]
173
+ else:
174
+ ret += ""
175
+ else:
176
+ raise ValueError(f"Invalid style: {self.sep_style}")
177
+
178
+ return ret
179
+
180
+ def append_message(self, role, message):
181
+ self.messages.append([role, message])
182
+
183
+ def process_image(self, image, image_process_mode, return_pil=False, image_format="PNG"):
184
+ if image_process_mode == "Pad":
185
+
186
+ def expand2square(pil_img, background_color=(122, 116, 104)):
187
+ width, height = pil_img.size
188
+ if width == height:
189
+ return pil_img
190
+ elif width > height:
191
+ result = Image.new(pil_img.mode, (width, width), background_color)
192
+ result.paste(pil_img, (0, (width - height) // 2))
193
+ return result
194
+ else:
195
+ result = Image.new(pil_img.mode, (height, height), background_color)
196
+ result.paste(pil_img, ((height - width) // 2, 0))
197
+ return result
198
+
199
+ image = expand2square(image)
200
+ elif image_process_mode in ["Default", "Crop"]:
201
+ pass
202
+ elif image_process_mode == "Resize":
203
+ image = image.resize((336, 336))
204
+ else:
205
+ raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
206
+
207
+ if type(image) is not Image.Image:
208
+ image = Image.open(image).convert("RGB")
209
+
210
+ max_hw, min_hw = max(image.size), min(image.size)
211
+ aspect_ratio = max_hw / min_hw
212
+ max_len, min_len = 672, 448
213
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
214
+ longest_edge = int(shortest_edge * aspect_ratio)
215
+ W, H = image.size
216
+ if H > W:
217
+ H, W = longest_edge, shortest_edge
218
+ else:
219
+ H, W = shortest_edge, longest_edge
220
+ image = image.resize((W, H))
221
+ if return_pil:
222
+ return image
223
+ else:
224
+ buffered = BytesIO()
225
+ image.save(buffered, format=image_format)
226
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
227
+ return img_b64_str
228
+
229
def get_images(self, return_pil=False, return_path=False):
    """Collect the images attached to user turns of the conversation.

    Parameters:
        return_pil: forwarded to process_image — return PIL images rather
            than base64 strings.
        return_path: when True, skip processing and return raw paths/objects.

    Returns a flat list, one entry per image, in conversation order.
    """
    images = []
    # User turns sit at even offsets of messages[offset:]; an image-bearing
    # message is a (text, image_or_list, process_mode) tuple.
    for i, (role, msg) in enumerate(self.messages[self.offset :]):
        if i % 2 == 0:
            if type(msg) is tuple:
                msg, image, image_process_mode = msg
                if type(image) != list:
                    image = [image]
                for img in image:
                    if not return_path and self.is_image_file(img):
                        img = self.process_image(img, image_process_mode, return_pil=return_pil)
                    # Fix: always append. The original appended only in the
                    # else branch, so processed image files were dropped from
                    # the returned list entirely.
                    images.append(img)
    return images
243
+
244
def is_image_file(self, filename):
    """True when *filename* has a common raster-image extension (case-insensitive)."""
    return filename.lower().endswith((".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp"))
247
+
248
def is_video_file(self, filename):
    """True when *filename* has a common video extension (case-insensitive)."""
    return filename.lower().endswith((".mp4", ".mov", ".avi", ".mkv", ".wmv", ".flv", ".mpeg", ".mpg"))
251
+
252
def to_gradio_chatbot(self):
    """Render the conversation as Gradio chatbot pairs.

    User turns (even offsets of messages[offset:]) become the first slot of a
    [user, assistant] pair; the following assistant turn is written into the
    last pair's second slot. Image attachments are inlined as base64 <img>
    HTML; video attachments are emitted as Gradio file tuples.
    """
    ret = []
    for i, (role, msg) in enumerate(self.messages[self.offset :]):
        if i % 2 == 0:
            if type(msg) is tuple:
                # Image-bearing message: (text, image_or_list, process_mode).
                msg, image, image_process_mode = msg
                if type(image) != list:
                    image = [image]
                if len(image) == 1:
                    # Single image: normalize to one leading <image> tag.
                    msg = "<image>\n" + msg.replace("<image>", "").strip()
                else:
                    # Multiple images: collapse newline-separated tags onto one line.
                    msg = re.sub(r"(<image>)\n(?=<image>)", r"\1 ", msg)

                img_str_list = []
                for img in image:
                    if self.is_image_file(img):
                        # JPEG keeps the inline payload small for the chat UI.
                        img_b64_str = self.process_image(img, "Default", return_pil=False, image_format="JPEG")
                        img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" style="max-width: 256px; max-height: 256px; width: auto; height: auto; object-fit: contain;"/>'
                        img_str_list.append(img_str)
                    elif self.is_video_file(img):
                        # Videos are passed to Gradio as a (filepath,) tuple entry.
                        # NOTE(review): a later assistant turn does ret[-1][-1] = msg,
                        # which would fail on this tuple entry — confirm videos are
                        # always followed by a text pair before the assistant reply.
                        ret.append(((img,), None))

                msg = msg.strip()
                img_place_holder = ""
                for img_str in img_str_list:
                    img_place_holder += f"{img_str}\n\n"

                if len(img_str_list) > 0:
                    msg = f"{img_place_holder}\n\n{msg}"

                if len(msg) > 0:
                    ret.append([msg, None])
            else:
                # Plain-text user turn.
                ret.append([msg, None])
        else:
            # Assistant turn: fill the reply slot of the most recent pair.
            ret[-1][-1] = msg
    return ret
289
+
290
def copy(self):
    """Return an independent Conversation sharing no message-list structure.

    The message pairs are shallow-copied so appending to the clone does not
    mutate the original's history.
    """
    cloned_messages = [[who, what] for who, what in self.messages]
    return Conversation(
        system=self.system,
        roles=self.roles,
        messages=cloned_messages,
        offset=self.offset,
        sep_style=self.sep_style,
        sep=self.sep,
        sep2=self.sep2,
        version=self.version,
    )
292
+
293
def dict(self):
    """Serialize conversation state to a plain dict.

    When the conversation carries images, tuple messages are flattened to
    their text component; otherwise messages are passed through unchanged.
    sep_style/version/tokenizer fields are intentionally omitted.
    """
    if len(self.get_images()) > 0:
        serialized = [[who, what[0] if type(what) is tuple else what] for who, what in self.messages]
    else:
        serialized = self.messages
    return {
        "system": self.system,
        "roles": self.roles,
        "messages": serialized,
        "offset": self.offset,
        "sep": self.sep,
        "sep2": self.sep2,
    }
311
+
312
+
313
# Legacy Vicuna v0 template: "###"-separated Human/Assistant turns, seeded
# with a two-turn few-shot example; offset=2 makes the message-slicing
# helpers (get_images, to_gradio_chatbot) skip that seed.
conv_vicuna_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=[
        ["Human", "What are the key differences between renewable and non-renewable energy sources?"],
        [
            "Assistant",
            "Renewable energy sources are those that can be replenished naturally in a relatively "
            "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
            "Non-renewable energy sources, on the other hand, are finite and will eventually be "
            "depleted, such as coal, oil, and natural gas. Here are some key differences between "
            "renewable and non-renewable energy sources:\n"
            "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
            "energy sources are finite and will eventually run out.\n"
            "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
            "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
            "and other negative effects.\n"
            "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
            "have lower operational costs than non-renewable sources.\n"
            "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
            "locations than non-renewable sources.\n"
            "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
            "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
            "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
            "non-renewable sources are not, and their depletion can lead to economic and social instability.\n",
        ],
    ],
    offset=2,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)
344
+
345
# Vicuna v1: USER/ASSISTANT roles with the TWO-separator style
# (sep after user turns, sep2="</s>" after assistant turns).
conv_vicuna_v1 = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

# Llama-2 chat template with the safety-oriented system prompt.
conv_llama_2 = Conversation(
    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

# LLaVA on Llama-2: vision-assistant system prompt, same LLAMA_2 separators.
conv_llava_llama_2 = Conversation(
    system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)
379
+
380
def safe_load_tokenizer(tokenizer_id):
    """Best-effort HF tokenizer load.

    Returns the tokenizer, or None when loading fails for any reason
    (missing model, no network, gated repo, ...), so module import never
    breaks on tokenizer availability.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
    except Exception:
        return None
    return tokenizer
385
+
386
# Llama-3 template: carries the HF tokenizer (loaded best-effort, may be
# None offline) and an explicit stop-token id list.
conv_llava_llama_3 = Conversation(
    system="You are a helpful language and vision assistant. " "You are able to understand the visual content that the user provides, " "and assist the user with a variety of tasks using natural language.",
    roles=("user", "assistant"),
    version="llama_v3",
    messages=[],
    offset=0,
    sep="<|eot_id|>",
    sep_style=SeparatorStyle.LLAMA_3,
    tokenizer_id="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer=safe_load_tokenizer("meta-llama/Meta-Llama-3-8B-Instruct"),
    stop_token_ids=[128009],
)

# Mistral instruct: LLAMA_2-style rendering but with no system prompt and
# an empty leading separator.
conv_mistral_instruct = Conversation(
    system="",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="",
    sep2="</s>",
)

# Llama-2 variant with a terse QA-style system prompt.
conv_llava_llama_2_simple = Conversation(
    system="Answer the questions about the visual content that the user provides.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)
420
+
421
# Llama-2 variant where images are wrapped in <Image>...</Image> tags
# (the "mmtag" convention).
conv_llava_llama_2_mmtag = Conversation(
    system="Answer the questions about the visual content that the user provides." "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    version="llama_v2_mmtag",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.LLAMA_2,
    sep="<s>",
    sep2="</s>",
)

# MPT / ChatML-like template: role markers embed <|im_start|> directly.
conv_mpt = Conversation(
    system="""<|im_start|>system
A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

# Qwen 1.5/2 ChatML template with the stock system prompt.
conv_qwen = Conversation(
    system="""<|im_start|>system
You are a helpful assistant.""",
    roles=("<|im_start|>user", "<|im_start|>assistant"),
    version="qwen",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.CHATML,
    sep="<|im_end|>",
)

# Qwen 3: same ChatML separators but an empty system prompt.
conv_qwen3 = Conversation(
    system="",
    roles=("<|im_start|>user", "<|im_start|>assistant"),
    version="qwen3",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.CHATML,
    sep="<|im_end|>",
)

# Gemma instruct template (<start_of_turn>/<end_of_turn> markers).
conv_gemma_instruct = Conversation(system="", roles=("<start_of_turn>user\n", "<start_of_turn>model\n"), version="gemma", messages=[], offset=0, sep_style=SeparatorStyle.GEMMA, sep="<end_of_turn>\n")
465
+
466
# Plain template: no roles or system prompt, newline-joined (used for
# pretraining-style captioning data).
conv_llava_plain = Conversation(
    system="",
    roles=("", ""),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.PLAIN,
    sep="\n",
)

# LLaVA v0: "###"-separated Human/Assistant turns, no few-shot seed.
conv_llava_v0 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "Assistant"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

# v0 with <Image>...</Image> mmtag wrapping for visual content.
conv_llava_v0_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
    "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("Human", "Assistant"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
    version="v0_mmtag",
)

# LLaVA v1: Vicuna-v1-style USER/ASSISTANT turns with TWO separators.
conv_llava_v1 = Conversation(
    system="A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions.",
    roles=("USER", "ASSISTANT"),
    version="v1",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
)

# v1 with <Image>...</Image> mmtag wrapping for visual content.
conv_llava_v1_mmtag = Conversation(
    system="A chat between a curious user and an artificial intelligence assistant. "
    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
    "The visual content will be provided with the following format: <Image>visual content</Image>.",
    roles=("USER", "ASSISTANT"),
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.TWO,
    sep=" ",
    sep2="</s>",
    version="v1_mmtag",
)
519
+
520
# MistralOrca: ChatML-style (MPT separators) with the OpenOrca system prompt.
conv_mistral_orca = Conversation(
    system="""<|im_start|>system
You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

# Zephyr: <|user|>/<|assistant|> markers, turns terminated by </s>.
conv_mistral_zephyr = Conversation(
    system="""<|system|>
You are a helpful AI assistant.""",
    roles=("<|user|>\n", "<|assistant|>\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="</s>",
)

# Minimal "Answer the questions." ChatML variant.
conv_mistral_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)

# Identical contents to conv_mistral_direct under a separate name, so the
# two registry entries can diverge later without aliasing.
conv_chatml_direct = Conversation(
    system="""<|im_start|>system
Answer the questions.""",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    version="mpt",
    messages=[],
    offset=0,
    sep_style=SeparatorStyle.MPT,
    sep="<|im_end|>",
)
563
+
564
# Template used when no conversation mode is specified.
default_conversation = conv_vicuna_v0
# Registry mapping template names to Conversation instances. Several names
# deliberately alias the same object (e.g. "qwen_1_5" and "qwen_2" both map
# to conv_qwen; "mistral_instruct" and "llava_mistral_instruct" are shared).
conv_templates = {
    "default": conv_vicuna_v0,
    "v0": conv_vicuna_v0,
    "v1": conv_vicuna_v1,
    "vicuna_v1": conv_vicuna_v1,
    "llama_2": conv_llama_2,
    "mistral_instruct": conv_mistral_instruct,
    "mistral_orca": conv_mistral_orca,
    "mistral_zephyr": conv_mistral_zephyr,
    "mistral_direct": conv_mistral_direct,
    "plain": conv_llava_plain,
    "v0_plain": conv_llava_plain,
    "chatml_direct": conv_chatml_direct,
    "llava_v0": conv_llava_v0,
    "llava_v0_mmtag": conv_llava_v0_mmtag,
    "llava_v1": conv_llava_v1,
    "llava_v1_mmtag": conv_llava_v1_mmtag,
    "llava_llama_2": conv_llava_llama_2,
    "llava_llama_3": conv_llava_llama_3,
    "llava_llama_2_simple": conv_llava_llama_2_simple,
    "llava_llama_2_mmtag": conv_llava_llama_2_mmtag,
    "llava_mistral_instruct": conv_mistral_instruct,
    "mpt": conv_mpt,
    "qwen_1_5": conv_qwen,
    "qwen_2": conv_qwen,
    "qwen3": conv_qwen3,
    "gemma_instruct": conv_gemma_instruct,
}
593
+
594
+
595
if __name__ == "__main__":
    # Smoke test: render the default template's prompt.
    print(default_conversation.get_prompt())
VLMEvalKit-sudoku/llava/eval/eval_docvqa.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import argparse
3
+ import json
4
+ import re
5
+
6
+ from llava.eval.m4c_evaluator import STVQAANLSEvaluator
7
+
8
def get_args():
    """Parse CLI arguments for the DocVQA ANLS evaluation script."""
    parser = argparse.ArgumentParser()
    # All flags are optional string paths; dest names follow argparse's
    # dash-to-underscore convention (e.g. --annotation-file -> annotation_file).
    for flag in ('--annotation-file', '--result-file', '--result-dir',
                 '--mid_result', '--output_result'):
        parser.add_argument(flag, type=str)
    return parser.parse_args()
16
+
17
+
18
def eval_single(annotation_file, result_file):
    """Score one JSONL prediction file against JSONL annotations with ANLS.

    Returns (num_samples, accuracy_percent, enriched_results) where
    enriched_results is each prediction record augmented with its ground
    truth. Raises KeyError when a prediction has no matching annotation.
    """
    experiment_name = os.path.splitext(os.path.basename(result_file))[0]
    print(experiment_name)
    # annotations = json.load(open(annotation_file))['data']
    # Annotations are JSONL, keyed by (question_id, lower-cased question) so
    # each prediction is matched to its exact question.
    annotations = [
        json.loads(q) for q in open(os.path.expanduser(annotation_file), "r")
    ]
    annotations = {(annotation['question_id'], annotation['question'].lower()): annotation for annotation in annotations}
    results = [json.loads(line) for line in open(result_file)]

    pred_list = []
    mid_list = []
    for result in results:
        annotation = annotations[(result['question_id'], result['prompt'].lower())]
        pred_list.append({
            "pred_answer": result['text'],
            "gt_answers": [annotation['answer']],
        })
        # Keep the raw result, augmented with ground truth, for debug dumps.
        mid_list.append(result)
        mid_list[-1]["gt_answers"] = annotation['answer']

    evaluator = STVQAANLSEvaluator()
    acc = evaluator.eval_pred_list(pred_list)
    acc = 100. * acc
    print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), acc))
    return len(pred_list), acc, mid_list
44
+
45
+
46
if __name__ == "__main__":
    args = get_args()

    # Single-file mode: evaluate one prediction file.
    if args.result_file is not None:
        samples, acc, mid_result = eval_single(args.annotation_file, args.result_file)

    # Directory mode: evaluate every .jsonl under --result-dir.
    if args.result_dir is not None:
        for result_file in sorted(os.listdir(args.result_dir)):
            if not result_file.endswith('.jsonl'):
                print(f'Skipping {result_file}')
                continue
            samples, acc, mid_result = eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))

    # with open(args.mid_result, 'w') as f:
    #     json.dump(mid_result, f, indent=2)

    # with open(args.output_result, 'w') as f:
    #     json.dump({'samples': samples, 'acc': acc}, f, indent=2)
VLMEvalKit-sudoku/llava/eval/eval_gpt_review_bench.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import openai
6
+ import time
7
+
8
# Delay between retries when the OpenAI API call fails.
NUM_SECONDS_TO_SLEEP = 0.5


def get_eval(content: str, max_tokens: int):
    """Send *content* to gpt-4-0314 and return the reply text.

    Retries indefinitely on any API failure, sleeping NUM_SECONDS_TO_SLEEP
    between attempts.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            # Fix: back off on rate limits. The original retried this branch
            # with no delay, hammering the API in a tight loop.
            time.sleep(NUM_SECONDS_TO_SLEEP)
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']
34
+
35
+
36
def parse_score(review):
    """Extract the two scores from the first line of a GPT review.

    The first line is expected to hold two numbers separated by a space or a
    comma. Returns [score1, score2] as floats; on any malformed input the
    review is printed and [-1, -1] is returned.
    """
    try:
        first_line = review.split('\n')[0]
        tokens = first_line.replace(',', ' ').split(' ')
        if len(tokens) == 2:
            return [float(tokens[0]), float(tokens[1])]
        print('error', review)
        return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
50
+
51
+
52
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    # Questions and the two answer files are parallel JSONL streams.
    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    # Resume support: previously written reviews are counted and skipped.
    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        inst = image_to_context[ques['image']]

        if isinstance(inst['caption'], list):
            cap_str = '\n'.join(inst['caption'])
        else:
            cap_str = inst['caption']

        category = 'llava_bench_' + json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        # NOTE(review): ans1 falls back to its question_id, but ans2's .get()
        # default is ans2['answer_id'] itself — that default is evaluated
        # eagerly and raises KeyError when 'answer_id' is absent; likely
        # should be ans2['question_id'].
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['answer_id']),
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
VLMEvalKit-sudoku/llava/eval/eval_gpt_review_visual.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+
5
+ import openai
6
+ import time
7
+
8
# Delay between retries when the OpenAI API call fails.
NUM_SECONDS_TO_SLEEP = 0.5


def get_eval(content: str, max_tokens: int):
    """Send *content* to gpt-4-0314 and return the reply text.

    Retries indefinitely on any API failure, sleeping NUM_SECONDS_TO_SLEEP
    between attempts.
    """
    while True:
        try:
            response = openai.ChatCompletion.create(
                model='gpt-4-0314',
                messages=[{
                    'role': 'system',
                    'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
                }, {
                    'role': 'user',
                    'content': content,
                }],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            break
        except openai.error.RateLimitError:
            # Fix: back off on rate limits. The original retried this branch
            # with no delay, hammering the API in a tight loop.
            time.sleep(NUM_SECONDS_TO_SLEEP)
        except Exception as e:
            print(e)
            time.sleep(NUM_SECONDS_TO_SLEEP)

    return response['choices'][0]['message']['content']
34
+
35
+
36
def parse_score(review):
    """Extract the two scores from the first line of a GPT review.

    The first line is expected to hold two numbers separated by a space or a
    comma. Returns [score1, score2] as floats; on any malformed input the
    review is printed and [-1, -1] is returned.
    """
    try:
        first_line = review.split('\n')[0]
        tokens = first_line.replace(',', ' ').split(' ')
        if len(tokens) == 2:
            return [float(tokens[0]), float(tokens[1])]
        print('error', review)
        return [-1, -1]
    except Exception as e:
        print(e)
        print('error', review)
        return [-1, -1]
50
+
51
+
52
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-c', '--context')
    parser.add_argument('-a', '--answer-list', nargs='+', default=[])
    parser.add_argument('-r', '--rule')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    # Questions and the two answer files are parallel JSONL streams.
    f_q = open(os.path.expanduser(args.question))
    f_ans1 = open(os.path.expanduser(args.answer_list[0]))
    f_ans2 = open(os.path.expanduser(args.answer_list[1]))
    rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))

    # Resume support: previously written reviews are counted and skipped.
    if os.path.isfile(os.path.expanduser(args.output)):
        cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
    else:
        cur_reviews = []

    review_file = open(f'{args.output}', 'a')

    context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
    image_to_context = {context['image']: context for context in context_list}

    handles = []
    idx = 0
    for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
        ques = json.loads(ques_js)
        ans1 = json.loads(ans1_js)
        ans2 = json.loads(ans2_js)

        # Build the textual scene description: captions plus detection boxes.
        inst = image_to_context[ques['image']]
        cap_str = '\n'.join(inst['captions'])
        box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])

        category = json.loads(ques_js)['category']
        if category in rule_dict:
            rule = rule_dict[category]
        else:
            assert False, f"Visual QA category not found in rule file: {category}."
        prompt = rule['prompt']
        role = rule['role']
        content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
                   f'[Question]\n{ques["text"]}\n\n'
                   f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
                   f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
                   f'[System]\n{prompt}\n\n')
        # NOTE(review): ans1 falls back to its question_id, but ans2's .get()
        # default is ans2['answer_id'] itself — that default is evaluated
        # eagerly and raises KeyError when 'answer_id' is absent; likely
        # should be ans2['question_id'].
        cur_js = {
            'id': idx+1,
            'question_id': ques['question_id'],
            'answer1_id': ans1.get('answer_id', ans1['question_id']),
            'answer2_id': ans2.get('answer_id', ans2['answer_id']),
            'category': category
        }
        if idx >= len(cur_reviews):
            review = get_eval(content, args.max_tokens)
            scores = parse_score(review)
            cur_js['content'] = review
            cur_js['tuple'] = scores
            review_file.write(json.dumps(cur_js) + '\n')
            review_file.flush()
        else:
            print(f'Skipping {idx} as we already have it.')
        idx += 1
        print(idx)
    review_file.close()
VLMEvalKit-sudoku/llava/eval/eval_science_qa.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import re
5
+ import random
6
+
7
+
8
def get_args():
    """Parse CLI arguments for the ScienceQA evaluation script."""
    parser = argparse.ArgumentParser()
    for flag in ('--base-dir', '--result-file', '--output-file', '--output-result'):
        parser.add_argument(flag, type=str)
    parser.add_argument('--split', type=str, default='test')
    # NOTE(review): argparse's type=list splits a user-supplied string into
    # individual characters; only the default behaves as a list of letters.
    parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
    return parser.parse_args()
17
+
18
+
19
def convert_caps(results):
    """Convert QA result records into COCO-caption-style entries.

    Each input record's 'question_id' becomes an int image_id and its 'text'
    becomes the caption.
    """
    return [
        {"image_id": int(record['question_id']), "caption": record['text']}
        for record in results
    ]
26
+
27
+
28
def get_pred_idx(prediction, choices, options):
    """
    Get the index (e.g. 2) from the prediction (e.g. 'C').

    Returns -1 when the prediction is not one of the first len(choices)
    option letters. (An unreachable `random.choice` fallback that sat after
    the `return -1` was removed — it was dead code.)
    """
    if prediction in options[:len(choices)]:
        return options.index(prediction)
    return -1
37
+
38
+
39
if __name__ == "__main__":
    args = get_args()

    base_dir = args.base_dir
    # pid_splits.json maps split name -> list of problem ids;
    # problems.json holds every problem keyed by id.
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))
    predictions = [json.loads(line) for line in open(args.result_file)]
    predictions = {pred['question_id']: pred for pred in predictions}
    split_problems = {idx: problems[idx] for idx in split_indices}

    results = {'correct': [], 'incorrect': []}
    sqa_results = {}
    sqa_results['acc'] = None
    sqa_results['correct'] = None
    sqa_results['count'] = None
    sqa_results['results'] = {}
    sqa_results['outputs'] = {}

    for prob_id, prob in split_problems.items():
        # Missing predictions are scored as FAILED rather than skipped.
        if prob_id not in predictions:
            pred = {'text': 'FAILED', 'prompt': 'Unknown'}
            pred_text = 'FAILED'
        else:
            pred = predictions[prob_id]
            pred_text = pred['text']

        # Answer-letter extraction, in decreasing strictness: exact letter,
        # an "X. ..." prefix, or the phrase "The answer is X."; else FAILED.
        if pred_text in args.options:
            answer = pred_text
        elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ":
            answer = pred_text[0]
        else:
            pattern = re.compile(r'The answer is ([A-Z]).')
            res = pattern.findall(pred_text)
            if len(res) == 1:
                answer = res[0]  # 'A', 'B', ...
            else:
                answer = "FAILED"

        pred_idx = get_pred_idx(answer, prob['choices'], args.options)

        analysis = {
            'question_id': prob_id,
            'parsed_ans': answer,
            'ground_truth': args.options[prob['answer']],
            'question': pred['prompt'],
            'pred': pred_text,
            'is_multimodal': '<image>' in pred['prompt'],
        }

        sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
        sqa_results['outputs'][prob_id] = pred_text

        if pred_idx == prob['answer']:
            results['correct'].append(analysis)
        else:
            results['incorrect'].append(analysis)

    correct = len(results['correct'])
    total = len(results['correct']) + len(results['incorrect'])

    ###### IMG ######
    # Separate accuracy over problems whose prompt contained an image.
    multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']])
    multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']])
    multimodal_total = multimodal_correct + multimodal_incorrect
    ###### IMG ######

    print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%')

    sqa_results['acc'] = correct / total * 100
    sqa_results['correct'] = correct
    sqa_results['count'] = total

    with open(args.output_file, 'w') as f:
        json.dump(results, f, indent=2)
    with open(args.output_result, 'w') as f:
        json.dump(sqa_results, f, indent=2)
VLMEvalKit-sudoku/llava/eval/model_vqa_loader.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+ import shortuuid
7
+
8
+ from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
9
+ from llava.conversation import conv_templates, SeparatorStyle
10
+ from llava.model.builder import load_pretrained_model
11
+ from llava.utils import disable_torch_init
12
+ from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, expand2square, KeywordsStoppingCriteria
13
+ from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX
14
+
15
+ from torch.utils.data import Dataset, DataLoader
16
+ from typing import Dict, Optional, Sequence, List
17
+ import transformers
18
+ import re
19
+ from PIL import Image
20
+ import math
21
+ from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio
22
+
23
+
24
def split_list(lst, n):
    """Split *lst* into consecutive chunks of ceil(len(lst) / n) items.

    Produces at most n chunks; the final chunk may be shorter. Note: an
    empty list raises ValueError (range step of 0), matching the
    historical behaviour of this helper.
    """
    per_chunk = math.ceil(len(lst) / n)
    chunks = []
    for start in range(0, len(lst), per_chunk):
        chunks.append(lst[start:start + per_chunk])
    return chunks


def get_chunk(lst, n, k):
    """Return the k-th of n chunks of *lst* (used to shard eval jobs)."""
    return split_list(lst, n)[k]
33
+
34
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Tokenize one conversation into Qwen chat-template input ids.

    Args:
        sources: list of turn dicts ``{"from": "human"|"gpt", "value": str|None}``.
        tokenizer: Qwen tokenizer. The two additional special tokens are
            unpacked as (im_start, im_end) — assumes exactly that layout of
            ``additional_special_tokens_ids``; TODO confirm for other tokenizers.
        has_image: when True, each "<image>" marker in a turn is replaced by
            an IMAGE_TOKEN_INDEX placeholder id in the sequence.
        max_len: unused in this implementation.
        system_message: system prompt prepended to the conversation.

    Returns:
        A (1, seq_len) LongTensor of input ids. Label ``targets`` are built
        alongside but intentionally not returned (inference-only path).
    """
    roles = {"human": "<|im_start|>user", "gpt": "<|im_start|>assistant"}

    # Unpack chat delimiters and pre-tokenize fixed fragments once.
    im_start, im_end = tokenizer.additional_special_tokens_ids
    nl_tokens = tokenizer("\n").input_ids
    _system = tokenizer("system").input_ids + nl_tokens
    _user = tokenizer("user").input_ids + nl_tokens
    _assistant = tokenizer("assistant").input_ids + nl_tokens

    # Apply prompt templates
    input_ids, targets = [], []

    source = sources
    # Drop a leading turn that is not from the human speaker.
    if roles[source[0]["from"]] != roles["human"]:
        source = source[1:]

    input_id, target = [], []
    # System block: <|im_start|>system\n{message}<|im_end|>\n
    system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
    input_id += system
    # Targets mask everything between the delimiters with IGNORE_INDEX.
    target += [im_start] + [IGNORE_INDEX] * (len(system) - 3) + [im_end] + nl_tokens
    assert len(input_id) == len(target)
    for j, sentence in enumerate(source):
        role = roles[sentence["from"]]
        if has_image and sentence["value"] is not None and "<image>" in sentence["value"]:
            num_image = len(re.findall(DEFAULT_IMAGE_TOKEN, sentence["value"]))
            texts = sentence["value"].split('<image>')
            _input_id = tokenizer(role).input_ids + nl_tokens
            for i, text in enumerate(texts):
                _input_id += tokenizer(text).input_ids
                if i < len(texts) - 1:
                    # One placeholder id (plus newline) per <image> marker.
                    _input_id += [IMAGE_TOKEN_INDEX] + nl_tokens
            _input_id += [im_end] + nl_tokens
            # Sanity check: exactly one placeholder per <image> occurrence.
            assert sum([i == IMAGE_TOKEN_INDEX for i in _input_id]) == num_image
        else:
            if sentence["value"] is None:
                # Open-ended generation turn: emit only the role header.
                _input_id = tokenizer(role).input_ids + nl_tokens
            else:
                _input_id = tokenizer(role).input_ids + nl_tokens + tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
        input_id += _input_id
        if role == "<|im_start|>user":
            # User turns are fully masked out of the loss.
            _target = [im_start] + [IGNORE_INDEX] * (len(_input_id) - 3) + [im_end] + nl_tokens
        elif role == "<|im_start|>assistant":
            # Assistant turns: mask only the role header, supervise the reply.
            _target = [im_start] + [IGNORE_INDEX] * len(tokenizer(role).input_ids) + _input_id[len(tokenizer(role).input_ids) + 1 : -2] + [im_end] + nl_tokens
        else:
            raise NotImplementedError
        target += _target

    input_ids.append(input_id)
    targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)
    return input_ids
86
+
87
+ # Custom dataset class
88
class CustomDataset(Dataset):
    """Dataset pairing each VQA question with its (sliced) image tensors.

    Each item yields the tokenized prompt, the resized source image tensor,
    the original image size, the MiniCPM-style slice patches and their
    index tokens.

    NOTE(review): ``__getitem__`` reads the module-level ``args`` (for
    ``args.conv_mode``), so this class only works when the script is run
    through its CLI entry point — confirm before reusing elsewhere.
    """

    def __init__(self, questions, image_folder, tokenizer, image_processor, model_config):
        # questions: list of jsonl records with at least "image" and "text".
        self.questions = questions
        self.image_folder = image_folder
        self.tokenizer = tokenizer
        self.image_processor = image_processor
        self.model_config = model_config

    def __getitem__(self, index):
        line = self.questions[index]
        image_file = line["image"]
        qs = line["text"]
        processor = self.image_processor
        # Prepend the image token (optionally wrapped in start/end markers).
        if self.model_config.mm_use_im_start_end:
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs

        # Build the chat prompt via the conversation template (global args!).
        conv = conv_templates[args.conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB')
        # image_tensor = process_images([image], self.image_processor, self.model_config)[0]

        # 2x2 slicing (alternative preprocessing, kept for reference)
        # image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
        # sub_images = split_image(image, scale=672, grid=(2, 2))
        # sub_images.append(image)
        # image = sub_images
        # image = processor.preprocess(image, return_tensors='pt')['pixel_values'] # bs, 3, h, w
        # image_tensor = image.flatten(0, 1)

        # adapt
        # image, _, _, _ = slice_image_minicpm(
        #     image, max_slice_nums=7, scale_resolution=336, patch_size=14, never_split=False)
        # image = processor.preprocess(image, do_resize=False, do_center_crop=False,
        #     do_rescale=True, do_normalize=True, return_tensors='pt')['pixel_values'][0]
        # image_tensor = image

        # Cap the longer edge at 1024 px while keeping the aspect ratio.
        image = resize_image_keep_ratio(image, max_size=1024)

        # MiniCPM-style adaptive slicing of the (resized) image.
        source_image, patches, best_grid, ind_tokens = slice_image_minicpm(
            image, max_slice_nums=7, scale_resolution=336, patch_size=14, never_split=False)

        if best_grid is None:  # no slicing was performed
            source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                  do_rescale=True, do_normalize=True,
                                                  return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
            # Zero placeholder patch so downstream code always gets a tensor.
            crop_size = processor.crop_size
            patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width'])
        else:
            source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                                  do_rescale=True, do_normalize=True,
                                                  return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
            patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False,
                                                 do_rescale=True, do_normalize=True,
                                                 return_tensors='pt')['pixel_values']  # num_slice, 3, s_h, s_w
        image_tensor = source_tensors[0]  # 3, h, w
        patch_images = patch_tensors  # bs, 3, h, w

        # Tokenize the prompt, mapping <image> markers to IMAGE_TOKEN_INDEX.
        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')

        return input_ids, image_tensor, image.size, patch_images, ind_tokens

    def __len__(self):
        return len(self.questions)
156
+
157
+
158
def collate_fn(batch):
    """Collate dataset samples into a batch.

    Token ids and source-image tensors are stacked along a new batch
    dimension; image sizes, slice patches and index tokens stay as
    per-sample tuples because their shapes vary between samples.
    """
    ids, images, sizes, patches, inds = zip(*batch)
    stacked_ids = torch.stack(list(ids), dim=0)
    stacked_images = torch.stack(list(images), dim=0)
    return stacked_ids, stacked_images, sizes, patches, inds
163
+
164
+
165
+ # DataLoader
166
def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4):
    """Build a sequential DataLoader over CustomDataset.

    Only batch_size == 1 is supported because samples have variable-sized
    patch tensors; ordering is preserved (shuffle=False) so results can be
    zipped back with the question list.
    """
    assert batch_size == 1, "batch_size must be 1"
    ds = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config)
    return DataLoader(ds, batch_size=batch_size, num_workers=num_workers,
                      shuffle=False, collate_fn=collate_fn)
171
+
172
+
173
def eval_model(args):
    """Run batched VQA inference and write answers as jsonl.

    Loads the pretrained LLaVA model, shards the question file across
    ``num_chunks``/``chunk_idx``, feeds each sample (with MiniCPM-style
    image slicing done in CustomDataset) through ``model.generate``, and
    writes one json record per question to ``args.answers_file``.
    """
    # Model
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name, _args=args)

    # Load and shard the question file (jsonl, one record per line).
    questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file), exist_ok=True)
    ans_file = open(answers_file, "w")

    # Plain (pretrain-only) checkpoints need the mmtag prompt variant.
    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')

    data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config)

    # The loader preserves order (shuffle=False, batch_size=1), so zipping
    # with `questions` keeps question ids aligned with model outputs.
    for (input_ids, image_tensor, image_sizes, patch_images, ind_tokens), line in tqdm(zip(data_loader, questions), total=len(questions)):
        idx = line["question_id"]
        cur_prompt = line["text"]

        input_ids = input_ids.to(device='cuda', non_blocking=True)

        # Move source image and per-sample patch tensors to GPU in fp16.
        image_tensor = [image_tensor[0].to(dtype=torch.float16, device='cuda', non_blocking=True)]
        patch_images = [item.to(dtype=torch.float16, device='cuda', non_blocking=True) for item in patch_images]

        # NOTE(review): conv mode is force-overridden to "qwen_1_5" here,
        # silently ignoring --conv-mode (the dataset already built prompts
        # with the original value) -- confirm this is intentional.
        args.conv_mode = "qwen_1_5"

        conv = conv_templates[args.conv_mode].copy()
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        # NOTE(review): built but never passed to model.generate below.
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)


        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=image_sizes,
                patch_images=patch_images,
                ind_tokens=ind_tokens,
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                top_p=args.top_p,
                num_beams=args.num_beams,
                max_new_tokens=args.max_new_tokens,
                use_cache=True)

        # Decode and strip the conversation stop string, if present.
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()

        ans_id = shortuuid.uuid()
        ans_file.write(json.dumps({"question_id": idx,
                                   "prompt": cur_prompt,
                                   "text": outputs,
                                   "answer_id": ans_id,
                                   "model_id": model_name,
                                   "metadata": {}}) + "\n")
        # ans_file.flush()
    ans_file.close()
238
+
239
+ if __name__ == "__main__":
240
+ parser = argparse.ArgumentParser()
241
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
242
+ parser.add_argument("--model-base", type=str, default=None)
243
+ parser.add_argument("--image-folder", type=str, default="")
244
+ parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
245
+ parser.add_argument("--answers-file", type=str, default="answer.jsonl")
246
+ parser.add_argument("--conv-mode", type=str, default="llava_v1")
247
+ parser.add_argument("--num-chunks", type=int, default=1)
248
+ parser.add_argument("--chunk-idx", type=int, default=0)
249
+ parser.add_argument("--temperature", type=float, default=0.2)
250
+ parser.add_argument("--top_p", type=float, default=None)
251
+ parser.add_argument("--num_beams", type=int, default=1)
252
+ parser.add_argument("--max_new_tokens", type=int, default=128)
253
+ parser.add_argument("--fted_encoder", type=bool, default=True)
254
+ args = parser.parse_args()
255
+
256
+ eval_model(args)
VLMEvalKit-sudoku/llava/eval/qa_baseline_gpt35.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Generate answers with GPT-3.5"""
2
+ # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
3
+ import argparse
4
+ import json
5
+ import os
6
+ import time
7
+ import concurrent.futures
8
+
9
+ import openai
10
+ import tqdm
11
+ import shortuuid
12
+
13
+ MODEL = 'gpt-3.5-turbo'
14
+ MODEL_ID = 'gpt-3.5-turbo:20230327'
15
+
16
def get_answer(question_id: int, question: str, max_tokens: int):
    """Query gpt-3.5-turbo for *question*, retrying up to three times.

    Returns an answer record dict; if all attempts fail, its 'text' field
    is left as '#ERROR#'.
    """
    ans = {
        'answer_id': shortuuid.uuid(),
        'question_id': question_id,
        'model_id': MODEL_ID,
    }
    attempts_left = 3
    while attempts_left > 0:
        attempts_left -= 1
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[
                    {'role': 'system', 'content': 'You are a helpful assistant.'},
                    {'role': 'user', 'content': question},
                ],
                max_tokens=max_tokens,
            )
        except Exception as e:
            # Best-effort retry: log, mark the record, back off one second.
            print('[ERROR]', e)
            ans['text'] = '#ERROR#'
            time.sleep(1)
        else:
            ans['text'] = response['choices'][0]['message']['content']
            return ans
    return ans
+
43
+
44
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
    parser.add_argument('-q', '--question')
    parser.add_argument('-o', '--output')
    parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
    args = parser.parse_args()

    # Load the question file (jsonl: one record per line).
    questions_dict = {}
    with open(os.path.expanduser(args.question)) as fin:
        for raw_line in fin:
            if not raw_line:
                continue
            record = json.loads(raw_line)
            questions_dict[record['question_id']] = record['text']

    answers = []

    # Fan the questions out over a thread pool; the API calls are I/O bound.
    with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
        futures = [
            executor.submit(get_answer, qid, question_text, args.max_tokens)
            for qid, question_text in questions_dict.items()
        ]

        for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
            answers.append(future.result())

    # Completion order is nondeterministic; restore question order.
    answers.sort(key=lambda ans: ans['question_id'])

    with open(os.path.expanduser(args.output), 'w') as fout:
        fout.write('\n'.join(json.dumps(ans) for ans in answers))
VLMEvalKit-sudoku/llava/model/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
# Registry of language-model variants: module name -> class names to import.
AVAILABLE_MODELS = {
    "llava_llama": "LlavaLlamaForCausalLM, LlavaConfig",
    "llava_qwen": "LlavaQwenForCausalLM, LlavaQwenConfig",
    "llava_mistral": "LlavaMistralForCausalLM, LlavaMistralConfig",
    "llava_mixtral": "LlavaMixtralForCausalLM, LlavaMixtralConfig",
    "llava_qwen3": "LlavaQwen3ForCausalLM, LlavaQwen3Config"
    # "llava_qwen_moe": "LlavaQwenMoeForCausalLM, LlavaQwenMoeConfig",
    # Add other models as needed
}

# Eagerly import every registered variant into this package's namespace.
# A failure for one variant (e.g. a missing optional dependency) must not
# break the others, so each import is attempted independently.
for model_name, model_classes in AVAILABLE_MODELS.items():
    try:
        exec(f"from .language_model.{model_name} import {model_classes}")
    except Exception as e:
        print(f"Failed to import {model_name} from llava.language_model.{model_name}. Error: {e}")
VLMEvalKit-sudoku/llava/model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (753 Bytes). View file
 
VLMEvalKit-sudoku/llava/model/__pycache__/builder.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
VLMEvalKit-sudoku/llava/model/builder.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Haotian Liu
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import os
17
+ import warnings
18
+ import shutil
19
+
20
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
21
+ import torch
22
+ from llava.model import *
23
+ from llava.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
24
+ from llava.utils import rank0_print
25
+
26
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False,
                          device_map="auto", device="cuda", use_flash_attn=False, _args=None, overwrite_config=None, **kwargs):
    """Load a (possibly LoRA / quantized) LLaVA or plain LM checkpoint.

    Dispatches on substrings of *model_name* ('llava', 'lora', 'mpt',
    'mistral', 'qwen3', 'qwen'/'quyen', 'moe') to pick the model class and
    loading strategy, then attaches the vision tower and image processor
    when the model is multimodal.

    Args:
        model_path: checkpoint directory (local path or HF hub id).
        model_base: base model to merge LoRA / projector-only weights into.
        model_name: name used for dispatch (see get_model_name_from_path).
        load_8bit / load_4bit: bitsandbytes quantized loading.
        device_map / device: HF device placement controls.
        use_flash_attn: request flash-attention-2 for the language model.
        _args: optional CLI namespace; ``_args.fted_encoder`` selects loading
            fine-tuned vision-encoder weights from *model_path*.
        overwrite_config: dict of config attributes to override (qwen paths only).

    Returns:
        (tokenizer, model, image_processor, context_len); image_processor is
        None for non-llava models.
    """
    kwargs = {"device_map": device_map, **kwargs}

    # Pin everything to an explicit device when not using plain "cuda".
    if device != "cuda":
        kwargs['device_map'] = {"": device}

    if load_8bit:
        kwargs['load_in_8bit'] = True
    elif load_4bit:
        kwargs['load_in_4bit'] = True
        kwargs['quantization_config'] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4'
        )
    else:
        kwargs['torch_dtype'] = torch.float16

    if use_flash_attn:
        kwargs['attn_implementation'] = 'flash_attention_2'

    if 'llava' in model_name.lower():
        # Load LLaVA model
        if 'lora' in model_name.lower() and model_base is None:
            warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
        if 'lora' in model_name.lower() and model_base is not None:
            # LoRA checkpoint: load the base model, then merge adapter weights.
            from llava.model.language_model.llava_llama import LlavaConfig
            lora_cfg_pretrained = LlavaConfig.from_pretrained(model_path)
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            print('Loading LLaVA from base model...')
            model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
            # (sic: "tokem_dim" is a long-standing typo kept for byte-compat)
            token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
            if model.lm_head.weight.shape[0] != token_num:
                # Vocab was resized during fine-tuning; re-allocate embeddings
                # (values come from non_lora_trainables below).
                model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
                model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))

            print('Loading additional LLaVA weights...')
            if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
                non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
            else:
                # this is probably from HF Hub
                from huggingface_hub import hf_hub_download
                def load_from_hf(repo_id, filename, subfolder=None):
                    cache_file = hf_hub_download(
                        repo_id=repo_id,
                        filename=filename,
                        subfolder=subfolder)
                    return torch.load(cache_file, map_location='cpu')
                non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
            # Strip PEFT wrapper prefixes so keys match the bare model.
            non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
            if any(k.startswith('model.model.') for k in non_lora_trainables):
                non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
            model.load_state_dict(non_lora_trainables, strict=False)

            from peft import PeftModel
            print('Loading LoRA weights...')
            model = PeftModel.from_pretrained(model, model_path)
            print('Merging LoRA weights...')
            model = model.merge_and_unload()
            print('Model is loaded...')
        elif model_base is not None:
            # this may be mm projector only
            print('Loading LLaVA from base model...')
            if 'mpt' in model_name.lower():
                # MPT needs its remote-code config file next to the checkpoint.
                if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
                    shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
                cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
                model = LlavaMptForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
                cfg_pretrained = AutoConfig.from_pretrained(model_path)
                model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)

            # Overlay the trained multimodal projector onto the base model.
            mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
            mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
            model.load_state_dict(mm_projector_weights, strict=False)
        else:
            # Full (merged) checkpoint: dispatch on the backbone family.
            if 'mpt' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = LlavaMptForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
            elif 'mistral' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                model = LlavaMistralForCausalLM.from_pretrained(
                    model_path,
                    low_cpu_mem_usage=True,
                    **kwargs
                )
            elif 'qwen3' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                from llava.model.language_model.llava_qwen3 import LlavaQwen3ForCausalLM
                model = LlavaQwen3ForCausalLM.from_pretrained(
                    model_path,
                    low_cpu_mem_usage=True,
                    **kwargs
                )
                # NOTE(review): cast to bf16 even though kwargs may request
                # fp16 above -- bf16 wins for the qwen/llama paths below too.
                model.to(torch.bfloat16)
            elif "qwen" in model_name.lower() or "quyen" in model_name.lower():
                # NOTE(review): flash-attention-2 is forced here regardless of
                # the use_flash_attn argument -- confirm intended.
                attn_implementation="flash_attention_2"
                tokenizer = AutoTokenizer.from_pretrained(model_path)
                if "moe" in model_name.lower() or "A14B" in model_name.lower():
                    from llava.model.language_model.llava_qwen_moe import LlavaQwenMoeConfig
                    if overwrite_config is not None:
                        llava_cfg = LlavaQwenMoeConfig.from_pretrained(model_path)
                        rank0_print(f"Overwriting config with {overwrite_config}")
                        for k, v in overwrite_config.items():
                            setattr(llava_cfg, k, v)
                        model = LlavaQwenMoeForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, attn_implementation=attn_implementation, config=llava_cfg, **kwargs)
                    else:
                        model = LlavaQwenMoeForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, attn_implementation=attn_implementation, **kwargs)

                else:
                    from llava.model.language_model.llava_qwen import LlavaQwenConfig
                    if overwrite_config is not None:
                        llava_cfg = LlavaQwenConfig.from_pretrained(model_path)
                        rank0_print(f"Overwriting config with {overwrite_config}")
                        for k, v in overwrite_config.items():
                            setattr(llava_cfg, k, v)
                        model = LlavaQwenForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, config=llava_cfg, **kwargs)
                    else:
                        model = LlavaQwenForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)

                model.to(torch.bfloat16)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = LlavaLlamaForCausalLM.from_pretrained(
                    model_path,
                    low_cpu_mem_usage=True,
                    **kwargs
                )
                model.to(torch.bfloat16)
    else:
        # Load language model
        if model_base is not None:
            # PEFT model
            from peft import PeftModel
            tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
            print(f"Loading LoRA weights from {model_path}")
            model = PeftModel.from_pretrained(model, model_path)
            print(f"Merging weights")
            model = model.merge_and_unload()
            print('Convert to FP16...')
            model.to(torch.float16)
        else:
            use_fast = False  # NOTE(review): assigned but unused
            if 'mpt' in model_name.lower():
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
                model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
            else:
                tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
                model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)

    image_processor = None

    if 'llava' in model_name.lower():
        # Extend the tokenizer with the multimodal special tokens the
        # checkpoint was trained with, then load the vision tower.
        mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
        mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
        if mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
        if mm_use_im_start_end:
            tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
        model.resize_token_embeddings(len(tokenizer))

        vision_tower = model.get_vision_tower()
        if not vision_tower.is_loaded:
            # fted_encoder: load fine-tuned encoder weights from the checkpoint
            # directory instead of the original pretrained tower.
            if _args is not None and _args.fted_encoder == True:
                vision_tower.load_model(device_map=device_map, model_path=model_path)
            else:
                vision_tower.load_model(device_map=device_map)
        if device_map != 'auto':
            vision_tower.to(device=device_map, dtype=torch.float16)
        image_processor = vision_tower.image_processor

    # Determine the usable context length from whichever config field exists.
    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    elif hasattr(model.config, "max_position_embeddings"):
        context_len = model.config.max_position_embeddings
    elif hasattr(model.config, "tokenizer_model_max_length"):
        context_len = model.config.tokenizer_model_max_length
    else:
        context_len = 2048

    return tokenizer, model, image_processor, context_len
+ return tokenizer, model, image_processor, context_len
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/attn_res.cpython-310.pyc ADDED
Binary file (2.71 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-310.pyc ADDED
Binary file (8.21 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_moonvit.cpython-310.pyc ADDED
Binary file (28.6 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_siglip2.cpython-310.pyc ADDED
Binary file (43.4 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/modeling_swin_siglip2_zyc.cpython-310.pyc ADDED
Binary file (62.2 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/__pycache__/open_clip_encoder.cpython-310.pyc ADDED
Binary file (6.05 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_encoder/attn_res.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import re
6
+ from functools import partial
7
+ import numpy as np
8
+ from torch.nn.init import trunc_normal_
9
+ from torch.nn import functional as F
10
+ import math
11
+ from transformers.activations import ACT2FN
12
+ from transformers.utils import (
13
+ is_flash_attn_2_available,
14
+ is_flash_attn_greater_or_equal_2_10,
15
+ )
16
+ if is_flash_attn_2_available():
17
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
18
+
19
class TokenPacker(nn.Module):
    """Cross-attention token compressor with layer-scale residuals.

    Attends from a query token (the mean of a group of high-resolution
    sub-tokens) to that group, then adds the attended result and an FFN
    output back onto the query through learnable per-channel scales
    (gamma1 / gamma2). Requires flash-attention-2 to be installed
    (``_flash_attention_forward`` is only imported when available).
    """
    # Non-causal: every query may attend to every key.
    is_causal = False

    def __init__(
        self,
        embed_dim=1152,
        intermediate_size=4304,
        num_heads=16,
        norm_layer=partial(nn.LayerNorm, eps=1e-6)  # NOTE(review): accepted but unused
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        gamma_init_eps = 1e-5  # small init so the residual branches start near-identity
        layer_norm_eps = 1e-6
        self.head_dim = self.embed_dim // self.num_heads
        self.scale = self.head_dim**-0.5  # NOTE(review): defined but not applied here (flash-attn scales internally)

        # Standard multi-head attention projections.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

        self.ln_q = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
        self.ln_kv = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
        # Layer-scale for the attention residual branch.
        self.gamma1 = nn.Parameter(gamma_init_eps * torch.ones(embed_dim), requires_grad=True)

        self.ln_ffn = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
        self.ffn = nn.Sequential(
            nn.Linear(self.embed_dim, intermediate_size),
            ACT2FN['gelu_pytorch_tanh'],
            nn.Linear(intermediate_size, self.embed_dim),
        )
        # Layer-scale for the FFN residual branch.
        self.gamma2 = nn.Parameter(gamma_init_eps * torch.ones(embed_dim), requires_grad=True)
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def _attn(self, query, key, value, attn_mask):
        # query: (B, N_q, C); key/value: (B, N_k, C). Returns (B, N_q, C).
        B, N_q, _ = query.shape
        B, N_k, _ = key.shape
        query_states = self.q_proj(query)
        key_states = self.k_proj(key)
        value_states = self.v_proj(value)

        # Flash-attention expects (B, N, heads, head_dim).
        query_states = query_states.view(B, N_q, self.num_heads, self.head_dim)
        key_states = key_states.view(B, N_k, self.num_heads, self.head_dim)
        value_states = value_states.view(B, N_k, self.num_heads, self.head_dim)
        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attn_mask,
            N_q,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )
        attn_output = attn_output.reshape(B, N_q, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)
        return attn_output

    def forward(self, x, x_res, query=None, key=None, value=None, attn_mask=None):
        # x_res: ((h w), 4, c) — the high-res sub-tokens per output position
        # x:     ((h w), 1, c) — the mean of those sub-tokens
        # Explicit query/key/value override the defaults derived from x/x_res
        # (lets callers reuse cached, already-normalized keys/values).
        if query is not None:
            query = self.ln_q(query)
        else:
            query = self.ln_q(x)[:, None, :]
        if key is not None:
            key = key
        else:
            key = self.ln_kv(x_res)
        if value is not None:
            value = value
        else:
            value = key

        out = self._attn(
            query,
            key,
            value,
            attn_mask=attn_mask)[0]
        # NOTE(review): _attn returns a single (B, N_q, C) tensor, so [0]
        # above keeps only the first batch element — looks like a holdover
        # from an API that returned (output, weights); confirm intended.
        x_res = out

        # LN'd q/kv go through attention, scaled by gamma1, then residual-add.
        x = x + self.gamma1 * x_res
        x = x + self.gamma2 * self.ffn(self.ln_ffn(x))
        # The averaged x is the pass-through branch; the gammas learn how much
        # residual detail to mix back in.
        return x, key, value
VLMEvalKit-sudoku/llava/model/multimodal_encoder/clip_encoder.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import os
4
+ from safetensors import safe_open
5
+ from llava.utils import rank0_print
6
+ from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
7
+ from llava.model.multimodal_encoder.adapt_clip_vision_model import AdaptCLIPVisionModel
8
try:
    # Optional dependency: only needed by CLIPVisionTowerS2 (multi-scale S2 forward).
    from s2wrapper import forward as multiscale_forward
except ImportError:
    # Narrowed from a bare `except:` which also swallowed SystemExit /
    # KeyboardInterrupt and genuine errors inside s2wrapper itself.
    pass
12
+
13
def load_vision_tower_values(model_path, device):
    """Collect every `vision_tower` weight from the `.safetensors` files under a path.

    Scans ``model_path`` for ``*.safetensors`` shards, opens each one, and
    returns all tensors whose key contains ``vision_tower``, with the
    ``model.vision_tower.vision_tower.`` prefix stripped from the key.

    Args:
        model_path (str): path to a Hugging Face model folder.
        device: device the tensors are loaded onto (stringified for safe_open).

    Returns:
        dict: stripped key -> tensor for all vision-tower weights found.
    """
    prefix = 'model.vision_tower.vision_tower.'
    weights = {}

    shard_names = [name for name in os.listdir(model_path) if name.endswith('.safetensors')]
    for shard_name in shard_names:
        shard_path = os.path.join(model_path, shard_name)
        # safetensors lazily maps tensors; only matching keys are materialized.
        with safe_open(shard_path, framework="pt", device=str(device)) as shard:
            for key in shard.keys():
                if 'vision_tower' in key:
                    weights[key.replace(prefix, '')] = shard.get_tensor(key)

    return weights
41
class CLIPVisionTower(nn.Module):
    """Wraps an (Adapt)CLIP vision backbone as a frozen feature extractor.

    Loads the image processor and vision model, freezes the backbone, and
    exposes `feature_select` to pick which hidden-state layer / tokens are
    returned to the multimodal projector.
    """

    def __init__(self, vision_tower, args, delay_load=False):
        """
        Args:
            vision_tower: HF name or path of the CLIP vision model.
            args: training/model args; reads `mm_vision_select_layer`,
                `mm_vision_select_feature`, `unfreeze_mm_vision_tower`,
                `mm_tunable_parts`.
            delay_load: when True, only the config is loaded unless the
                vision tower is going to be trained (then load eagerly).
        """
        super().__init__()

        self.is_loaded = False

        self.vision_tower_name = vision_tower
        # Which hidden-state layer to tap (e.g. -2) and which tokens to keep.
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")

        if not delay_load:
            rank0_print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(args, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()
        else:
            # Delay-loaded: keep only the config so properties like
            # `hidden_size` work before weights are materialized.
            self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)

    def load_model(self, device_map=None, model_path=None):
        """Load processor + AdaptCLIPVisionModel; optionally restore finetuned
        vision-tower weights from `model_path`, then freeze the backbone."""
        if self.is_loaded:
            rank0_print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return

        self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
        #self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)

        print('---------init adapt_vision_model---------')
        self.vision_tower = AdaptCLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
        if model_path is None:
            print('---------from frozen ckpt---------')
        else:
            # Overwrite backbone weights with finetuned `vision_tower.*`
            # tensors found in the checkpoint shards at `model_path`.
            print('---------from ft ckpt---------')
            vision_tower_values = load_vision_tower_values(model_path, self.vision_tower.device)
            load_info = self.vision_tower.load_state_dict(vision_tower_values, strict=False)
            print(f'load info: {load_info}')

        # Backbone is frozen; training (if any) happens in the projector/LLM.
        self.vision_tower.requires_grad_(False)

        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        """Pick hidden states according to `select_feature`.

        "slicefour_*" concatenates 4 evenly spaced layers channel-wise;
        "slice_m25811_f6_*" concatenates layers [-2, -5, -8, -11, 6];
        otherwise a single layer `select_layer` is used.  A "patch" suffix
        drops the CLS token (index 0); "cls_patch" keeps all tokens.
        """
        select_feature_type = self.select_feature

        if self.select_feature in ["slicefour_patch", "slicefour_cls_patch"]:
            select_every_k_layer = len(image_forward_outs.hidden_states) // 4
            image_features = torch.cat([image_forward_outs.hidden_states[i] for i in range(select_every_k_layer + self.select_layer, len(image_forward_outs.hidden_states), select_every_k_layer)], dim=-1)
            select_feature_type = select_feature_type.replace("slicefour_", "")
        elif self.select_feature in ["slice_m25811_f6_patch", "slice_m25811_f6_cls_patch"]:
            select_layers = [-2, -5, -8, -11, 6]
            image_features = torch.cat([image_forward_outs.hidden_states[i] for i in select_layers], dim=-1)
            select_feature_type = select_feature_type.replace("slice_m25811_f6_", "")
        else:
            image_features = image_forward_outs.hidden_states[self.select_layer]

        if select_feature_type == "patch":
            # Drop the CLS token, keep only patch tokens.
            image_features = image_features[:, 1:]
        elif select_feature_type == "cls_patch":
            image_features = image_features
        else:
            raise ValueError(f"Unexpected select feature: {select_feature_type}")
        return image_features

    def forward(self, images, patch_sizes):
        """Encode variable-sized images.

        Args:
            images: batch of (padded) images; `patch_sizes[i]` = (h, w) grid
                of valid patches for image i — TODO confirm against caller.

        Returns:
            list of per-image feature tensors, each of shape (1, h*w, C)
            with padding positions trimmed off.
        """
        tgt_sizes = torch.tensor(patch_sizes, dtype=torch.long, device=images[0].device)

        #FIXME the pooled_output here is incorrect for post_layernorm on padded features
        image_forward_outs = self.vision_tower(images, tgt_sizes=tgt_sizes, output_hidden_states=True)
        features = self.feature_select(image_forward_outs).to(images[0].dtype)

        image_features = []  # list of torch.Size([1, h*w, C]) tensors
        for i in range(len(features)):
            h, w = patch_sizes[i]
            # Keep only the h*w valid (non-padded) patch tokens.
            feature = features[i][:h * w, :].unsqueeze(0)
            # feature = feature.permute(0, 2, 1) #torch.Size([1, 1024, 25*22])
            # feature = feature.unflatten(2, [h, w]) #torch.Size([1, 1024, 25, 22])
            image_features.append(feature)

        return image_features

    def forward_uhd_v2(self, images, tgt_sizes):
        """Like `forward` but returns the selected features without trimming
        per-image padding (caller handles shapes)."""
        #FIXME the pooled_output here is incorrect for post_layernorm on padded features
        image_forward_outs = self.vision_tower(images, tgt_sizes=tgt_sizes, output_hidden_states=True)
        image_features = self.feature_select(image_forward_outs).to(images[0].dtype)
        return image_features

    @property
    def dummy_feature(self):
        # Zero feature placeholder matching the tower's dtype/device.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.dtype

    @property
    def device(self):
        return self.vision_tower.device

    @property
    def config(self):
        # Before `load_model` runs, only the standalone config is available.
        if self.is_loaded:
            return self.vision_tower.config
        else:
            return self.cfg_only

    @property
    def hidden_size(self):
        # Layer-concatenation modes multiply the channel dimension.
        _hidden_size = self.config.hidden_size
        if "slicefour" in self.select_feature:
            _hidden_size *= 4
        if "slice_m25811_f6" in self.select_feature:
            _hidden_size *= 5
        return _hidden_size

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def num_patches(self):
        _num_patches = (self.config.image_size // self.config.patch_size) ** 2
        if "cls_patch" in self.select_feature:
            _num_patches += 1
        return _num_patches

    @property
    def image_size(self):
        return self.config.image_size
+
174
+
175
class CLIPVisionTowerS2(CLIPVisionTower):
    """CLIP vision tower with S2 (Scaling-on-Scales) multi-scale forwarding.

    Runs the backbone at several image scales via the optional `s2wrapper`
    package and concatenates the resulting features channel-wise.
    """

    def __init__(self, vision_tower, args, delay_load=False):

        # Parse "336,672,1008"-style scale list; smallest scale is the split
        # size, largest is the preprocessing target size.
        self.s2_scales = getattr(args, "s2_scales", "336,672,1008")
        self.s2_scales = list(map(int, self.s2_scales.split(",")))
        self.s2_scales.sort()
        self.s2_split_size = self.s2_scales[0]
        self.s2_image_size = self.s2_scales[-1]

        super().__init__(vision_tower, args, delay_load)

        # change resize/crop size in preprocessing to the largest image size in s2_scale
        if not delay_load or getattr(args, "unfreeze_mm_vision_tower", False):
            self.image_processor.size["shortest_edge"] = self.s2_image_size
            self.image_processor.crop_size["height"] = self.image_processor.crop_size["width"] = self.s2_image_size

    def load_model(self, device_map=None):
        """Load the plain CLIP vision model (not the Adapt variant) and
        retarget preprocessing to the largest S2 scale."""
        if self.is_loaded:
            rank0_print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return

        self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
        self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
        self.vision_tower.requires_grad_(False)

        self.image_processor.size["shortest_edge"] = self.s2_image_size
        self.image_processor.crop_size["height"] = self.image_processor.crop_size["width"] = self.s2_image_size

        self.is_loaded = True

    def forward_feature(self, images):
        """Single-scale feature pass; used as the callback for S2."""
        image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
        image_features = self.feature_select(image_forward_outs).to(images.dtype)
        return image_features

    def forward(self, images):
        """Multi-scale forward via s2wrapper; accepts a list of single images
        or a batched tensor."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_feature = multiscale_forward(self.forward_feature, image.unsqueeze(0), img_sizes=self.s2_scales, max_split_size=self.s2_split_size, split_forward=True)
                image_features.append(image_feature)
        else:
            image_features = multiscale_forward(self.forward_feature, images, img_sizes=self.s2_scales, max_split_size=self.s2_split_size, split_forward=True)

        return image_features

    @property
    def hidden_size(self):
        # Features from all scales are concatenated channel-wise.
        return self.config.hidden_size * len(self.s2_scales)
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/factory.py ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import pathlib
5
+ import re
6
+ from copy import deepcopy
7
+ from pathlib import Path
8
+ from typing import Optional, Tuple, Union, Dict, Any
9
+ import torch
10
+
11
+ try:
12
+ import deepspeed
13
+ except ImportError:
14
+ deepspeed = None
15
+
16
+ from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
17
+ from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict, get_cast_dtype
18
+ from .openai import load_openai_model
19
+ from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
20
+ from .transform import image_transform
21
+ from .tokenizer import HFTokenizer, tokenize
22
+ from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
23
+
24
+
25
+ _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
26
+ _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
27
+
28
+
29
+ def _natural_key(string_):
30
+ return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
31
+
32
+
33
def _rescan_model_configs():
    """Rebuild the global model-config registry from every JSON file on the search path."""
    global _MODEL_CONFIGS

    config_ext = (".json",)
    candidates = []
    for search_path in _MODEL_CONFIG_PATHS:
        # A path entry may be a single config file or a directory of them.
        if search_path.is_file() and search_path.suffix in config_ext:
            candidates.append(search_path)
        elif search_path.is_dir():
            for ext in config_ext:
                candidates.extend(search_path.glob(f"*{ext}"))

    required_fields = ("embed_dim", "vision_cfg", "text_cfg")
    for candidate in candidates:
        with open(candidate, "r", encoding="utf8") as fh:
            cfg = json.load(fh)
            # Only register complete CLIP configs; skip partial/other JSON.
            if all(field in cfg for field in required_fields):
                _MODEL_CONFIGS[candidate.stem] = cfg

    # Keep the registry naturally sorted so e.g. "ViT-B-16" precedes "ViT-B-32".
    _MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda item: _natural_key(item[0])))


_rescan_model_configs()  # initial populate of model config registry
55
+
56
+
57
def list_models():
    """Enumerate available model architectures based on registered config files."""
    return list(_MODEL_CONFIGS)
60
+
61
+
62
def add_model_config(path):
    """Register an extra model-config file or directory and refresh the registry."""
    _MODEL_CONFIG_PATHS.append(path if isinstance(path, Path) else Path(path))
    _rescan_model_configs()
68
+
69
+
70
def get_model_config(model_name):
    """Return a deep copy of the named model config, or None when unknown."""
    try:
        # Deep copy so callers can mutate the config without poisoning the registry.
        return deepcopy(_MODEL_CONFIGS[model_name])
    except KeyError:
        return None
75
+
76
+
77
def get_tokenizer(model_name):
    """Return the tokenizer for `model_name`: an HF tokenizer when the text
    config names one, otherwise the default CLIP `tokenize` function."""
    text_cfg = get_model_config(model_name)["text_cfg"]
    if "hf_tokenizer_name" in text_cfg:
        return HFTokenizer(text_cfg["hf_tokenizer_name"])
    return tokenize
81
+
82
+
83
+ # loading openai CLIP weights when is_openai=True for training
84
def load_state_dict(checkpoint_path: str, map_location: str = "cpu", model_key: str = "model|module|state_dict", is_openai: bool = False, skip_list: list = None):
    """Load a (possibly nested) state dict from a checkpoint file.

    Args:
        checkpoint_path: path to a ``torch.save`` checkpoint, or a TorchScript
            archive when ``is_openai`` is True.
        map_location: device mapping forwarded to ``torch.load``.
        model_key: ``|``-separated candidate keys under which the state dict
            may be nested inside the checkpoint; the checkpoint itself is used
            when none match.
        is_openai: load OpenAI's jit-scripted CLIP weights instead.
        skip_list: parameter names to drop from the returned state dict.

    Returns:
        dict: parameter name -> tensor.
    """
    # FIX: previously `skip_list: list = []` — a mutable default shared across
    # calls. `None` sentinel preserves behavior for all existing callers.
    skip_list = skip_list if skip_list is not None else []

    if is_openai:
        model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
        state_dict = model.state_dict()
        # These jit-archive metadata entries are not model parameters.
        for key in ["input_resolution", "context_length", "vocab_size"]:
            state_dict.pop(key, None)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=map_location)
        for mk in model_key.split("|"):
            if isinstance(checkpoint, dict) and mk in checkpoint:
                state_dict = checkpoint[mk]
                break
            else:
                state_dict = checkpoint
        # Strip a DataParallel/DDP "module." prefix if present.
        if next(iter(state_dict.items()))[0].startswith("module"):
            state_dict = {k[7:]: v for k, v in state_dict.items()}

    for k in skip_list:
        if k in list(state_dict.keys()):
            logging.info(f"Removing key {k} from pretrained checkpoint")
            del state_dict[k]

    # Rotary embedding buffers are recomputed at build time when RoPE is on.
    if os.getenv("RoPE") == "1":
        for k in list(state_dict.keys()):
            if "freqs_cos" in k or "freqs_sin" in k:
                del state_dict[k]
    return state_dict
111
+
112
+
113
def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
    """Load checkpoint weights into ``model``, adapting legacy layouts and
    resizing position embeddings to the model's resolution.

    Returns the incompatible-keys named tuple from ``Module.load_state_dict``.
    """
    state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
    # detect old format and make compatible with new format
    if "positional_embedding" in state_dict and not hasattr(model, "positional_embedding"):
        state_dict = convert_to_custom_text_state_dict(state_dict)
    if "text.logit_scale" in state_dict and hasattr(model, "logit_scale"):
        # Older checkpoints store logit_scale under the text tower.
        state_dict["logit_scale"] = state_dict["text.logit_scale"]
        del state_dict["text.logit_scale"]

    # resize_clip_pos_embed for CLIP and open CLIP
    if "visual.positional_embedding" in state_dict:
        resize_clip_pos_embed(state_dict, model)
    # specified to eva_vit_model
    elif "visual.pos_embed" in state_dict:
        resize_evaclip_pos_embed(state_dict, model)

    # resize_clip_pos_embed(state_dict, model)
    incompatible_keys = model.load_state_dict(state_dict, strict=strict)
    logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
    return incompatible_keys
133
+
134
+
135
def load_clip_visual_state_dict(checkpoint_path: str, map_location: str = "cpu", is_openai: bool = False, skip_list: list = []):
    """Load a checkpoint and keep only the vision-tower weights, stripping the
    leading ``visual.`` prefix from every key."""
    state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)

    # len("visual.") == 7; drop everything that is not part of the vision tower.
    return {k[7:]: v for k, v in state_dict.items() if k.startswith("visual.")}
147
+
148
+
149
def load_clip_text_state_dict(checkpoint_path: str, map_location: str = "cpu", is_openai: bool = False, skip_list: list = []):
    """Load a checkpoint and drop all vision-tower (``visual.``) weights,
    keeping the text-side keys unchanged."""
    state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)

    return {k: v for k, v in state_dict.items() if not k.startswith("visual.")}
156
+
157
+
158
def get_pretrained_tag(pretrained_model):
    """Classify a pretrained-model identifier into a checkpoint-family tag.

    Matching is case-insensitive and checked in precedence order:
    "open_clip" (laion / open_clip), "clip" (openai), "eva_clip"
    (contains both eva and clip), else "other".
    """
    name = pretrained_model.lower()
    if "laion" in name or "open_clip" in name:
        return "open_clip"
    if "openai" in name:
        return "clip"
    if "eva" in name and "clip" in name:
        return "eva_clip"
    return "other"
168
+
169
+
170
def load_zero_partitions(model, state_dict, is_deepspeed_zero3_enabled, pretrained_model_path, ignore_mismatched_sizes=False):
    """Load ``state_dict`` into ``model``, gathering DeepSpeed ZeRO-3 shards.

    Adapted from PyTorch Lightning and HuggingFace Transformers. Typical use:

        with deepspeed.zero.Init():
            model = MyModel()
        state_dict = torch.load(model_path, map_location="cpu")
        load_zero_partitions(model, state_dict, is_deepspeed_zero3_enabled, model_path)

    Args:
        model: target module (possibly ZeRO-3 partitioned).
        state_dict: checkpoint parameters to load.
        is_deepspeed_zero3_enabled: gather/re-partition params around the load.
        pretrained_model_path: only used in log messages.
        ignore_mismatched_sizes: drop (and report) shape-mismatched keys
            instead of raising.

    Raises:
        RuntimeError: if any per-module load produced an error message.
    """

    # because zero3 puts placeholders in model params, this context
    # manager gathers (unpartitions) the params of the current layer, then loads from
    # the state dict and then re-partitions them again
    model_state_dict = model.state_dict()
    expected_keys = list(model_state_dict.keys())
    loaded_keys = list(state_dict.keys())
    missing_keys = list(set(expected_keys) - set(loaded_keys))
    unexpected_keys = list(set(loaded_keys) - set(expected_keys))

    # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
    # matching the weights in the model.
    mismatched_keys = []
    if ignore_mismatched_sizes:
        for checkpoint_key in loaded_keys:
            model_key = checkpoint_key

            if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape:
                mismatched_keys.append((checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape))
                del state_dict[checkpoint_key]
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    error_msgs = []

    # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
    # so we need to apply the function recursively.
    def load(module, prefix=""):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
        if is_deepspeed_zero3_enabled:
            # because zero3 puts placeholders in model params, this context
            # manager gathers (unpartitions) the params of the current layer, then loads from
            # the state dict and then re-partitions them again
            with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                # Only rank 0 copies values; the gather context broadcasts them.
                if torch.distributed.get_rank() == 0:
                    module._load_from_state_dict(*args)
        else:
            module._load_from_state_dict(*args)

        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + ".")

    # Make sure we are able to load base models as well as derived models (with heads)
    start_prefix = ""
    model_to_load = model
    load(model_to_load, prefix=start_prefix)
    del state_dict
    if len(error_msgs) > 0:
        error_msg = "\n\t".join(error_msgs)
        if "size mismatch" in error_msg:
            error_msg += "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
        raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
    if len(unexpected_keys) > 0:
        logging.warning(
            f"Some weights of the model checkpoint at {pretrained_model_path} were not used when"
            f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
            f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
            " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
            " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
            f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
            " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
        )
    else:
        logging.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logging.warning(
            f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
            f" {pretrained_model_path} and are newly initialized: {missing_keys}\nYou should probably"
            " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
        )
    elif len(mismatched_keys) == 0:
        logging.info(
            f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
            f" {pretrained_model_path}.\nIf your task is similar to the task the model of the checkpoint"
            f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
            " training."
        )
    if len(mismatched_keys) > 0:
        mismatched_warning = "\n".join([f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys])
        logging.warning(
            f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
            f" {pretrained_model_path} and are newly initialized because the shapes did not"
            f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
            " to use it for predictions and inference."
        )
269
+
270
def load_pretrained_checkpoint(model, visual_checkpoint_path, text_checkpoint_path, strict=True, visual_model=None, text_model=None, model_key="model|module|state_dict", skip_list=[]):
    """Load visual and/or text tower weights into ``model`` from separate checkpoints.

    The checkpoint family tag ("open_clip" / "clip" / "eva_clip" / "other")
    of each tower decides which loader and key filter is applied.

    Returns:
        Tuple of (visual_incompatible_keys, text_incompatible_keys); an entry
        is None when the corresponding checkpoint path was empty.
    """
    # FIX: `get_pretrained_tag(None)` would crash on `.lower()`; treat a
    # missing model name as the generic "other" family.
    visual_tag = get_pretrained_tag(visual_model) if visual_model is not None else "other"
    text_tag = get_pretrained_tag(text_model) if text_model is not None else "other"

    logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
    visual_incompatible_keys, text_incompatible_keys = None, None
    if visual_checkpoint_path:
        if visual_tag == "eva_clip" or visual_tag == "open_clip":
            visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
        elif visual_tag == "clip":
            visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
        else:
            visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)

        # resize_clip_pos_embed for CLIP and open CLIP
        if "positional_embedding" in visual_state_dict:
            resize_visual_pos_embed(visual_state_dict, model)
        # specified to EVA model
        elif "pos_embed" in visual_state_dict:
            resize_eva_pos_embed(visual_state_dict, model)

        visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
        logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
        logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")

    if text_checkpoint_path:
        if text_tag == "eva_clip" or text_tag == "open_clip":
            text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
        elif text_tag == "clip":
            text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
        else:
            # BUG FIX: this branch previously loaded `visual_checkpoint_path`,
            # silently feeding visual weights into the text tower.
            text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)

        text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)

        logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
        logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")

    return visual_incompatible_keys, text_incompatible_keys
309
+
310
+
311
def create_model(
    model_name: str,
    pretrained: Optional[str] = None,
    precision: str = "fp32",
    device: Union[str, torch.device] = "cpu",
    jit: bool = False,
    force_quick_gelu: bool = False,
    force_custom_clip: bool = False,
    force_patch_dropout: Optional[float] = None,
    pretrained_image: str = "",
    pretrained_text: str = "",
    pretrained_hf: bool = True,
    pretrained_visual_model: str = None,
    pretrained_text_model: str = None,
    cache_dir: Optional[str] = None,
    skip_list: list = [],
):
    """Build a CLIP/CustomCLIP model from a registered config and optionally
    load pretrained weights.

    ``pretrained`` loads a full-model checkpoint (or OpenAI jit weights);
    alternatively ``pretrained_image`` / ``pretrained_text`` load the two
    towers from separate checkpoints.

    Raises:
        RuntimeError: when the model config or a requested checkpoint is
            not found.
    """
    model_name = model_name.replace("/", "-")  # for callers using old naming with / in ViT names
    if isinstance(device, str):
        device = torch.device(device)

    if pretrained and pretrained.lower() == "openai":
        logging.info(f"Loading pretrained {model_name} from OpenAI.")
        model = load_openai_model(
            model_name,
            precision=precision,
            device=device,
            jit=jit,
            cache_dir=cache_dir,
        )
    else:
        model_cfg = get_model_config(model_name)
        if model_cfg is not None:
            logging.info(f"Loaded {model_name} model config.")
        else:
            logging.error(f"Model config for {model_name} not found; available models {list_models()}.")
            raise RuntimeError(f"Model config for {model_name} not found.")

        # The RoPE env flag tells load_state_dict to drop rotary buffers,
        # which are recomputed at build time.
        if "rope" in model_cfg.get("vision_cfg", {}):
            if model_cfg["vision_cfg"]["rope"]:
                os.environ["RoPE"] = "1"
            else:
                os.environ["RoPE"] = "0"

        if force_quick_gelu:
            # override for use of QuickGELU on non-OpenAI transformer models
            model_cfg["quick_gelu"] = True

        if force_patch_dropout is not None:
            # override the default patch dropout value
            model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout

        cast_dtype = get_cast_dtype(precision)
        custom_clip = model_cfg.pop("custom_text", False) or force_custom_clip or ("hf_model_name" in model_cfg["text_cfg"])

        if custom_clip:
            if "hf_model_name" in model_cfg.get("text_cfg", {}):
                model_cfg["text_cfg"]["hf_model_pretrained"] = pretrained_hf
            model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
        else:
            model = CLIP(**model_cfg, cast_dtype=cast_dtype)

        pretrained_cfg = {}
        if pretrained:
            checkpoint_path = ""
            pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
            if pretrained_cfg:
                checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
            elif os.path.exists(pretrained):
                checkpoint_path = pretrained

            if checkpoint_path:
                logging.info(f"Loading pretrained {model_name} weights ({pretrained}).")
                load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=False)
            else:
                error_str = f"Pretrained weights ({pretrained}) not found for model {model_name}." f"Available pretrained tags ({list_pretrained_tags_by_model(model_name)}."
                logging.warning(error_str)
                raise RuntimeError(error_str)
        else:
            visual_checkpoint_path = ""
            text_checkpoint_path = ""

            if pretrained_image:
                pretrained_visual_model = pretrained_visual_model.replace("/", "-")  # for callers using old naming with / in ViT names
                pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
                if "timm_model_name" in model_cfg.get("vision_cfg", {}):
                    # pretrained weight loading for timm models set via vision_cfg
                    model_cfg["vision_cfg"]["timm_model_pretrained"] = True
                elif pretrained_image_cfg:
                    visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
                elif os.path.exists(pretrained_image):
                    visual_checkpoint_path = pretrained_image
                else:
                    logging.warning(f"Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.")
                    raise RuntimeError(f"Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.")

            if pretrained_text:
                pretrained_text_model = pretrained_text_model.replace("/", "-")  # for callers using old naming with / in ViT names
                pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
                # BUG FIX: this condition previously tested `pretrained_image_cfg`,
                # which is undefined (NameError) when only `pretrained_text` is
                # supplied, and ignored the text cfg when both were supplied.
                if pretrained_text_cfg:
                    text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
                elif os.path.exists(pretrained_text):
                    text_checkpoint_path = pretrained_text
                else:
                    logging.warning(f"Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.")
                    raise RuntimeError(f"Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.")

            if visual_checkpoint_path:
                logging.info(f"Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).")
            if text_checkpoint_path:
                logging.info(f"Loading pretrained {model_name}.text weights ({text_checkpoint_path}).")

            if visual_checkpoint_path or text_checkpoint_path:
                load_pretrained_checkpoint(model, visual_checkpoint_path, text_checkpoint_path, strict=False, visual_model=pretrained_visual_model, text_model=pretrained_text_model, model_key="model|module|state_dict", skip_list=skip_list)

        if "fp16" in precision or "bf16" in precision:
            logging.info(f"convert precision to {precision}")
            model = model.to(torch.bfloat16) if "bf16" in precision else model.to(torch.float16)

        # model.to(device=device)

        # set image / mean metadata from pretrained_cfg if available, or use default
        model.visual.image_mean = pretrained_cfg.get("mean", None) or OPENAI_DATASET_MEAN
        model.visual.image_std = pretrained_cfg.get("std", None) or OPENAI_DATASET_STD

    if jit:
        model = torch.jit.script(model)

    return model
440
+
441
+
442
def create_model_and_transforms(
    model_name: str,
    pretrained: Optional[str] = None,
    precision: str = "fp32",
    device: Union[str, torch.device] = "cpu",
    jit: bool = False,
    force_quick_gelu: bool = False,
    force_custom_clip: bool = False,
    force_patch_dropout: Optional[float] = None,
    pretrained_image: str = "",
    pretrained_text: str = "",
    pretrained_hf: bool = True,
    pretrained_visual_model: str = None,
    pretrained_text_model: str = None,
    image_mean: Optional[Tuple[float, ...]] = None,
    image_std: Optional[Tuple[float, ...]] = None,
    cache_dir: Optional[str] = None,
    skip_list: list = [],
):
    """Build a model via `create_model` and return it together with the
    matching train/eval image preprocessing transforms.

    ``image_mean`` / ``image_std`` override the normalization stats attached
    to the model by `create_model`.

    Returns:
        (model, preprocess_train, preprocess_val)
    """
    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_clip=force_custom_clip,
        force_patch_dropout=force_patch_dropout,
        pretrained_image=pretrained_image,
        pretrained_text=pretrained_text,
        pretrained_hf=pretrained_hf,
        pretrained_visual_model=pretrained_visual_model,
        pretrained_text_model=pretrained_text_model,
        cache_dir=cache_dir,
        skip_list=skip_list,
    )

    # Fall back to the stats `create_model` attached to the visual tower.
    image_mean = image_mean or getattr(model.visual, "image_mean", None)
    image_std = image_std or getattr(model.visual, "image_std", None)
    preprocess_train = image_transform(model.visual.image_size, is_train=True, mean=image_mean, std=image_std)
    preprocess_val = image_transform(model.visual.image_size, is_train=False, mean=image_mean, std=image_std)

    return model, preprocess_train, preprocess_val
485
+
486
+
487
def create_model_from_pretrained(
    model_name: str,
    pretrained: str,
    precision: str = "fp32",
    device: Union[str, torch.device] = "cpu",
    jit: bool = False,
    force_quick_gelu: bool = False,
    force_custom_clip: bool = False,
    force_patch_dropout: Optional[float] = None,
    return_transform: bool = True,
    image_mean: Optional[Tuple[float, ...]] = None,
    image_std: Optional[Tuple[float, ...]] = None,
    cache_dir: Optional[str] = None,
    is_frozen: bool = False,
):
    """Build a model from a *required* pretrained tag or checkpoint path.

    Unlike `create_model_and_transforms`, this validates ``pretrained`` up
    front, can freeze all parameters (``is_frozen``), and returns only the
    eval-time transform (or just the model when ``return_transform`` is
    False).

    Raises:
        RuntimeError: when ``pretrained`` is neither a known tag nor an
            existing file.
    """
    if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
        raise RuntimeError(f"{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}." f" Use open_clip.list_pretrained() to find one.")

    model = create_model(
        model_name,
        pretrained,
        precision=precision,
        device=device,
        jit=jit,
        force_quick_gelu=force_quick_gelu,
        force_custom_clip=force_custom_clip,
        force_patch_dropout=force_patch_dropout,
        cache_dir=cache_dir,
    )

    if is_frozen:
        # Freeze every parameter, e.g. when used purely as a feature extractor.
        for param in model.parameters():
            param.requires_grad = False

    if not return_transform:
        return model

    image_mean = image_mean or getattr(model.visual, "image_mean", None)
    image_std = image_std or getattr(model.visual, "image_std", None)
    preprocess = image_transform(model.visual.image_size, is_train=False, mean=image_mean, std=image_std)

    return model, preprocess
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/loss.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.nn import functional as F
5
+
6
+ try:
7
+ import torch.distributed.nn
8
+ from torch import distributed as dist
9
+
10
+ has_distributed = True
11
+ except ImportError:
12
+ has_distributed = False
13
+
14
+ try:
15
+ import horovod.torch as hvd
16
+ except ImportError:
17
+ hvd = None
18
+
19
+ from timm.loss import LabelSmoothingCrossEntropy
20
+
21
+
22
def gather_features(image_features, text_features, local_loss=False, gather_with_grad=False, rank=0, world_size=1, use_horovod=False):
    """Gather image/text feature tensors from every distributed worker.

    Returns ``(all_image_features, all_text_features)``, each the concatenation
    of per-rank features along dim 0. When ``gather_with_grad`` is False and
    ``local_loss`` is False, this rank's own slice is re-inserted so that it
    keeps its autograd graph (the gathered copies from other ranks carry no
    gradient).
    """
    assert has_distributed, "torch.distributed did not import correctly, please use a PyTorch version with support."
    if use_horovod:
        assert hvd is not None, "Please install horovod"
        if gather_with_grad:
            all_image_features = hvd.allgather(image_features)
            all_text_features = hvd.allgather(text_features)
        else:
            # allgather without grad: collect copies first, then patch in local slice.
            with torch.no_grad():
                all_image_features = hvd.allgather(image_features)
                all_text_features = hvd.allgather(text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
                gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
                all_image_features = torch.cat(gathered_image_features, dim=0)
                all_text_features = torch.cat(gathered_text_features, dim=0)
    else:
        # We gather tensors from all gpus
        if gather_with_grad:
            # torch.distributed.nn.all_gather keeps gradients flowing across ranks.
            all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
            all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
            # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0)
            # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0)
        else:
            # Plain all_gather detaches: fill preallocated buffers, then restore
            # the local rank's slice so its gradient graph survives.
            gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
            gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
            dist.all_gather(gathered_image_features, image_features)
            dist.all_gather(gathered_text_features, text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
            all_image_features = torch.cat(gathered_image_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)

    return all_image_features, all_text_features
61
+
62
+
63
class ClipLoss(nn.Module):
    """Symmetric InfoNCE (CLIP) contrastive loss.

    Works single-process or distributed: with ``world_size > 1`` features are
    gathered across workers before the similarity matrices are formed. Returns
    ``(loss, acc)`` where ``acc`` holds image-to-text and text-to-image retrieval
    accuracy for the current batch.
    """

    def __init__(
        self,
        local_loss=False,
        gather_with_grad=False,
        cache_labels=False,
        rank=0,
        world_size=1,
        use_horovod=False,
        smoothing=0.0,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod
        # Optional label smoothing; plain cross-entropy when smoothing == 0.
        self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None

        # Per-device ground-truth label cache, refreshed when batch size changes.
        self.prev_num_logits = 0
        self.labels = {}

    def _ground_truth(self, device, num_logits):
        """Return arange targets for this batch, using the per-device cache when valid."""
        if self.prev_num_logits != num_logits or device not in self.labels:
            labels = torch.arange(num_logits, device=device, dtype=torch.long)
            if self.world_size > 1 and self.local_loss:
                # Each rank's positives sit in its own slice of the gathered matrix.
                labels = labels + num_logits * self.rank
            if self.cache_labels:
                self.labels[device] = labels
            self.prev_num_logits = num_logits
        else:
            labels = self.labels[device]
        return labels

    def forward(self, image_features, text_features, logit_scale=1.0):
        device = image_features.device
        if self.world_size > 1:
            all_image_features, all_text_features = gather_features(image_features, text_features, self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)

            if self.local_loss:
                # Local rows scored against globally-gathered columns.
                logits_per_image = logit_scale * image_features @ all_text_features.T
                logits_per_text = logit_scale * text_features @ all_image_features.T
            else:
                logits_per_image = logit_scale * all_image_features @ all_text_features.T
                logits_per_text = logits_per_image.T
        else:
            logits_per_image = logit_scale * image_features @ text_features.T
            logits_per_text = logit_scale * text_features @ image_features.T

        labels = self._ground_truth(device, logits_per_image.shape[0])

        if self.label_smoothing_cross_entropy:
            total_loss = (self.label_smoothing_cross_entropy(logits_per_image, labels) + self.label_smoothing_cross_entropy(logits_per_text, labels)) / 2
        else:
            total_loss = (F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels)) / 2

        # Batch retrieval accuracy in both directions (diagnostic only).
        i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image)
        t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text)
        acc = {"i2t": i2t_acc, "t2i": t2i_acc}
        return total_loss, acc
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-18B.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1536,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 48,
6
+ "width": 5120,
7
+ "head_width": 128,
8
+ "mlp_ratio": 5,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-18b-14-x",
11
+ "drop_path_rate": 0,
12
+ "qkv_bias": false,
13
+ "xattn": true,
14
+ "postnorm": true,
15
+ "fusedLN": false,
16
+ "use_rms_norm": true
17
+ },
18
+ "text_cfg": {
19
+ "context_length": 77,
20
+ "vocab_size": 49408,
21
+ "width": 1280,
22
+ "heads": 20,
23
+ "layers": 32,
24
+ "xattn": false,
25
+ "fusedLN": false
26
+ }
27
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA-CLIP-8B-plus.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1280,
3
+ "vision_cfg": {
4
+ "image_size": 448,
5
+ "layers": 32,
6
+ "width": 4096,
7
+ "head_width": 128,
8
+ "mlp_ratio": 5,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-8b-14-plus-x",
11
+ "drop_path_rate": 0,
12
+ "qkv_bias": false,
13
+ "xattn": true,
14
+ "postnorm": false,
15
+ "fusedLN": false,
16
+ "use_rms_norm": true
17
+ },
18
+ "text_cfg": {
19
+ "context_length": 77,
20
+ "vocab_size": 49408,
21
+ "width": 1280,
22
+ "heads": 20,
23
+ "layers": 32,
24
+ "xattn": false,
25
+ "fusedLN": false
26
+ }
27
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA01-CLIP-g-14.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 40,
6
+ "width": 1408,
7
+ "head_width": 88,
8
+ "mlp_ratio": 4.3637,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-g-14-x",
11
+ "drop_path_rate": 0.4,
12
+ "xattn": true,
13
+ "fusedLN": true
14
+ },
15
+ "text_cfg": {
16
+ "context_length": 77,
17
+ "vocab_size": 49408,
18
+ "width": 768,
19
+ "heads": 12,
20
+ "layers": 12,
21
+ "xattn": false,
22
+ "fusedLN": true
23
+ }
24
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-L-14-336.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 768,
3
+ "vision_cfg": {
4
+ "image_size": 336,
5
+ "layers": 24,
6
+ "width": 1024,
7
+ "drop_path_rate": 0,
8
+ "head_width": 64,
9
+ "mlp_ratio": 2.6667,
10
+ "patch_size": 14,
11
+ "eva_model_name": "eva-clip-l-14-336",
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 768,
24
+ "heads": 12,
25
+ "layers": 12,
26
+ "xattn": false,
27
+ "fusedLN": true
28
+ }
29
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/EVA02-CLIP-bigE-14.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 64,
6
+ "width": 1792,
7
+ "head_width": 112,
8
+ "mlp_ratio": 8.571428571428571,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-4b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": true,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1024,
20
+ "heads": 16,
21
+ "layers": 24,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14-448.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 448,
5
+ "layers": 77,
6
+ "width": 2304,
7
+ "head_width": 144,
8
+ "mlp_ratio": 10.9722,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-10b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": false,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1280,
20
+ "heads": 20,
21
+ "layers": 32,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 77,
6
+ "width": 2304,
7
+ "head_width": 144,
8
+ "mlp_ratio": 10.9722,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-10b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": false,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1280,
20
+ "heads": 20,
21
+ "layers": 32,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/openai.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ OpenAI pretrained model functions
2
+
3
+ Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
+ """
5
+
6
+ import os
7
+ import warnings
8
+ from typing import List, Optional, Union
9
+
10
+ import torch
11
+
12
+ from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
13
+ from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
14
+
15
+ __all__ = ["list_openai_models", "load_openai_model"]
16
+
17
+
18
+ def list_openai_models() -> List[str]:
19
+ """Returns the names of available CLIP models"""
20
+ return list_pretrained_models_by_tag("openai")
21
+
22
+
23
+ def load_openai_model(
24
+ name: str,
25
+ precision: Optional[str] = None,
26
+ device: Optional[Union[str, torch.device]] = None,
27
+ jit: bool = True,
28
+ cache_dir: Optional[str] = None,
29
+ ):
30
+ """Load a CLIP model
31
+
32
+ Parameters
33
+ ----------
34
+ name : str
35
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
36
+ precision: str
37
+ Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
38
+ device : Union[str, torch.device]
39
+ The device to put the loaded model
40
+ jit : bool
41
+ Whether to load the optimized JIT model (default) or more hackable non-JIT model.
42
+ cache_dir : Optional[str]
43
+ The directory to cache the downloaded model weights
44
+
45
+ Returns
46
+ -------
47
+ model : torch.nn.Module
48
+ The CLIP model
49
+ preprocess : Callable[[PIL.Image], torch.Tensor]
50
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
51
+ """
52
+ if device is None:
53
+ device = "cuda" if torch.cuda.is_available() else "cpu"
54
+ if precision is None:
55
+ precision = "fp32" if device == "cpu" else "fp16"
56
+
57
+ if get_pretrained_url(name, "openai"):
58
+ model_path = download_pretrained_from_url(get_pretrained_url(name, "openai"), cache_dir=cache_dir)
59
+ elif os.path.isfile(name):
60
+ model_path = name
61
+ else:
62
+ raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
63
+
64
+ try:
65
+ # loading JIT archive
66
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
67
+ state_dict = None
68
+ except RuntimeError:
69
+ # loading saved state dict
70
+ if jit:
71
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
72
+ jit = False
73
+ state_dict = torch.load(model_path, map_location="cpu")
74
+
75
+ if not jit:
76
+ # Build a non-jit model from the OpenAI jitted model state dict
77
+ cast_dtype = get_cast_dtype(precision)
78
+ try:
79
+ model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
80
+ except KeyError:
81
+ sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
82
+ model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
83
+
84
+ # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
85
+ model = model.to(device)
86
+ if precision.startswith("amp") or precision == "fp32":
87
+ model.float()
88
+ elif precision == "bf16":
89
+ convert_weights_to_lp(model, dtype=torch.bfloat16)
90
+
91
+ return model
92
+
93
+ # patch the device names
94
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
95
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
96
+
97
+ def patch_device(module):
98
+ try:
99
+ graphs = [module.graph] if hasattr(module, "graph") else []
100
+ except RuntimeError:
101
+ graphs = []
102
+
103
+ if hasattr(module, "forward1"):
104
+ graphs.append(module.forward1.graph)
105
+
106
+ for graph in graphs:
107
+ for node in graph.findAllNodes("prim::Constant"):
108
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
109
+ node.copyAttributes(device_node)
110
+
111
+ model.apply(patch_device)
112
+ patch_device(model.encode_image)
113
+ patch_device(model.encode_text)
114
+
115
+ # patch dtype to float32 (typically for CPU)
116
+ if precision == "fp32":
117
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
118
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
119
+ float_node = float_input.node()
120
+
121
+ def patch_float(module):
122
+ try:
123
+ graphs = [module.graph] if hasattr(module, "graph") else []
124
+ except RuntimeError:
125
+ graphs = []
126
+
127
+ if hasattr(module, "forward1"):
128
+ graphs.append(module.forward1.graph)
129
+
130
+ for graph in graphs:
131
+ for node in graph.findAllNodes("aten::to"):
132
+ inputs = list(node.inputs())
133
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
134
+ if inputs[i].node()["value"] == 5:
135
+ inputs[i].node().copyAttributes(float_node)
136
+
137
+ model.apply(patch_float)
138
+ patch_float(model.encode_image)
139
+ patch_float(model.encode_text)
140
+ model.float()
141
+
142
+ # ensure image_size attr available at consistent location for both jit and non-jit
143
+ model.visual.image_size = model.input_resolution.item()
144
+ return model
VLMEvalKit-sudoku/llava/model/multimodal_encoder/dev_eva_clip/eva_clip/timm_model.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ timm model adapter
2
+
3
+ Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
4
+ """
5
+
6
+ import logging
7
+ from collections import OrderedDict
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+
12
+ try:
13
+ import timm
14
+ from timm.models.layers import Mlp, to_2tuple
15
+
16
+ try:
17
+ # old timm imports < 0.8.1
18
+ from timm.models.layers.attention_pool2d import RotAttentionPool2d
19
+ from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
20
+ except ImportError:
21
+ # new timm imports >= 0.8.1
22
+ from timm.layers import RotAttentionPool2d
23
+ from timm.layers import AttentionPool2d as AbsAttentionPool2d
24
+ except ImportError:
25
+ timm = None
26
+
27
+ from .utils import freeze_batch_norm_2d
28
+
29
+
30
class TimmModel(nn.Module):
    """timm model adapter

    Wraps a timm backbone ("trunk") and appends a pooling + projection head that
    maps trunk features to `embed_dim`, for use as a CLIP vision tower.
    # FIXME this adapter is a work in progress, may change in ways that break weight compat
    """

    def __init__(self, model_name, embed_dim, image_size=224, pool="avg", proj="linear", proj_bias=False, drop=0.0, pretrained=False):
        super().__init__()
        if timm is None:
            raise RuntimeError("Please `pip install timm` to use timm models.")

        self.image_size = to_2tuple(image_size)
        self.trunk = timm.create_model(model_name, pretrained=pretrained)
        # "pool_size" in the default cfg implies the trunk emits a 2D feature map.
        feat_size = self.trunk.default_cfg.get("pool_size", None)
        feature_ndim = 1 if not feat_size else 2
        if pool in ("abs_attn", "rot_attn"):
            # Attention pooling only makes sense over a spatial feature map.
            assert feature_ndim == 2
            # if attn pooling used, remove both classifier and default pool
            self.trunk.reset_classifier(0, global_pool="")
        else:
            # reset global pool if pool config set, otherwise leave as network default
            reset_kwargs = dict(global_pool=pool) if pool else {}
            self.trunk.reset_classifier(0, **reset_kwargs)
        prev_chs = self.trunk.num_features

        head_layers = OrderedDict()
        if pool == "abs_attn":
            head_layers["pool"] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
            prev_chs = embed_dim
        elif pool == "rot_attn":
            head_layers["pool"] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
            prev_chs = embed_dim
        else:
            assert proj, "projection layer needed if non-attention pooling is used."

        # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
        if proj == "linear":
            head_layers["drop"] = nn.Dropout(drop)
            head_layers["proj"] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
        elif proj == "mlp":
            head_layers["mlp"] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))

        self.head = nn.Sequential(head_layers)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """lock modules
        Args:
            unlocked_groups (int): leave last n layer groups unlocked (default: 0)
        """
        if not unlocked_groups:
            # lock full model
            for param in self.trunk.parameters():
                param.requires_grad = False
            if freeze_bn_stats:
                freeze_batch_norm_2d(self.trunk)
        else:
            # NOTE: partial freeze requires latest timm (master) branch and is subject to change
            try:
                # FIXME import here until API stable and in an official release
                from timm.models.helpers import group_parameters, group_modules
            except ImportError:
                raise RuntimeError("Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`")
            matcher = self.trunk.group_matcher()
            gparams = group_parameters(self.trunk, matcher)
            max_layer_id = max(gparams.keys())
            # Freeze all layer groups up to (and including) max_layer_id - unlocked_groups.
            max_layer_id = max_layer_id - unlocked_groups
            for group_idx in range(max_layer_id + 1):
                group = gparams[group_idx]
                for param in group:
                    self.trunk.get_parameter(param).requires_grad = False
            if freeze_bn_stats:
                gmodules = group_modules(self.trunk, matcher, reverse=True)
                gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
                freeze_batch_norm_2d(self.trunk, gmodules)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # Best-effort: not every timm backbone supports gradient checkpointing.
        try:
            self.trunk.set_grad_checkpointing(enable)
        except Exception as e:
            logging.warning("grad checkpointing not supported for this timm image tower, continuing without...")

    def forward(self, x):
        # Trunk features -> pooling/projection head -> embedding.
        x = self.trunk(x)
        x = self.head(x)
        return x
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_clip_encoder.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .eva_clip_processors import EvaClipImageTrainProcessor
5
+ from .eva_vit import EVAEncoderWrapper
6
+ from .factory import list_models, add_model_config, get_model_config
7
+
8
+ from llava.utils import rank0_print
9
+
10
+
11
class EvaClipVisionTower(nn.Module):
    """LLaVA vision-tower wrapper around an EVA-CLIP ViT encoder.

    Loads the encoder immediately unless ``delay_load`` is set, in which case it
    keeps only the config (``cfg_only``) until an unfreeze/tunable-parts flag
    forces an eager load.
    """

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False
        self.vision_tower_name = vision_tower
        self.vision_tower_pretrained = args.vision_tower_pretrained
        self.config = get_model_config(vision_tower)

        if not delay_load:
            rank0_print(f"Loading EVA ViT: {self.vision_tower_name}")
            self.load_model()
        elif getattr(args, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(args, "mm_tunable_parts") and "mm_vision_tower" in args.mm_tunable_parts:
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()
        else:
            # Delay-load path: expose only the config until load_model() is called.
            self.cfg_only = self.config

    def load_model(self, device_map=None):
        """Instantiate the image processor and the frozen EVA encoder."""
        rank0_print(f"Pretrained: {self.vision_tower_pretrained}")
        self.image_processor = EvaClipImageTrainProcessor(self.config["vision_cfg"]["image_size"])
        self.vision_tower = EVAEncoderWrapper(self.vision_tower_pretrained, self.config)
        rank0_print(f"Loaded image processor: {self.image_processor}")
        # The tower is used as a frozen feature extractor.
        self.vision_tower.requires_grad_(False)
        self.is_loaded = True

    def forward(self, images):
        """Encode a batch tensor, or a list of single images, into features."""
        if type(images) is list:
            image_features = []
            for image in images:
                # Encode each image individually; cast result back to input dtype.
                image_feature = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0)).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_features = self.vision_tower(images.to(device=self.device, dtype=self.dtype)).to(images.dtype)

        return image_features

    @property
    def dtype(self):
        # Delegated to the wrapped encoder.
        return self.vision_tower.dtype

    @property
    def device(self):
        # Delegated to the wrapped encoder.
        return self.vision_tower.device

    @property
    def hidden_size(self):
        return self.config["vision_cfg"]["width"]

    @property
    def num_patches(self):
        # Patches per image = (side / patch)^2.
        return (self.config["vision_cfg"]["image_size"] // self.config["vision_cfg"]["patch_size"]) ** 2

    @property
    def num_patches_per_side(self):
        return self.config["vision_cfg"]["image_size"] // self.config["vision_cfg"]["patch_size"]

    @property
    def image_size(self):
        return self.config["vision_cfg"]["image_size"]
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/eva_vit.py ADDED
@@ -0,0 +1,856 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ # Adapted from https://github.com/baaivision/EVA/tree/master/EVA-CLIP
3
+ """
4
+
5
+ from math import pi
6
+ import torch
7
+ from torch import nn
8
+ from einops import rearrange, repeat
9
+ import logging
10
+ from llava.utils import rank0_print
11
+
12
+
13
def broadcat(tensors, dim=-1):
    """Concatenate ``tensors`` along ``dim``, broadcasting every other dimension.

    Non-concat dimensions may differ only where one of the sizes is expandable
    (size 1); they are expanded to the common maximum before ``torch.cat``.
    """
    ndims = set(t.dim() for t in tensors)
    assert len(ndims) == 1, "tensors must all have the same number of dimensions"
    ndim = ndims.pop()
    if dim < 0:
        dim = dim + ndim

    # Sizes of each axis across all tensors, e.g. axis 0 -> (s0_t1, s0_t2, ...).
    sizes_per_axis = list(zip(*(list(t.shape) for t in tensors)))
    non_cat_axes = [(axis, sizes) for axis, sizes in enumerate(sizes_per_axis) if axis != dim]
    assert all(len(set(sizes)) <= 2 for _, sizes in non_cat_axes), "invalid dimensions for broadcastable concatentation"

    # Target size for every non-concat axis is the max across tensors;
    # the concat axis keeps each tensor's own size.
    target = [max(sizes) for _, sizes in non_cat_axes]
    target.insert(dim, None)

    expanded = []
    for t in tensors:
        shape = [t.shape[axis] if size is None else size for axis, size in enumerate(target)]
        expanded.append(t.expand(*shape))
    return torch.cat(expanded, dim=dim)
28
+
29
+
30
def rotate_half(x):
    """Rotate adjacent channel pairs: (x0, x1) -> (-x1, x0) along the last dim."""
    paired = x.reshape(*x.shape[:-1], -1, 2)
    even, odd = paired.unbind(dim=-1)
    rotated = torch.stack((-odd, even), dim=-1)
    return rotated.reshape(*x.shape)
35
+
36
+
37
class VisionRotaryEmbeddingFast(nn.Module):
    """2D rotary position embedding with precomputed cos/sin tables.

    Frequency tables are built once at construction for a ``ft_seq_len`` x
    ``ft_seq_len`` patch grid (interpolated from the ``pt_seq_len`` pretraining
    grid) and registered as buffers; ``forward`` applies the rotation to its
    input tensor.
    """

    def __init__(self, dim, pt_seq_len, ft_seq_len=None, custom_freqs=None, freqs_for="lang", theta=10000, max_freq=10, num_freqs=1, patch_dropout=0.0):
        super().__init__()
        # Base frequency spectrum: custom, language-style inverse powers of
        # theta, linearly spaced ("pixel"), or constant.
        if custom_freqs:
            freqs = custom_freqs
        elif freqs_for == "lang":
            freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
        elif freqs_for == "pixel":
            freqs = torch.linspace(1.0, max_freq / 2, dim // 2) * pi
        elif freqs_for == "constant":
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"unknown modality {freqs_for}")

        if ft_seq_len is None:
            ft_seq_len = pt_seq_len
        # Rescale fine-tune positions into the pretraining coordinate range.
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        freqs = torch.einsum("..., f -> ... f", t, freqs)
        # Duplicate each frequency for the (cos, sin) channel pairs.
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        # Combine row and column tables into a 2D grid of angles.
        freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim=-1)

        # Flatten the grid to (seq_len, dim) lookup tables.
        freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
        freqs_sin = freqs.sin().view(-1, freqs.shape[-1])

        self.patch_dropout = patch_dropout

        self.register_buffer("freqs_cos", freqs_cos)
        self.register_buffer("freqs_sin", freqs_sin)

        logging.info(f"Shape of rope freq: {self.freqs_cos.shape}")

    def forward(self, t, patch_indices_keep=None):
        """Apply the rotary embedding to ``t``.

        When ``patch_indices_keep`` is given (patch dropout active), the cos/sin
        tables are gathered per sample to match the surviving patches.
        """
        if patch_indices_keep is not None:
            batch = t.size()[0]
            batch_indices = torch.arange(batch)
            batch_indices = batch_indices[..., None]

            # Broadcast the shared tables across batch and heads, then select
            # only the kept patch positions for each sample.
            freqs_cos = repeat(self.freqs_cos, "i j -> n i m j", n=t.shape[0], m=t.shape[1])
            freqs_sin = repeat(self.freqs_sin, "i j -> n i m j", n=t.shape[0], m=t.shape[1])

            freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
            freqs_cos = rearrange(freqs_cos, "n i m j -> n m i j")
            freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
            freqs_sin = rearrange(freqs_sin, "n i m j -> n m i j")

            return t * freqs_cos + rotate_half(t) * freqs_sin

        return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
86
+
87
+
88
class LayerNorm(nn.LayerNorm):
    """torch LayerNorm that casts its output back to the input's original dtype."""

    def forward(self, x: torch.Tensor):
        normalized = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return normalized.to(x.dtype)
95
+
96
+
97
class PatchDropout(nn.Module):
    """
    Patch dropout: randomly keeps a subset of patch tokens during training.
    https://arxiv.org/abs/2212.00794
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.0
        # Fraction of patch tokens to drop during training.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude CLS token
        # NOTE(review): `os` is imported further down in this module; this works
        # because the module-level import runs before any instance is created.
        logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")

    def forward(self, x):
        # No-op at eval time or when dropout probability is zero.
        if not self.training or self.prob == 0.0:
            return x

        if self.exclude_first_token:
            # Split off the CLS token so it is always kept.
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        keep_prob = 1 - self.prob
        # Always keep at least one patch token.
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        # Random score per token; topk indices give a random subset to keep.
        rand = torch.randn(batch, num_tokens)
        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        if self.training and os.getenv("RoPE") == "1":
            # Rotary embeddings need to know which patch positions survived.
            return x, patch_indices_keep

        return x
139
+
140
+
141
+ # --------------------------------------------------------
142
+ # Adapted from https://github.com/microsoft/unilm/tree/master/beit
143
+ # --------------------------------------------------------
144
+ import math
145
+ import os
146
+ import torch.nn as nn
147
+ import torch.nn.functional as F
148
+
149
+ try:
150
+ from timm.models.layers import drop_path, to_2tuple, trunc_normal_
151
+ except:
152
+ from timm.layers import drop_path, to_2tuple, trunc_normal_
153
+
154
+ if os.getenv("ENV_TYPE") == "deepspeed":
155
+ try:
156
+ from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
157
+ except:
158
+ from torch.utils.checkpoint import checkpoint
159
+ else:
160
+ from torch.utils.checkpoint import checkpoint
161
+
162
+ try:
163
+ import xformers.ops as xops
164
+ except ImportError:
165
+ xops = None
166
+ # print("Please 'pip install xformers'")
167
+
168
+
169
class DropPath(nn.Module):
    """Per-sample stochastic depth: drops entire residual-branch samples."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        # Probability of dropping the whole path for a given sample.
        self.drop_prob = drop_prob

    def forward(self, x):
        # Delegates to timm's drop_path; self.training gates the dropout.
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
181
+
182
+
183
+ class Mlp(nn.Module):
184
+ def __init__(
185
+ self,
186
+ in_features,
187
+ hidden_features=None,
188
+ out_features=None,
189
+ act_layer=nn.GELU,
190
+ norm_layer=nn.LayerNorm,
191
+ drop=0.0,
192
+ subln=False,
193
+ ):
194
+ super().__init__()
195
+ out_features = out_features or in_features
196
+ hidden_features = hidden_features or in_features
197
+ self.fc1 = nn.Linear(in_features, hidden_features)
198
+ self.act = act_layer()
199
+
200
+ self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
201
+
202
+ self.fc2 = nn.Linear(hidden_features, out_features)
203
+ self.drop = nn.Dropout(drop)
204
+
205
+ def forward(self, x):
206
+ x = self.fc1(x)
207
+ x = self.act(x)
208
+ # x = self.drop(x)
209
+ # commit this for the orignal BERT implement
210
+ x = self.ffn_ln(x)
211
+
212
+ x = self.fc2(x)
213
+ x = self.drop(x)
214
+ return x
215
+
216
+
217
class SwiGLU(nn.Module):
    """SwiGLU feed-forward: ``w3(act(w1(x)) * w2(x))`` with optional sub-LN.

    ``act_layer`` defaults to SiLU, giving the standard SwiGLU gating.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None,
                 act_layer=nn.SiLU, drop=0.0, norm_layer=nn.LayerNorm, subln=False):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        # Gate and value projections (creation order preserved for checkpoints).
        self.w1 = nn.Linear(in_features, hidden_features)
        self.w2 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.w3 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        gate = self.act(self.w1(x))
        value = self.w2(x)
        out = self.w3(self.ffn_ln(gate * value))
        return self.drop(out)
240
+
241
+
242
class Attention(nn.Module):
    """Multi-head self-attention for the EVA vision transformer.

    Optional features, each enabled by constructor arguments:
      * ``window_size``: learned relative position bias over a fixed patch
        window, with three extra table entries for cls->token, token->cls and
        cls->cls interactions.
      * ``rope``: rotary position embedding module applied to every token
        except the leading CLS token.
      * ``xattn``: xformers memory-efficient attention (used only when the
        ``xformers`` import succeeded).
      * ``subln``: EVA-02 "sub-LN" variant — separate q/k/v projections and a
        LayerNorm on the attention output before the final projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.subln = subln
        if self.subln:
            # Separate projections; biases are applied manually in forward
            # (q and v carry a bias, k does not).
            self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
        else:
            self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)

        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        if window_size:
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token 2 cls & cls to cls

            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            # Index 0 is the CLS token; its entries get the 3 dedicated table slots.
            relative_position_index = torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1

            self.register_buffer("relative_position_index", relative_position_index)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
        # self.proj = nn.Linear(all_head_dim, all_head_dim)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

        self.rope = rope

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        """Compute attention over x of shape (B, N, C).

        ``attn_mask`` (if given) is per-token over the key axis; truthy entries
        are kept, others masked to -inf before the softmax.
        """
        B, N, C = x.shape
        if self.subln:
            q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
            k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
            v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)

            q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)  # B, num_heads, N, C
            k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
            v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
        else:
            # Fused qkv projection; the bias vector concatenates q-bias,
            # an all-zero k-bias, and v-bias.
            qkv_bias = None
            if self.q_bias is not None:
                qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))

            qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
            qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)  # 3, B, num_heads, N, C
            q, k, v = qkv[0], qkv[1], qkv[2]

        if self.rope:
            # Rotate all tokens except the CLS token (index 0).
            # slightly fast impl
            q_t = q[:, :, 1:, :]
            ro_q_t = self.rope(q_t)
            q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)

            k_t = k[:, :, 1:, :]
            ro_k_t = self.rope(k_t)
            k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)

        if self.xattn and xops is not None:
            # xformers expects (B, N, num_heads, head_dim).
            # NOTE(review): relative position biases and attn_mask are NOT
            # applied on this path — only on the manual path below; confirm.
            q = q.permute(0, 2, 1, 3)  # B, num_heads, N, C -> B, N, num_heads, C
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)

            x = xops.memory_efficient_attention(
                q,
                k,
                v,
                p=self.xattn_drop,
                scale=self.scale,
            )
            x = x.reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)

            if self.relative_position_bias_table is not None:
                relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
                attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)

            if rel_pos_bias is not None:
                attn = attn + rel_pos_bias.type_as(attn)

            if attn_mask is not None:
                attn_mask = attn_mask.bool()
                attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        return x
375
+
376
+
377
class Block(nn.Module):
    """Transformer encoder block: attention + MLP residual branches.

    Supports pre-norm (default) and post-norm placement, optional LayerScale
    (``init_values``) and stochastic depth (``drop_path``); the MLP is either
    a plain ``Mlp`` or a ``SwiGLU`` (``naiveswiglu``).
    """

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        init_values=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        window_size=None,
        attn_head_dim=None,
        xattn=False,
        rope=None,
        postnorm=False,
        subln=False,
        naiveswiglu=False,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
            window_size=window_size,
            attn_head_dim=attn_head_dim,
            xattn=xattn,
            rope=rope,
            subln=subln,
            norm_layer=norm_layer,
        )
        # Stochastic depth applied to both residual branches.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)

        if naiveswiglu:
            self.mlp = SwiGLU(in_features=dim, hidden_features=hidden_dim, subln=subln, norm_layer=norm_layer)
        else:
            self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, subln=subln, drop=drop)

        if init_values is not None and init_values > 0:
            # LayerScale: learnable per-channel scaling of each residual branch.
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

        self.postnorm = postnorm

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        # Attention branch; norm placement depends on pre/post-norm mode.
        if self.postnorm:
            attn_out = self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
        else:
            attn_out = self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)
        if self.gamma_1 is not None:
            attn_out = self.gamma_1 * attn_out
        x = x + self.drop_path(attn_out)

        # Feed-forward branch.
        if self.postnorm:
            mlp_out = self.norm2(self.mlp(x))
        else:
            mlp_out = self.mlp(self.norm2(x))
        if self.gamma_2 is not None:
            mlp_out = self.gamma_2 * mlp_out
        x = x + self.drop_path(mlp_out)
        return x
444
+
445
+
446
class PatchEmbed(nn.Module):
    """Image to Patch Embedding via a strided convolution."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid_h = img_size[0] // patch_size[0]
        grid_w = img_size[1] // patch_size[1]
        # (rows, cols) of the patch grid and total patch count.
        self.patch_shape = (grid_h, grid_w)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid_h * grid_w

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        return self.proj(x).flatten(2).transpose(1, 2)
467
+
468
+
469
class RelativePositionBias(nn.Module):
    """Shared learned relative position bias (BEiT-style).

    One bias table covers all pairwise relative offsets within the patch
    window, plus three dedicated entries for cls->token, token->cls and
    cls->cls interactions. ``forward`` returns the bias ready to be added to
    attention logits: shape (num_heads, Wh*Ww+1, Wh*Ww+1).
    """

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        # Row/column 0 corresponds to the CLS token; it uses the 3 extra slots.
        relative_position_index = torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index)

    def forward(self):
        """Gather the bias table into (num_heads, Wh*Ww+1, Wh*Ww+1)."""
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
499
+
500
+
501
class EVAVisionTransformer(nn.Module):
    """Vision Transformer with support for patch or hybrid CNN input stage.

    EVA-CLIP variant of the ViT encoder with optional:
      * learned absolute position embedding (``use_abs_pos_emb``)
      * shared (``use_shared_rel_pos_bias``) or per-block (``use_rel_pos_bias``)
        relative position bias
      * 2D rotary position embedding (``rope``; ``VisionRotaryEmbeddingFast``
        is defined elsewhere in this package)
      * training-time patch dropout (``patch_dropout``)
      * mean pooling vs CLS-token pooling of the final features
        (``use_mean_pooling``)
      * gradient checkpointing (``grad_checkpointing``)
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        num_classes=1000,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        init_values=None,
        patch_dropout=0.0,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        rope=False,
        use_mean_pooling=True,
        init_scale=0.001,
        grad_checkpointing=False,
        xattn=False,
        postnorm=False,
        pt_hw_seq_len=16,
        intp_freq=False,
        naiveswiglu=False,
        subln=False,
    ):
        super().__init__()
        self.image_size = img_size
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models

        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            # Learned absolute position embedding for CLS + all patch tokens.
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)

        if use_shared_rel_pos_bias:
            # One relative-position-bias table shared by every block.
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None

        if rope:
            half_head_dim = embed_dim // num_heads // 2
            hw_seq_len = img_size // patch_size
            self.rope = VisionRotaryEmbeddingFast(
                dim=half_head_dim,
                pt_seq_len=pt_hw_seq_len,
                ft_seq_len=hw_seq_len if intp_freq else None,
                # patch_dropout=patch_dropout
            )
        else:
            self.rope = None

        self.naiveswiglu = naiveswiglu

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                    init_values=init_values,
                    window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
                    xattn=xattn,
                    rope=self.rope,
                    postnorm=postnorm,
                    subln=subln,
                    naiveswiglu=naiveswiglu,
                )
                for i in range(depth)
            ]
        )
        # With mean pooling, normalization happens in fc_norm after the mean;
        # otherwise a final norm is applied before taking the CLS token.
        self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=0.02)

        trunc_normal_(self.cls_token, std=0.02)
        # trunc_normal_(self.mask_token, std=.02)

        self.apply(self._init_weights)
        self.fix_init_weight()

        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=0.02)
            self.head.weight.data.mul_(init_scale)
            self.head.bias.data.mul_(init_scale)

        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0.0 else nn.Identity()

        self.grad_checkpointing = grad_checkpointing

    def fix_init_weight(self):
        """Depth-dependent rescaling of residual-branch output weights (BEiT)."""

        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            if self.naiveswiglu:
                rescale(layer.mlp.w3.weight.data, layer_id + 1)
            else:
                rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def get_cast_dtype(self) -> torch.dtype:
        # NOTE(review): assumes the MLP is a plain Mlp; with naiveswiglu the
        # blocks have no ``fc2`` attribute — confirm callers only use this
        # with non-SwiGLU configs.
        return self.blocks[0].mlp.fc2.weight.dtype

    def _init_weights(self, m):
        """Truncated-normal init for Linear, constant init for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters. Partial unlocking is not supported."""
        assert unlocked_groups == 0, "partial locking not currently supported for this model"
        for param in self.parameters():
            param.requires_grad = False

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by the optimizer setup.
        return {"pos_embed", "cls_token"}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=""):
        """Replace the classification head (Identity when num_classes == 0)."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x, return_all_features=False):
        """Encode an image batch into token features.

        Returns all tokens (CLS first) when ``return_all_features`` is True;
        otherwise a pooled feature vector (mean-pooled or the CLS token,
        depending on ``use_mean_pooling``).
        """
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
        if os.getenv("RoPE") == "1":
            if self.training and not isinstance(self.patch_dropout, nn.Identity):
                x, patch_indices_keep = self.patch_dropout(x)
                # Directly pass patch_indices_keep to self.rope.forward
                x = self.rope.forward(x, patch_indices_keep=patch_indices_keep)
            else:
                # Pass None or omit the patch_indices_keep argument for default behavior
                x = self.rope.forward(x, patch_indices_keep=None)
                x = self.patch_dropout(x)
        else:
            x = self.patch_dropout(x)

        rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
        for i, blk in enumerate(self.blocks):
            # NOTE(review): the final transformer block is skipped entirely,
            # so features come from the penultimate layer — presumably
            # intentional for this adapter, but confirm.
            if i == len(self.blocks) - 1:
                continue
            if self.grad_checkpointing:
                # NOTE(review): passes (rel_pos_bias,) as a positional tuple,
                # unlike the keyword call below — confirm this matches the
                # intended checkpointed call convention.
                x = checkpoint(blk, x, (rel_pos_bias,))
            else:
                x = blk(x, rel_pos_bias=rel_pos_bias)

        if not return_all_features:
            x = self.norm(x)
            if self.fc_norm is not None:
                # Mean-pool over all tokens, then normalize.
                return self.fc_norm(x.mean(1))
            else:
                # CLS token as the global representation.
                return x[:, 0]
        return x

    def forward(self, x, return_all_features=False):
        """Return all token features, or the classification-head output."""
        if return_all_features:
            return self.forward_features(x, return_all_features)
        x = self.forward_features(x)
        x = self.head(x)
        return x
712
+
713
+
714
def load_state_dict(checkpoint_path: str, map_location: str = "cpu", model_key: str = "model|module|state_dict", is_openai: bool = False, skip_list=None):
    """Load a model state dict from ``checkpoint_path``.

    Args:
        checkpoint_path: path to the checkpoint file.
        map_location: device mapping forwarded to ``torch.load``.
        model_key: ``|``-separated candidate keys; the first one present in the
            loaded checkpoint dict is used as the state dict. When none match,
            the checkpoint object itself is treated as the state dict.
        is_openai: load an OpenAI CLIP TorchScript archive instead of a plain
            torch checkpoint.
        skip_list: optional list of keys to drop from the loaded state dict
            (default: drop nothing).

    Returns:
        dict: the state dict, with any ``module.`` (DataParallel/DDP) prefix
        stripped and, when the ``RoPE=1`` environment variable is set, cached
        rotary-frequency buffers removed (they are rebuilt at model init).
    """
    # Avoid a mutable default argument; None means "skip nothing".
    if skip_list is None:
        skip_list = []
    if is_openai:
        # OpenAI checkpoints are TorchScript archives; extract the state dict
        # and drop the non-tensor metadata entries.
        model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
        state_dict = model.state_dict()
        for key in ["input_resolution", "context_length", "vocab_size"]:
            state_dict.pop(key, None)
    else:
        # Named "ckpt" (not "checkpoint") to avoid shadowing the imported
        # activation-checkpointing function ``checkpoint``.
        ckpt = torch.load(checkpoint_path, map_location=map_location)
        for mk in model_key.split("|"):
            if isinstance(ckpt, dict) and mk in ckpt:
                state_dict = ckpt[mk]
                break
        else:
            state_dict = ckpt
        if next(iter(state_dict.items()))[0].startswith("module"):
            # Strip the "module." prefix left by DataParallel/DDP wrappers.
            state_dict = {k[7:]: v for k, v in state_dict.items()}

    for k in skip_list:
        if k in list(state_dict.keys()):
            logging.info(f"Removing key {k} from pretrained checkpoint")
            del state_dict[k]

    if os.getenv("RoPE") == "1":
        # Rotary frequency caches are shape-dependent; drop them so they are
        # regenerated for the current configuration.
        for k in list(state_dict.keys()):
            if "freqs_cos" in k or "freqs_sin" in k:
                del state_dict[k]
    return state_dict
741
+
742
+
743
def load_clip_visual_state_dict(checkpoint_path: str, map_location: str = "cpu", is_openai: bool = False, skip_list: list = []):
    """Load a vision-tower state dict via :func:`load_state_dict`.

    Filtering/renaming of ``visual.``-prefixed keys is intentionally disabled
    here; the checkpoint is expected to already contain bare visual keys.
    """
    return load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
754
+
755
+
756
+ from dataclasses import dataclass
757
+ from typing import Optional, Tuple, Union
758
+
759
+ try:
760
+ from apex.normalization import FusedLayerNorm
761
+ except:
762
+ FusedLayerNorm = LayerNorm
763
+ # print("Please build and install Nvidia apex package with option '--cuda_ext' according to https://github.com/NVIDIA/apex#from-source .")
764
+
765
+
766
@dataclass
class CLIPVisionCfg:
    """Configuration for the CLIP vision tower (EVA / timm / plain ViT)."""

    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    head_width: int = 64
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    ls_init_value: Optional[float] = None  # layer scale initial value
    patch_dropout: float = 0.0  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
    global_average_pool: bool = False  # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
    drop_path_rate: Optional[float] = None  # drop path rate
    timm_model_name: Optional[str] = None  # a valid model name overrides layers, width, patch_size
    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
    timm_pool: str = "avg"  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    timm_proj: str = "linear"  # linear projection for timm model output ('linear', 'mlp', '')
    timm_proj_bias: bool = False  # enable bias final projection
    eva_model_name: Optional[str] = None  # a valid eva model name overrides layers, width, patch_size
    qkv_bias: bool = True
    fusedLN: bool = False  # request fused LayerNorm (see create_norm_layer_factory)
    xattn: bool = False  # use xformers memory-efficient attention
    postnorm: bool = False  # post-norm instead of pre-norm blocks
    rope: bool = False  # rotary position embedding
    pt_hw_seq_len: int = 16  # 224/14
    intp_freq: bool = False  # interpolate rope frequencies to the fine-tune grid
    naiveswiglu: bool = False  # SwiGLU MLP instead of plain MLP
    subln: bool = False  # EVA-02 sub-LayerNorm parameterization
793
+
794
+
795
def create_norm_layer_factory(use_fused_ln, eps=1e-6):
    """Return a ``norm_layer`` callable producing ``nn.LayerNorm(num_features, eps)``.

    ``use_fused_ln`` is accepted for configuration compatibility but currently
    has no effect: the standard LayerNorm is always used.
    """

    def make_norm(num_features):
        return nn.LayerNorm(num_features, eps=eps)

    return make_norm
798
+
799
+
800
def _build_vision_tower(vision_tower_path: str, embed_dim: int, vision_cfg: CLIPVisionCfg, **kwargs):
    """Instantiate an EVA vision tower and load its pretrained weights.

    Args:
        vision_tower_path: checkpoint path passed to
            ``load_clip_visual_state_dict``.
        embed_dim: output (CLIP embedding) dimension, used as ``num_classes``.
        vision_cfg: vision-tower hyper-parameters (dict or ``CLIPVisionCfg``).

    Returns:
        The ``EVAVisionTransformer`` with weights loaded (``strict=False``).
        NOTE(review): implicitly returns ``None`` when
        ``vision_cfg.eva_model_name`` is not set — callers appear to rely on
        EVA configs only; confirm.
    """
    if isinstance(vision_cfg, dict):
        vision_cfg = CLIPVisionCfg(**vision_cfg)

    if vision_cfg.eva_model_name:
        vision_heads = vision_cfg.width // vision_cfg.head_width
        # Determine the appropriate norm layer factory based on the configuration
        norm_layer_factory = create_norm_layer_factory(vision_cfg.fusedLN, eps=1e-6)

        visual = EVAVisionTransformer(
            img_size=vision_cfg.image_size,
            patch_size=vision_cfg.patch_size,
            num_classes=embed_dim,
            use_mean_pooling=vision_cfg.global_average_pool,  # False
            init_values=vision_cfg.ls_init_value,
            patch_dropout=vision_cfg.patch_dropout,
            embed_dim=vision_cfg.width,
            depth=vision_cfg.layers,
            num_heads=vision_heads,
            mlp_ratio=vision_cfg.mlp_ratio,
            qkv_bias=vision_cfg.qkv_bias,
            drop_path_rate=vision_cfg.drop_path_rate,
            norm_layer=norm_layer_factory,
            xattn=vision_cfg.xattn,
            rope=vision_cfg.rope,
            postnorm=vision_cfg.postnorm,
            pt_hw_seq_len=vision_cfg.pt_hw_seq_len,  # 224/14
            intp_freq=vision_cfg.intp_freq,
            naiveswiglu=vision_cfg.naiveswiglu,
            subln=vision_cfg.subln,
        )

        # strict=False: checkpoints may lack rope caches / extra heads.
        state_dict = load_clip_visual_state_dict(vision_tower_path)
        incompatible_keys = visual.load_state_dict(state_dict, strict=False)
        rank0_print("EVA-CLIP incompatible_keys:", incompatible_keys)

        return visual
837
+
838
+
839
class EVAEncoderWrapper(nn.Module):
    """Wraps an EVA-CLIP vision tower and exposes patch features (CLS dropped)."""

    def __init__(self, vision_tower_pretrained, config):
        super().__init__()
        self.config = config
        self.config["vision_tower_path"] = vision_tower_pretrained
        self.model = _build_vision_tower(**self.config)

    def forward(self, image, **kwargs):
        # All tokens from the backbone, minus the leading CLS token.
        features = self.model(image, return_all_features=True)
        return features[:, 1:, :]

    @property
    def dtype(self):
        *_, last = self.parameters()
        return last.dtype

    @property
    def device(self):
        *_, last = self.parameters()
        return last.device
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA-CLIP-18B.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1536,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 48,
6
+ "width": 5120,
7
+ "head_width": 128,
8
+ "mlp_ratio": 5,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-18b-14-x",
11
+ "drop_path_rate": 0,
12
+ "qkv_bias": false,
13
+ "xattn": true,
14
+ "postnorm": true,
15
+ "fusedLN": false,
16
+ "use_rms_norm": true
17
+ },
18
+ "text_cfg": {
19
+ "context_length": 77,
20
+ "vocab_size": 49408,
21
+ "width": 1280,
22
+ "heads": 20,
23
+ "layers": 32,
24
+ "xattn": false,
25
+ "fusedLN": false
26
+ }
27
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-B-16.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 512,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 12,
6
+ "width": 768,
7
+ "head_width": 64,
8
+ "patch_size": 16,
9
+ "mlp_ratio": 2.6667,
10
+ "eva_model_name": "eva-clip-b-16-X",
11
+ "drop_path_rate": 0.0,
12
+ "xattn": true,
13
+ "fusedLN": true,
14
+ "rope": true,
15
+ "pt_hw_seq_len": 16,
16
+ "intp_freq": true,
17
+ "naiveswiglu": true,
18
+ "subln": true
19
+ },
20
+ "text_cfg": {
21
+ "context_length": 77,
22
+ "vocab_size": 49408,
23
+ "width": 512,
24
+ "heads": 8,
25
+ "layers": 12,
26
+ "xattn": true,
27
+ "fusedLN": true
28
+ }
29
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/EVA02-CLIP-bigE-14.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 64,
6
+ "width": 1792,
7
+ "head_width": 112,
8
+ "mlp_ratio": 8.571428571428571,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-4b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": true,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1024,
20
+ "heads": 16,
21
+ "layers": 24,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/eva_clip/model_configs/Internal-EVA02-CLIP-10B-14.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "embed_dim": 1024,
3
+ "vision_cfg": {
4
+ "image_size": 224,
5
+ "layers": 77,
6
+ "width": 2304,
7
+ "head_width": 144,
8
+ "mlp_ratio": 10.9722,
9
+ "patch_size": 14,
10
+ "eva_model_name": "eva-clip-10b-14-x",
11
+ "drop_path_rate": 0,
12
+ "xattn": true,
13
+ "postnorm": false,
14
+ "fusedLN": true
15
+ },
16
+ "text_cfg": {
17
+ "context_length": 77,
18
+ "vocab_size": 49408,
19
+ "width": 1280,
20
+ "heads": 20,
21
+ "layers": 32,
22
+ "xattn": false,
23
+ "fusedLN": true
24
+ }
25
+ }
VLMEvalKit-sudoku/llava/model/multimodal_encoder/hf_vision.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from transformers import AutoModel, AutoImageProcessor, AutoConfig, CLIPImageProcessor
5
+ from llava.utils import rank0_print
6
+
7
+
8
class HFVisionTower(nn.Module):
    """Vision tower backed by an arbitrary HuggingFace vision model.

    The tower is named ``"hf:<model_id>"``; weights are loaded with
    ``AutoModel`` (``trust_remote_code=True``), cast to bfloat16, and frozen.
    """

    def __init__(self, vision_tower, args, delay_load=False):
        """
        Args:
            vision_tower: model identifier, prefixed with ``hf:``.
            args: namespace providing ``mm_vision_select_layer`` and optionally
                ``mm_vision_select_feature`` (default ``"patch"``).
            delay_load: if True, only fetch the config now; ``load_model()``
                must be called later to materialize the weights.
        """
        super().__init__()

        self.is_loaded = False

        self.vision_tower_name = vision_tower.replace("hf:", "", 1)
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, "mm_vision_select_feature", "patch")

        if not delay_load:
            self.load_model()
        else:
            self.cfg_only = AutoConfig.from_pretrained(self.vision_tower_name)

    def load_model(self):
        """Load processor and model, freeze parameters, cache config/device/dtype."""
        try:
            self.image_processor = AutoImageProcessor.from_pretrained(self.vision_tower_name)
        except Exception as e:
            # No processor shipped with the model: fall back to a CLIP processor.
            if "448" in self.vision_tower_name:
                image_size = 448
                # use image processor with config
                self.image_processor = CLIPImageProcessor(size={"shortest_edge": image_size}, do_center_crop=True, crop_size=image_size)
            else:
                self.image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
        rank0_print(f"Loaded image processor: {self.image_processor}")
        # NOTE(review): hard-codes a move to "cuda" — assumes a GPU is
        # available; confirm for CPU-only runs.
        self.vision_tower = AutoModel.from_pretrained(self.vision_tower_name, torch_dtype=torch.bfloat16, trust_remote_code=True).to("cuda")
        self.device = self.vision_tower.device
        self.dtype = self.vision_tower.dtype
        self.config = self.vision_tower.config

        if hasattr(self.vision_tower, "vision_model"):
            # Keep only the vision sub-module of multi-modal checkpoints.
            self.vision_tower = self.vision_tower.vision_model
        self.vision_tower.requires_grad_(False)
        # self.vision_tower.eval()
        self.is_loaded = True

    def feature_select(self, image_forward_outs):
        """Pick hidden states per ``self.select_feature``.

        ``slicefour_*`` variants concatenate features from 4 evenly spaced
        layers; ``patch`` drops the CLS token, ``cls_patch`` keeps all tokens.
        """
        select_feature_type = self.select_feature

        if self.select_feature in ["slicefour_patch", "slicefour_cls_patch"]:
            select_every_k_layer = len(image_forward_outs.hidden_states) // 4
            image_features = torch.cat([image_forward_outs.hidden_states[i] for i in range(select_every_k_layer + self.select_layer, len(image_forward_outs.hidden_states), select_every_k_layer)], dim=-1)
            select_feature_type = select_feature_type.replace("slicefour_", "")
        else:
            image_features = image_forward_outs.hidden_states[self.select_layer]

        if select_feature_type == "patch":
            # Token 0 is assumed to be the CLS token — drop it.
            image_features = image_features[:, 1:]
        elif select_feature_type == "cls_patch":
            image_features = image_features
        else:
            raise ValueError(f"Unexpected select feature: {select_feature_type}")
        return image_features

    def forward(self, images):
        """Encode a batch tensor, or a list of single images one at a time."""
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        # Zero vector with the tower's hidden size; used as a placeholder.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    # @property
    # def dtype(self):
    #     return self.vision_tower.dtype

    # @property
    # def device(self):
    #     return self.vision_tower.device

    @property
    def hidden_size(self):
        # Some configs nest the size under vision_config.
        try:
            _hidden_size = self.config.hidden_size
        except:
            _hidden_size = self.config.vision_config.hidden_size
        if "slicefour" in self.select_feature:
            # Four concatenated layers quadruple the feature width.
            _hidden_size *= 4
        return _hidden_size

    @property
    def num_patches(self):
        _num_patches = (self.config.image_size // self.config.patch_size) ** 2
        if "cls_patch" in self.select_feature:
            _num_patches += 1
        return _num_patches

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def image_size(self):
        return self.config.image_size
VLMEvalKit-sudoku/llava/model/multimodal_encoder/hubconf.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # hubconf.py
2
+ import torch
3
+ from featup.featurizers.util import get_featurizer
4
+ from featup.layers import ChannelNorm
5
+ from featup.upsamplers import get_upsampler
6
+ from torch.nn import Module
7
+ import torch
8
+ from torch.multiprocessing import Pool, set_start_method
9
+ from functools import partial
10
+ import torch.nn.functional as F
11
+
12
+ dependencies = ['torch', 'torchvision', 'PIL', 'featup'] # List any dependencies here
13
+
14
+
15
class UpsampledBackbone(Module):
    """A featurizer backbone followed by a stacked JBU (joint bilateral
    upsampling) feature upsampler.

    Fix: ``scale`` now defaults to ``"16x"`` (the generic ``jbu_stack``
    branch), so the two-argument construction used by ``_load_backbone``
    -- ``UpsampledBackbone(model_name, use_norm)`` -- no longer raises
    ``TypeError``. Existing three-argument callers are unaffected.
    """

    def __init__(self, model_name, use_norm, scale="16x"):
        super().__init__()
        # get_featurizer returns (module, patch_size, feature_dim).
        model, patch_size, self.dim = get_featurizer(model_name, "token", num_classes=1000)
        if use_norm:
            self.model = torch.nn.Sequential(model, ChannelNorm(self.dim))
        else:
            self.model = model

        # Map the requested scale to the matching upsampler stack; anything
        # other than 2x/4x/8x falls back to the full "jbu_stack".
        if scale == '2x':
            self.upsampler = get_upsampler("jbu_2x_stack", self.dim)
        elif scale == '4x':
            self.upsampler = get_upsampler("jbu_4x_stack", self.dim)
        elif scale == '8x':
            self.upsampler = get_upsampler("jbu_8x_stack", self.dim)
        else:
            self.upsampler = get_upsampler("jbu_stack", self.dim)

    def forward(self, image):
        """Featurize ``image`` and upsample the features, guided by the image."""
        return self.upsampler(self.model(image), image)

    def forward_with_internal_features(self, image, lowres):
        """Upsample precomputed low-res features, exposing intermediate scales.

        When ``lowres`` is a list (per-image feature maps of varying sizes),
        each entry is upsampled separately and per-scale lists are returned
        as ``(feat2x_list, feat4x_list, feat8x_list)``. Otherwise returns
        ``(feat2x, feat4x, feat8x)`` with missing scales as ``None``.
        """
        if type(lowres) == list:
            features_2x = []
            features_4x = []
            features_8x = []
            for i in range(len(lowres)):
                res = self.upsampler.forward_with_internal_features(lowres[i], image[i].unsqueeze(0))
                features_2x.append(res['feat2x'])
                if 'feat4x' in res:
                    features_4x.append(res['feat4x'])
                if 'feat8x' in res:
                    features_8x.append(res['feat8x'])
            return features_2x, features_4x, features_8x
        else:
            feat2x = None
            feat4x = None
            feat8x = None
            res = self.upsampler.forward_with_internal_features(lowres, image)
            if 'feat2x' in res:
                feat2x = res['feat2x']
            if 'feat4x' in res:
                feat4x = res['feat4x']
            if 'feat8x' in res:
                feat8x = res['feat8x']
            return feat2x, feat4x, feat8x
65
+
66
class Upsampled4xBackbone(Module):
    """Featurizer backbone paired with a fixed 4x stacked-JBU upsampler.

    Unlike ``UpsampledBackbone``, the forward pass casts both the features
    and the guidance image to bfloat16 before upsampling.
    """

    def __init__(self, model_name, use_norm):
        super().__init__()
        # get_featurizer returns (module, patch_size, feature_dim).
        model, patch_size, self.dim = get_featurizer(model_name, "token", num_classes=1000)
        if use_norm:
            self.model = torch.nn.Sequential(model, ChannelNorm(self.dim))
        else:
            self.model = model
        self.upsampler = get_upsampler("jbu_4x_stack", self.dim)

    def forward(self, image):
        """Featurize ``image`` and return 4x-upsampled features (in bfloat16)."""
        lowres = self.model(image).to(torch.bfloat16)
        image = image.to(torch.bfloat16)
        return self.upsampler(lowres, image)

    def pad_to_square_tensor(self, feat, pad_res):#torch.Size([1, 1024, 25, 22]), 33
        # Pad on the right and bottom so both spatial dims equal pad_res.
        pad_size = (0, pad_res-feat.size(3), 0, pad_res-feat.size(2)) # left, right, top, down
        # NOTE(review): pads with constant value 1 (not 0) -- presumably
        # deliberate, but worth confirming against the upsampler's training.
        padded = F.pad(feat, pad_size, 'constant', 1)
        return padded

    def unpad_from_square_tensor(self, feat, h, w):
        # Crop a (C, H, W) tensor back to (C, h, w) from the top-left corner.
        return feat[:, :h, :w]

    def forward_with_features_list(self, images, lowres):
        """Batch variably-sized per-image features by padding to a common square.

        Each low-res feature map (and its image, scaled by ``feature_scale``)
        is padded to the largest spatial extent in the list, the upsampler
        runs once on the stacked batch, and the results are cropped back to
        each image's 2x/4x size. Returns (list of 2x feats, list of 4x feats).
        """
        feature_scale = 14 # clip-large 336 -14 (image pixels per feature cell)
        #list of torch.Size([3, 350, 308]),
        #list of torch.Size([1, 1024, 25, 22])
        pad_res = 0
        for i in range(len(lowres)):
            pad_res = max(pad_res, max(lowres[i].size(2), lowres[i].size(3)))

        lowres_tensor = [self.pad_to_square_tensor(lowres[i], pad_res) for i in range(len(lowres))]
        lowres_tensor = torch.cat(lowres_tensor, dim=0)

        # Images are padded to the matching pixel resolution (pad_res * patch).
        images_tensor = [self.pad_to_square_tensor(images[i].unsqueeze(0), pad_res * feature_scale) for i in range(len(images))]
        images_tensor = torch.cat(images_tensor, dim=0)

        features_2x, features_4x = self.upsampler.forward_with_internal_features(lowres_tensor, images_tensor)

        # Crop each result back to 2x/4x the original (unpadded) feature size.
        feat_2x_list = [self.unpad_from_square_tensor(features_2x[i], lowres[i].size(2) * 2, lowres[i].size(3) * 2).unsqueeze(0) for i in range(len(features_2x))]
        feat_4x_list = [self.unpad_from_square_tensor(features_4x[i], lowres[i].size(2) * 4, lowres[i].size(3) * 4).unsqueeze(0) for i in range(len(features_4x))]

        return feat_2x_list, feat_4x_list

    def forward_with_internal_features(self, image, lowres):
        """Upsample precomputed low-res features, exposing intermediate scales.

        List inputs are processed one image at a time and returned as a dict
        of per-scale lists; tensor inputs are forwarded straight through to
        the upsampler (which returns its own result dict).
        """
        if type(lowres) == list:
            #return self.forward_with_features_list(image, lowres)
            features_2x = []
            features_4x = []
            features_8x = []
            for i in range(len(lowres)):
                res = self.upsampler.forward_with_internal_features(lowres[i], image[i].unsqueeze(0))
                features_2x.append(res['feat2x'])
                if res.get('feat4x') is not None:
                    features_4x.append(res['feat4x'])
                if res.get('feat8x') is not None:
                    features_8x.append(res['feat8x'])

            return {'feat2x': features_2x, 'feat4x': features_4x, 'feat8x': features_8x}
        else:
            return self.upsampler.forward_with_internal_features(lowres, image)
131
+
132
def _load_backbone(pretrained, use_norm, model_name):
    """Build an ``UpsampledBackbone``, optionally loading public pretrained weights.

    Args:
        pretrained (bool): If True, download and load the released checkpoint.
        use_norm (bool): Wrap the featurizer with a ChannelNorm layer.
        model_name (str): Featurizer identifier understood by ``get_featurizer``.

    Returns:
        UpsampledBackbone: the constructed (and possibly initialized) model.
    """
    model = UpsampledBackbone(model_name, use_norm)
    if not pretrained:
        return model

    exp_dir = "" if use_norm else "no_norm/"
    checkpoint_url = (
        "https://marhamilresearch4.blob.core.windows.net/feature-upsampling-public/"
        f"pretrained/{exp_dir}{model_name}_jbu_stack_cocostuff.ckpt"
    )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["state_dict"]
    # Drop training-only submodules before the non-strict load.
    state_dict = {
        key: value
        for key, value in state_dict.items()
        if "scale_net" not in key and "downsampler" not in key
    }
    model.load_state_dict(state_dict, strict=False)
    return model
154
+
155
def _load_backbone_from_local(pretrained, use_norm, model_name, ckpt_path, scale = '16x'):
    """Build an ``UpsampledBackbone`` and optionally load a local checkpoint.

    Args:
        pretrained (bool): If True, load weights from ``ckpt_path``.
        use_norm (bool): Wrap the featurizer with a ChannelNorm layer.
        model_name (str): Featurizer identifier understood by ``get_featurizer``.
        ckpt_path (str): Path to a checkpoint containing a 'state_dict' entry.
        scale (str): Upsampler scale ('2x', '4x', '8x', or the default '16x').

    Returns:
        UpsampledBackbone: the constructed (and possibly initialized) model.
    """
    model = UpsampledBackbone(model_name, use_norm, scale)
    if pretrained:
        raw_state = torch.load(ckpt_path)['state_dict']
        # Training-only submodules are excluded before the non-strict load.
        filtered = {
            name: tensor
            for name, tensor in raw_state.items()
            if "scale_net" not in name and "downsampler" not in name
        }
        model.load_state_dict(filtered, strict=False)
    return model
171
+
172
def _get_state_dict(ckpt_path):
    """Load ``ckpt_path`` and return its state dict minus training-only modules."""
    raw_state = torch.load(ckpt_path)['state_dict']
    return {
        name: tensor
        for name, tensor in raw_state.items()
        if "scale_net" not in name and "downsampler" not in name
    }
177
+
178
+ # def vit(pretrained=True, use_norm=True):
179
+ # return _load_backbone(pretrained, use_norm, "vit")
180
+
181
def vit(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "vit" featurizer (hard-coded local weights)."""
    ckpt_path = '/home/jeeves/LowResCV/checkpoints/jbu/final_1e6_token_unnorm_pad30_jitter5_lr4_vit_jbu_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0/home/jeeves/LowResCV/checkpoints/jbu/final_1e6_token_unnorm_pad30_jitter5_lr4_vit_jbu_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0_685.ckpt'
    model_name = "vit"
    return _load_backbone_from_local(pretrained, use_norm, model_name, ckpt_path)
184
+
185
def dino16(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "dino16" featurizer + JBU upsampler."""
    model_name = "dino16"
    return _load_backbone(pretrained, use_norm, model_name)
187
+
188
+
189
def clip(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "clip" featurizer + JBU upsampler."""
    model_name = "clip"
    return _load_backbone(pretrained, use_norm, model_name)
191
+
192
def clipLarge(pretrained=True, use_norm=True, scale = '4x'):
    """Torch Hub entry point for "clip-large" (hard-coded local checkpoint)."""
    ckpt_path = '/mnt/data/user/tc_agi/zyp/featup/upsampler/0919/checkpoints/clip-large_jbu_4x_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0-0.001-True-30-2-5_2000.ckpt'
    model_name = "clip-large"
    return _load_backbone_from_local(pretrained, use_norm, model_name, ckpt_path, scale)
195
+
196
+
197
def featup(model_name='clip-large', pretrained=False, use_norm=False, scale = '4x'):
    """Generic Torch Hub entry point: any supported featurizer + JBU upsampler.

    NOTE(review): the checkpoint path is a clip-large checkpoint -- with
    ``pretrained=True`` and another ``model_name`` the non-strict load will
    silently skip mismatched keys; confirm this is intended.
    """
    ckpt_path = '/mnt/data/user/tc_agi/zyp/featup/upsampler/0919/checkpoints/clip-large_jbu_4x_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0-0.001-True-30-2-5_2000.ckpt'
    return _load_backbone_from_local(pretrained, use_norm, model_name, ckpt_path, scale)
200
+
201
def get_featup_state_dict(ckpt_path='/mnt/data/user/tc_agi/zyp/featup/upsampler/0919/checkpoints/clip-large_jbu_4x_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0-0.001-True-30-2-5_2000.ckpt'):
    """Return the filtered upsampler state dict from the given (or default) checkpoint."""
    state_dict = _get_state_dict(ckpt_path)
    return state_dict
203
+
204
def dinov2(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "dinov2" featurizer + JBU upsampler."""
    model_name = "dinov2"
    return _load_backbone(pretrained, use_norm, model_name)
206
+
207
+
208
def resnet50(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "resnet50" featurizer + JBU upsampler."""
    model_name = "resnet50"
    return _load_backbone(pretrained, use_norm, model_name)
210
+
211
def maskclip(pretrained=True, use_norm=True):
    """Torch Hub entry point for the "maskclip" featurizer + JBU upsampler.

    Raises:
        ValueError: if ``use_norm`` is True -- only the unnormed MaskCLIP
            model is supported. (Was an ``assert``, which is stripped under
            ``python -O``; an explicit exception is always enforced.)
    """
    if use_norm:
        raise ValueError("MaskCLIP only supports unnormed model")
    return _load_backbone(pretrained, use_norm, "maskclip")
VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_siglip2.py ADDED
@@ -0,0 +1,1386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_siglip2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The HuggingFace Inc. team.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+ import math
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from functools import partial, reduce
25
+ import torch.utils.checkpoint
26
+ from PIL import Image
27
+ from typing import Any, Optional, Tuple, Union, Dict
28
+ import os
29
+
30
+ import numpy as np
31
+ import torch
32
+ import torch.nn as nn
33
+ import torch.nn.functional as F
34
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
35
+ from torch.nn.init import _calculate_fan_in_and_fan_out
36
+
37
+ from transformers.activations import ACT2FN
38
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
39
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
40
+ from transformers.modeling_utils import PreTrainedModel
41
+ from transformers.utils import (
42
+ ModelOutput,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_2_available,
46
+ is_flash_attn_greater_or_equal_2_10,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from transformers.configuration_utils import PretrainedConfig
51
+ from transformers.image_processing_utils import BatchFeature, get_size_dict
52
+ from transformers.image_transforms import (
53
+ convert_to_rgb,
54
+ normalize,
55
+ rescale,
56
+ resize,
57
+ to_channel_dimension_format,
58
+ )
59
+ from transformers.image_utils import (
60
+ ChannelDimension,
61
+ PILImageResampling,
62
+ to_numpy_array,
63
+ )
64
+ from transformers.activations import ACT2FN
65
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
66
+ from transformers.modeling_utils import PreTrainedModel
67
+ from transformers import PretrainedConfig
68
+ from transformers.utils import ModelOutput
69
+ from llava.utils import rank0_print
70
+ from einops import rearrange
71
+
72
+ # if is_flash_attn_2_available():
73
+ # from transformers.modeling_flash_attention_utils import _flash_attention_forward
74
+
75
+
76
class SigLipImageProcessor:
    """Minimal SigLIP-style image preprocessor (resize -> rescale -> normalize).

    Exposes the subset of the HuggingFace image-processor interface used by
    LLaVA-style pipelines: ``preprocess`` returns a ``BatchFeature`` whose
    'pixel_values' holds the transformed images.
    """

    def __init__(self, image_mean=(0.5, 0.5, 0.5), image_std=(0.5, 0.5, 0.5), size=(384, 384), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
        if crop_size is None:
            crop_size = {"height": 384, "width": 384}
        self.image_mean = image_mean
        self.image_std = image_std
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.data_format = data_format
        self.crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

    def preprocess(self, images, do_resize = True, do_center_crop = True, do_rescale = True, do_normalize = True, return_tensors = 'pt'):
        """Run the configured transform pipeline over one or more images.

        Note: ``do_center_crop`` is accepted for interface compatibility but
        no cropping step is actually applied.
        """
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # Video frames / sequences arrive as arbitrary image-likes.
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)

        pipeline = [convert_to_rgb, to_numpy_array]
        if do_resize:
            pipeline.append(partial(resize, size=self.size, resample=self.resample, data_format=self.data_format))
        if do_rescale:
            pipeline.append(partial(rescale, scale=self.rescale_factor, data_format=self.data_format))
        if do_normalize:
            pipeline.append(partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format))
        pipeline.append(partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format))

        # Apply each transform to every image, stage by stage.
        for step in pipeline:
            images = [step(image) for image in images]

        return BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
117
+
118
+
119
class Siglip2TextConfig(PretrainedConfig):
    r"""Configuration for [`Siglip2TextModel`], the Siglip2 text encoder.

    Defaults reproduce the text tower of
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224).
    Inherits from [`PretrainedConfig`]; see its documentation for shared options.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000): tokenizer vocabulary size.
        hidden_size (`int`, *optional*, defaults to 768): encoder/pooler width.
        intermediate_size (`int`, *optional*, defaults to 3072): feed-forward width.
        num_hidden_layers (`int`, *optional*, defaults to 12): transformer depth.
        num_attention_heads (`int`, *optional*, defaults to 12): heads per attention layer.
        max_position_embeddings (`int`, *optional*, defaults to 64): maximum sequence length.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            activation used in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06): LayerNorm epsilon.
        attention_dropout (`float`, *optional*, defaults to 0.0): attention dropout ratio.
        pad_token_id (`int`, *optional*, defaults to 1): padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406): beginning-of-sequence id.
        eos_token_id (`int`, *optional*, defaults to 49407): end-of-sequence id.
        projection_size (`int`, *optional*, defaults to `hidden_size`): projection head size.

    Example:

    ```python
    >>> from transformers import Siglip2TextConfig, Siglip2TextModel

    >>> configuration = Siglip2TextConfig()
    >>> model = Siglip2TextModel(configuration)
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        max_position_embeddings=64,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        # These token-id defaults intentionally differ from `CLIPTokenizer`'s;
        # see https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        projection_size=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        # Projection head falls back to the encoder width when unspecified.
        self.projection_size = hidden_size if projection_size is None else projection_size
209
+
210
+
211
class Siglip2VisionConfig(PretrainedConfig):
    r"""Configuration for [`Siglip2VisionModel`], the Siglip2 vision encoder.

    Defaults reproduce the vision tower of
    [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex).
    Inherits from [`PretrainedConfig`]; see its documentation for shared options.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152): encoder/pooler width.
        intermediate_size (`int`, *optional*, defaults to 4304): feed-forward width.
        num_hidden_layers (`int`, *optional*, defaults to 27): transformer depth.
        num_attention_heads (`int`, *optional*, defaults to 16): heads per attention layer.
        num_channels (`int`, *optional*, defaults to 3): input image channels.
        num_patches (`int`, *optional*, defaults to 256):
            Number of (`patch_size` x `patch_size`) patches the image is resized
            to fill (aspect ratio preserved; padded along the patch dimension
            if fewer patches result).
        patch_size (`int`, *optional*, defaults to 16): patch resolution.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            activation used in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06): LayerNorm epsilon.
        attention_dropout (`float`, *optional*, defaults to 0.0): attention dropout ratio.
        image_size (`int`, *optional*, defaults to 384):
            Square input resolution assumed by downstream consumers. Previously
            hard-coded to 384 (marked "fixme"); now configurable while keeping
            the same default.

    Example:

    ```python
    >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel

    >>> configuration = Siglip2VisionConfig()
    >>> model = Siglip2VisionModel(configuration)
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1152,
        intermediate_size=4304,
        num_hidden_layers=27,
        num_attention_heads=16,
        num_channels=3,
        num_patches=256,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        image_size=384,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.num_patches = num_patches
292
+
293
+
294
class Siglip2Config(PretrainedConfig):
    r"""Composite configuration bundling a Siglip2 text config and vision config.

    Defaults match
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224).
    Inherits from [`PretrainedConfig`]; see its documentation for shared options.

    Args:
        text_config (`dict`, *optional*):
            Options used to initialize [`Siglip2TextConfig`].
        vision_config (`dict`, *optional*):
            Options used to initialize [`Siglip2VisionConfig`].
        kwargs (*optional*):
            Forwarded to [`PretrainedConfig`].

    Example:

    ```python
    >>> from transformers import Siglip2Config, Siglip2Model

    >>> configuration = Siglip2Config()
    >>> model = Siglip2Model(configuration)
    >>> configuration = model.config

    >>> # Building from separate sub-configs:
    >>> from transformers import Siglip2TextConfig, Siglip2VisionConfig
    >>> config_text = Siglip2TextConfig()
    >>> config_vision = Siglip2VisionConfig()
    >>> config = Siglip2Config.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "siglip2"
    sub_configs = {"text_config": Siglip2TextConfig, "vision_config": Siglip2VisionConfig}

    def __init__(self, text_config=None, vision_config=None, **kwargs):
        super().__init__(**kwargs)

        # Missing sub-configs fall back to the sub-config class defaults.
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `Siglip2TextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `Siglip2VisionConfig` with default values.")

        self.text_config = Siglip2TextConfig(**text_config)
        self.vision_config = Siglip2VisionConfig(**vision_config)
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: Siglip2TextConfig, vision_config: Siglip2VisionConfig, **kwargs):
        r"""Instantiate a [`Siglip2Config`] (or a derived class) from separate
        Siglip2 text and vision model configurations.

        Returns:
            [`Siglip2Config`]: An instance of a configuration object
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
366
+ r"""
367
+ This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a
368
+ Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
369
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2
370
+ [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture.
371
+
372
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
373
+ documentation from [`PretrainedConfig`] for more information.
374
+
375
+ Args:
376
+ hidden_size (`int`, *optional*, defaults to 768):
377
+ Dimensionality of the encoder layers and the pooler layer.
378
+ intermediate_size (`int`, *optional*, defaults to 3072):
379
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
380
+ num_hidden_layers (`int`, *optional*, defaults to 12):
381
+ Number of hidden layers in the Transformer encoder.
382
+ num_attention_heads (`int`, *optional*, defaults to 12):
383
+ Number of attention heads for each attention layer in the Transformer encoder.
384
+ num_channels (`int`, *optional*, defaults to 3):
385
+ Number of channels in the input images.
386
+ num_patches (`int`, *optional*, defaults to 256):
387
+ The number of patches in the image with the size of (`patch_size`, `patch_size`).
388
+ The image is resized to fill maximum of this number of patches, and to preserve
389
+ the aspect ratio. In case the resulted number of patches is lower, the image is
390
+ padded in "patch" dimension.
391
+ patch_size (`int`, *optional*, defaults to 16):
392
+ The size (resolution) of each patch.
393
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
394
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
395
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
396
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
397
+ The epsilon used by the layer normalization layers.
398
+ attention_dropout (`float`, *optional*, defaults to 0.0):
399
+ The dropout ratio for the attention probabilities.
400
+
401
+ Example:
402
+
403
+ ```python
404
+ >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel
405
+
406
+ >>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration
407
+ >>> configuration = Siglip2VisionConfig()
408
+
409
+ >>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration
410
+ >>> model = Siglip2VisionModel(configuration)
411
+
412
+ >>> # Accessing the model configuration
413
+ >>> configuration = model.config
414
+ ```"""
415
+
416
+ model_type = "siglip2_vision_model"
417
+ base_config_key = "vision_config"
418
+
419
+ def __init__(
420
+ self,
421
+ hidden_size=768,
422
+ intermediate_size=3072,
423
+ num_hidden_layers=12,
424
+ num_attention_heads=12,
425
+ num_channels=3,
426
+ num_patches=256,
427
+ patch_size=16,
428
+ hidden_act="gelu_pytorch_tanh",
429
+ layer_norm_eps=1e-6,
430
+ attention_dropout=0.0,
431
+ **kwargs,
432
+ ):
433
+ super().__init__(**kwargs)
434
+
435
+ self.hidden_size = hidden_size
436
+ self.intermediate_size = intermediate_size
437
+ self.num_hidden_layers = num_hidden_layers
438
+ self.num_attention_heads = num_attention_heads
439
+ self.num_channels = num_channels
440
+ self.patch_size = patch_size
441
+ self.attention_dropout = attention_dropout
442
+ self.layer_norm_eps = layer_norm_eps
443
+ self.hidden_act = hidden_act
444
+ self.num_patches = num_patches
445
+
446
# Module-level logger following the transformers convention.
logger = logging.get_logger(__name__)

# General docstring
# Config class name substituted into auto-generated model docstrings.
_CONFIG_FOR_DOC = "Siglip2VisionConfig"
450
+
451
+
452
@dataclass
class Siglip2VisionOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    # Annotation widened to Optional: the field defaults to None.
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
479
+
480
+
481
class Siglip2VisionEmbeddings(nn.Module):
    """Embed NaFlex-style pre-patchified pixel inputs.

    Pixel values arrive already flattened into patch vectors of shape
    (batch, max_num_patches, num_channels * patch_size**2). Each patch is
    projected with a linear layer, and a learned square positional-embedding
    grid is bilinearly resized to each image's own (height, width) patch grid
    before being added.
    """

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        # NOTE(review): Siglip2VisionConfig.__init__ in this file does not set
        # `image_size`; this lookup only succeeds if the attribute is supplied
        # some other way (e.g. via config kwargs) — confirm against callers.
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Linear patch embedding over already-flattened patches (no Conv2d).
        self.patch_embedding = nn.Linear(
            in_features=config.num_channels * self.patch_size * self.patch_size,
            out_features=self.embed_dim,
        )

        self.num_patches = config.num_patches
        # The learned position table is treated as a square grid of side
        # sqrt(num_patches); assumes num_patches is a perfect square.
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        max_length: int,
    ) -> torch.Tensor:
        """
        Resize positional embeddings to image-specific size and pad to a fixed size.

        Args:
            positional_embeddings (`torch.Tensor`):
                Position embeddings of shape (height, width, embed_dim)
            spatial_shapes (`torch.LongTensor`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
            max_length (`int`):
                Maximum length of the positional embeddings to pad resized positional embeddings to

        Returns:
            `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
        """
        batch_size = spatial_shapes.shape[0]
        embed_dim = positional_embeddings.shape[-1]
        source_dtype = positional_embeddings.dtype

        # Output buffer; slots beyond each image's height*width are padding.
        resulted_positional_embeddings = torch.empty(
            (batch_size, max_length, embed_dim),
            device=positional_embeddings.device,
            dtype=source_dtype,
        )

        # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
        positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)

        # Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU
        if positional_embeddings.device.type == "cpu":
            positional_embeddings = positional_embeddings.to(torch.float32)

        for i in range(batch_size):
            # (1, dim, height, width) -> (1, dim, target_height, target_width)
            height, width = spatial_shapes[i]
            resized_embeddings = F.interpolate(
                positional_embeddings,
                size=(height, width),
                mode="bilinear",
                align_corners=False,
                antialias=True,
            )

            # (1, dim, target_height, target_width) -> (target_height * target_width, dim)
            resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)

            # Cast to original dtype
            resized_embeddings = resized_embeddings.to(source_dtype)

            resulted_positional_embeddings[i, : height * width] = resized_embeddings
            # Padded tail is filled with the first position's embedding so it
            # holds finite values (presumably masked out downstream — verify).
            resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]

        return resulted_positional_embeddings

    def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor`):
                Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
            spatial_shapes (`List[Tuple[int, int]]`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
        """
        # Apply patch embeddings to already patchified pixel values
        # (match the projection weight's dtype to avoid mixed-precision errors).
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))

        # Get positional resized and padded positional embeddings
        positional_embeddings = self.position_embedding.weight.reshape(
            self.position_embedding_size, self.position_embedding_size, -1
        )
        resized_positional_embeddings = self.resize_positional_embeddings(
            positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
        )

        # Add positional embeddings to patch embeddings
        embeddings = patch_embeds + resized_positional_embeddings
        return embeddings
582
+
583
+
584
class Siglip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.embed_dim != self.num_heads * self.head_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        # Projections created in this exact order to keep seeded-init parity.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, q_len, _ = hidden_states.size()

        def split_heads(states: torch.Tensor) -> torch.Tensor:
            # (B, T, E) -> (B, H, T, head_dim)
            return states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        query_states = split_heads(self.q_proj(hidden_states))
        key_states = split_heads(self.k_proj(hidden_states))
        value_states = split_heads(self.v_proj(hidden_states))

        k_v_seq_len = key_states.shape[-2]
        # Scaled dot-product scores: (B, H, T, T).
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale

        if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # (B, H, T, head_dim) -> (B, T, E), then final output projection.
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(batch_size, q_len, self.embed_dim)
        return self.out_proj(attn_output), attn_weights
657
+
658
+
659
class Siglip2SdpaAttention(Siglip2Attention):
    """
    Siglip2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Siglip2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    is_causal = False

    # Adapted from Siglip2Attention.forward and transformers.models.llama.modeling_llama.LlamaSdpaAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # SDPA cannot expose attention probabilities; fall back to the eager parent.
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Siglip2Model is using Siglip2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
            )

        bsz, seq_len, _ = hidden_states.size()

        def shape(states: torch.Tensor) -> torch.Tensor:
            # (B, T, E) -> (B, H, T, head_dim)
            return states.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        query_states = shape(self.q_proj(hidden_states))
        key_states = shape(self.k_proj(hidden_states))
        value_states = shape(self.v_proj(hidden_states))

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if attention_mask is not None and query_states.device.type == "cuda":
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # Explicit bool (not an inline tensor conditional) keeps torch.compile's
        # dynamic shapes and full-graph options working.
        is_causal = bool(self.is_causal and seq_len > 1)

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        # (B, H, T, head_dim) -> (B, T, E), then final projection.
        attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, seq_len, self.embed_dim)
        return self.out_proj(attn_output), None
723
+
724
class Siglip2FlashAttention2(Siglip2Attention):
    """
    Siglip2Attention flash attention module. This module inherits from `Siglip2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    is_causal = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    # Adapted from transformers.models.llama.modeling_llama.LlamaFlashAttention2.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Flash attention never materializes the attention matrix, so attention
        # weights cannot be returned; the flag is force-disabled here.
        output_attentions = False

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim)

        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32.

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Delegates padding handling / varlen kernels to the shared helper.
        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        # Always None here since output_attentions is forced to False above.
        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights
809
+
810
class Siglip2MLP(nn.Module):
    """Feed-forward block: fc1 expands to intermediate size, fc2 projects back."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand, apply the configured non-linearity, then project back down.
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
823
+
824
+
825
# Maps `config._attn_implementation` to the attention class instantiated by
# Siglip2EncoderLayer.
SIGLIP2_ATTENTION_CLASSES = {
    "eager": Siglip2Attention,
    "sdpa": Siglip2SdpaAttention,
    "flash_attention_2": Siglip2FlashAttention2,
}
830
+
831
+
832
class Siglip2EncoderLayer(nn.Module):
    """Pre-norm transformer block: self-attention then MLP, each with a residual."""

    def __init__(self, config: Siglip2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = SIGLIP2_ATTENTION_CLASSES[config._attn_implementation](config=config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    # Ignore copy
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Attention sub-block (pre-norm + residual connection).
        attn_output, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_output

        # MLP sub-block (pre-norm + residual connection).
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))

        # Attention weights are appended only when requested.
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
879
+
880
+
881
class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2Config
    """

    def __init__(self, config: Siglip2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Run the stack of encoder layers over `inputs_embeds`.

        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Pre-computed embeddings to feed to the first encoder layer.
            attention_mask (`torch.Tensor`, *optional*):
                Mask forwarded unchanged to every layer (`1` = attend, `0` = masked).
            output_attentions (`bool`, *optional*):
                Whether to collect each layer's attention weights.
            output_hidden_states (`bool`, *optional*):
                Whether to collect the hidden states before every layer and after the last.
            return_dict (`bool`, *optional*):
                Whether to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Resolve per-call flags against config defaults.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        use_checkpoint = self.gradient_checkpointing and self.training
        for encoder_layer in self.layers:
            # Record the state *entering* each layer.
            if output_hidden_states:
                encoder_states += (hidden_states,)
            if use_checkpoint:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions += (layer_outputs[1],)

        # Final post-stack hidden state.
        if output_hidden_states:
            encoder_states += (hidden_states,)

        if return_dict:
            return BaseModelOutput(
                last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
            )
        return tuple(v for v in (hidden_states, encoder_states, all_attentions) if v is not None)
967
+
968
+
969
# Shared forward() docstring injected via `add_start_docstrings_to_model_forward`.
SIGLIP2_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
    """
985
+
986
+
987
class Siglip2VisionTransformer(nn.Module):
    """Vision backbone: embeddings -> transformer encoder -> final LayerNorm,
    with an optional attention-pooling head.

    The pooling head is applied only when `config.vision_use_head` is truthy.
    Note this module never constructs `self.head` itself; a config that enables
    the head must be paired with code that attaches one.
    """

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        # Defaults to False: no pooling head unless the config opts in.
        self.use_head = False if not hasattr(config, "vision_use_head") else config.vision_use_head
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values,
        attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values, spatial_shapes)

        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        else:
            # Flash attention consumes the 2D padding mask directly.
            encoder_attention_mask = attention_mask

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        # BUG FIX: `self.head` is never created in __init__ and `use_head`
        # defaults to False, so the previous unconditional `self.head(...)`
        # call always raised AttributeError. Apply the pooling head only when
        # it is enabled (matching the upstream Siglip2 implementation).
        pooled_output = self.head(last_hidden_state) if self.use_head else None

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1047
+
1048
+
1049
+ def _trunc_normal_(tensor, mean, std, a, b):
1050
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
1051
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
1052
+ def norm_cdf(x):
1053
+ # Computes standard normal cumulative distribution function
1054
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
1055
+
1056
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
1057
+ warnings.warn(
1058
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
1059
+ "The distribution of values may be incorrect.",
1060
+ stacklevel=2,
1061
+ )
1062
+
1063
+ # Values are generated by using a truncated uniform distribution and
1064
+ # then using the inverse CDF for the normal distribution.
1065
+ # Get upper and lower cdf values
1066
+ l = norm_cdf((a - mean) / std)
1067
+ u = norm_cdf((b - mean) / std)
1068
+
1069
+ # Uniformly fill tensor with values from [l, u], then translate to
1070
+ # [2l-1, 2u-1].
1071
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
1072
+
1073
+ # Use inverse cdf transform for normal distribution to get truncated
1074
+ # standard normal
1075
+ tensor.erfinv_()
1076
+
1077
+ # Transform to proper mean, std
1078
+ tensor.mul_(std * math.sqrt(2.0))
1079
+ tensor.add_(mean)
1080
+
1081
+ # Clamp to ensure it's in the proper range
1082
+ tensor.clamp_(min=a, max=b)
1083
+
1084
+
1085
def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Returns:
        The input `tensor`, filled in place.
    """
    # FIX 1: the docstring is now a raw string; previously `\text` was parsed
    # as a tab escape, corrupting the rendered math.
    with torch.no_grad():
        # Sample a standard truncated normal on [a, b], then scale and shift.
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)
    # FIX 2: return the tensor to honor the declared `-> torch.Tensor`
    # annotation (matches torch.nn.init conventions; callers ignoring the
    # return value are unaffected).
    return tensor
1109
+
1110
+
1111
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    """Variance-scaling initializer (JAX/TF style), in place.

    Args:
        tensor: the `torch.Tensor` to fill.
        scale: scaling factor applied to the variance.
        mode: one of "fan_in", "fan_out", "fan_avg" — which fan to normalize by.
        distribution: one of "truncated_normal", "normal", "uniform".

    Raises:
        ValueError: if `mode` or `distribution` is not one of the values above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2
    else:
        # BUG FIX: an unknown mode previously left `denom` unbound and crashed
        # with UnboundLocalError at `scale / denom`; fail fast with a clear error.
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        # Uniform on [-bound, bound] has variance bound^2 / 3.
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
1134
+
1135
+
1136
def lecun_normal_(tensor):
    # LeCun normal init: truncated normal scaled by 1/sqrt(fan_in).
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
1138
+
1139
+
1140
def default_flax_embed_init(tensor):
    # Flax's default embedding init: normal with variance 1/fan_in.
    variance_scaling_(tensor, mode="fan_in", distribution="normal")
1142
+
1143
+
1144
class Siglip2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Siglip2Config
    base_model_prefix = "siglip2"
    supports_gradient_checkpointing = True

    # Modules that must not be split across devices by `device_map` planning.
    # FIX: "Siglip2EncoderLayer" was listed twice; the duplicate is removed.
    _no_split_modules = [
        "Siglip2TextEmbeddings",
        "Siglip2EncoderLayer",
        "Siglip2VisionEmbeddings",
        "Siglip2MultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # Branch order matters: specific module classes are matched before the
        # generic nn.Linear / nn.Conv2d fallback.
        if isinstance(module, Siglip2VisionEmbeddings):
            # Position table: normal with std 1/sqrt(width).
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, Siglip2Attention):
            # Xavier-uniform projection weights with zero biases.
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, Siglip2MLP):
            # Xavier-uniform weights; near-zero (but nonzero) biases.
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Generic fallback for layers not covered above.
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
1192
+
1193
+
1194
class Siglip2VisionModel(Siglip2PreTrainedModel):
    # Stand-alone wrapper that exposes only the vision transformer of SigLIP2.
    config_class = Siglip2VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__(config)

        self.vision_model = Siglip2VisionTransformer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The patch-embedding layer is the vision analogue of input embeddings.
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Thin delegation: all computation happens inside the inner transformer.
        return self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
1252
+
1253
+
1254
class SigLip2VisionTower(nn.Module):
    """LLaVA-style vision-tower wrapper around `Siglip2VisionModel` (NaFlex).

    Images arrive already resized so that height/width are multiples of the
    encoder patch size; `patch_sizes` carries the (h, w) patch-grid shape of
    each image. `forward` patchifies each image, zero-pads every sequence to
    the longest one in the batch, and runs the frozen SigLIP2 encoder once.
    """

    def __init__(self, vision_tower, vision_tower_cfg, delay_load=False):
        """
        Args:
            vision_tower: model name or path passed to `from_pretrained`.
            vision_tower_cfg: training config; inspected to decide whether
                checkpoint weights for the tower must be loaded eagerly.
            delay_load: if True, defer weight loading unless the config
                indicates the tower is tunable/unfrozen.
        """
        super().__init__()

        self.is_loaded = False
        self.config = Siglip2VisionConfig()
        self.vision_tower_name = vision_tower
        self.image_processor = SigLipImageProcessor()

        if not delay_load:
            rank0_print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print("The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts:
            rank0_print("The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()
        else:
            self.cfg_only = self.config

    def load_model(self, device_map=None):
        """Load the pretrained encoder, drop its pooling head, and freeze it."""
        if self.is_loaded:
            rank0_print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return
        self.vision_tower = Siglip2VisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
        # The contrastive pooling head is not used for feature extraction.
        self.vision_tower.vision_model.head = nn.Identity()
        self.vision_tower.requires_grad_(False)
        self.is_loaded = True

    def forward(self, images, patch_sizes):
        """Encode a list of images into per-image patch features.

        Args:
            images: list of (C, H, W) tensors whose H and W are multiples of
                the encoder patch size.
            patch_sizes: list of (h, w) patch-grid shapes, one per image.

        Returns:
            Tuple of (1, max_length, hidden) tensors, one per image; positions
            beyond h * w in each sequence are padding and not meaningful.
        """
        if type(images) is not list:
            # Batched-tensor input was never implemented for the NaFlex path
            # (the original code printed a message and called exit() here).
            raise NotImplementedError("SigLip2VisionTower.forward only supports a list of images")

        pixel_values = []
        pixel_attention_masks = []
        spatial_shapes = []
        # Pad every image's patch sequence to the longest one in the batch.
        max_length = max(patch_size[0] * patch_size[1] for patch_size in patch_sizes)
        encoder_patch_size = self.vision_tower.vision_model.embeddings.patch_size
        for image, spatial_shape in zip(images, patch_sizes):
            valid_pixel_num = spatial_shape[0] * spatial_shape[1]
            spatial_shape = torch.as_tensor(spatial_shape)[None]
            image = image.to(device=self.device, dtype=self.dtype).unsqueeze(0)
            # (1, C, H, W) -> (1, h*w, p*p*C): flatten each patch into a vector.
            pixel_value = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=encoder_patch_size, p2=encoder_patch_size)
            # Zero-pad the patch dimension up to max_length and build the mask.
            padding_pixel = torch.zeros_like(pixel_value)[:, :1]
            pixel_value = torch.cat([pixel_value, padding_pixel.repeat(1, max_length - valid_pixel_num, 1)], dim=1)
            pixel_attention_mask = torch.zeros_like(pixel_value[:, :, 0])
            pixel_attention_mask[:, :valid_pixel_num] = 1

            pixel_values.append(pixel_value)
            pixel_attention_masks.append(pixel_attention_mask)
            spatial_shapes.append(spatial_shape)

        pixel_values = torch.cat(pixel_values)
        pixel_attention_masks = torch.cat(pixel_attention_masks)
        spatial_shapes = torch.cat(spatial_shapes)

        image_forward_outs = self.vision_tower(
            pixel_values,
            pixel_attention_mask=pixel_attention_masks,
            spatial_shapes=spatial_shapes,
            output_hidden_states=True,
        )

        # Per prior experiments, the last hidden state is the feature layer
        # used for both SigLIP and SigLIP2.
        image_features = image_forward_outs.last_hidden_state.to(pixel_values.dtype)
        # Split back into a tuple of per-image (1, max_length, hidden) tensors.
        image_features = image_features.split(1)
        return image_features

    @property
    def dummy_feature(self):
        # Zero placeholder feature matching the tower's dtype/device.
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # dtype of the (frozen) encoder parameters; None if it has none.
        for p in self.vision_tower.parameters():
            return p.dtype

    @property
    def device(self):
        # device of the (frozen) encoder parameters; None if it has none.
        for p in self.vision_tower.parameters():
            return p.device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        # NOTE(review): Siglip2VisionConfig as defined in this file has no
        # `image_size` attribute — these three properties would raise
        # AttributeError if called; confirm against the actual config used.
        return (self.config.image_size // self.config.patch_size) ** 2

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def image_size(self):
        return self.config.image_size
VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_siglip2_cjm.py ADDED
@@ -0,0 +1,1187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_siglip2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The HuggingFace Inc. team.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+ import math
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Any, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import torch
28
+ import torch.nn as nn
29
+ import torch.nn.functional as F
30
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
31
+ from torch.nn.init import _calculate_fan_in_and_fan_out
32
+
33
+ from transformers.activations import ACT2FN
34
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
35
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
36
+ from transformers.modeling_utils import PreTrainedModel
37
+ from transformers.utils import (
38
+ ModelOutput,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ is_flash_attn_2_available,
42
+ is_flash_attn_greater_or_equal_2_10,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from transformers.configuration_utils import PretrainedConfig
47
+
48
+
49
class Siglip2TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Siglip2TextModel`]. It is used to instantiate a
    Siglip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip2
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Siglip2 text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Siglip2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 64):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the padding token in the vocabulary.
        bos_token_id (`int`, *optional*, defaults to 49406):
            The id of the beginning-of-sequence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the vocabulary.
        projection_size (`int`, *optional*, defaults to `hidden_size`):
            The size of the projection head.

    Example:

    ```python
    >>> from transformers import Siglip2TextConfig, Siglip2TextModel

    >>> # Initializing a Siglip2TextConfig with google/siglip2-base-patch16-224 style configuration
    >>> configuration = Siglip2TextConfig()

    >>> # Initializing a Siglip2TextModel (with random weights) from the google/siglip2-base-patch16-224 style configuration
    >>> model = Siglip2TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        max_position_embeddings=64,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        # This differs from `CLIPTokenizer`'s default and from openai/siglip2
        # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        projection_size=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Plain attribute mirror of the constructor arguments (see class docstring).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        # Projection head width falls back to the encoder width when unset.
        self.projection_size = projection_size if projection_size is not None else hidden_size
139
+
140
+
141
class Siglip2VisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a
    Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2
    [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        num_patches (`int`, *optional*, defaults to 256):
            The number of patches in the image with the size of (`patch_size`, `patch_size`).
            The image is resized to fill maximum of this number of patches, and to preserve
            the aspect ratio. In case the resulted number of patches is lower, the image is
            padded in "patch" dimension.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    Example:

    ```python
    >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel

    >>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration
    >>> configuration = Siglip2VisionConfig()

    >>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration
    >>> model = Siglip2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        num_patches=256,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Plain attribute mirror of the constructor arguments (see class docstring).
        # NOTE: NaFlex variant — no fixed `image_size`; `num_patches` bounds
        # the variable-resolution patch sequence instead.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.num_patches = num_patches
221
+
222
+
223
class Siglip2Config(PretrainedConfig):
    r"""
    [`Siglip2Config`] is the configuration class to store the configuration of a [`Siglip2Model`]. It is used to
    instantiate a Siglip2 model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip2
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Siglip2TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Siglip2VisionConfig`].
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Siglip2Config, Siglip2Model

    >>> # Initializing a Siglip2Config with google/siglip2-base-patch16-224 style configuration
    >>> configuration = Siglip2Config()

    >>> # Initializing a Siglip2Model (with random weights) from the google/siglip2-base-patch16-224 style configuration
    >>> model = Siglip2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Siglip2Config from a Siglip2TextConfig and a Siglip2VisionConfig
    >>> from transformers import Siglip2TextConfig, Siglip2VisionConfig

    >>> # Initializing a Siglip2Text and Siglip2Vision configuration
    >>> config_text = Siglip2TextConfig()
    >>> config_vision = Siglip2VisionConfig()

    >>> config = Siglip2Config.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "siglip2"
    sub_configs = {"text_config": Siglip2TextConfig, "vision_config": Siglip2VisionConfig}

    def __init__(self, text_config=None, vision_config=None, **kwargs):
        super().__init__(**kwargs)

        # NOTE(review): `logger` is defined further down in this file; it is
        # only dereferenced at call time, so this works, but moving the
        # `logger = logging.get_logger(__name__)` line above the config
        # classes would be safer — confirm with the file owner.
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `Siglip2TextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `Siglip2VisionConfig` with default values.")

        self.text_config = Siglip2TextConfig(**text_config)
        self.vision_config = Siglip2VisionConfig(**vision_config)

        # Scaling factor applied during weight initialization.
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: Siglip2TextConfig, vision_config: Siglip2VisionConfig, **kwargs):
        r"""
        Instantiate a [`Siglip2Config`] (or a derived class) from siglip2 text model configuration and siglip2 vision
        model configuration.

        Returns:
            [`Siglip2Config`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
295
# NOTE(review): everything from here to the next blank block appears to be an
# accidental copy-paste duplicate of `Siglip2VisionConfig`'s docstring,
# class attributes, and `__init__`, left dangling after
# `Siglip2Config.from_text_vision_configs`. The source dump does not preserve
# indentation, so it cannot be determined whether this text sits at module
# level (dead code, as rendered here) or inside `Siglip2Config` (where the
# stray `model_type` / `__init__` would clobber that class). TODO: confirm
# against the original file and delete this block.
r"""
This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a
Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2
[google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the encoder layers and the pooler layer.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    num_channels (`int`, *optional*, defaults to 3):
        Number of channels in the input images.
    num_patches (`int`, *optional*, defaults to 256):
        The number of patches in the image with the size of (`patch_size`, `patch_size`).
        The image is resized to fill maximum of this number of patches, and to preserve
        the aspect ratio. In case the resulted number of patches is lower, the image is
        padded in "patch" dimension.
    patch_size (`int`, *optional*, defaults to 16):
        The size (resolution) of each patch.
    hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
    layer_norm_eps (`float`, *optional*, defaults to 1e-06):
        The epsilon used by the layer normalization layers.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.

Example:

```python
>>> from transformers import Siglip2VisionConfig, Siglip2VisionModel

>>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration
>>> configuration = Siglip2VisionConfig()

>>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration
>>> model = Siglip2VisionModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
```"""

model_type = "siglip2_vision_model"
base_config_key = "vision_config"

def __init__(
    self,
    hidden_size=768,
    intermediate_size=3072,
    num_hidden_layers=12,
    num_attention_heads=12,
    num_channels=3,
    num_patches=256,
    patch_size=16,
    hidden_act="gelu_pytorch_tanh",
    layer_norm_eps=1e-6,
    attention_dropout=0.0,
    **kwargs,
):
    super().__init__(**kwargs)

    self.hidden_size = hidden_size
    self.intermediate_size = intermediate_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.num_channels = num_channels
    self.patch_size = patch_size
    self.attention_dropout = attention_dropout
    self.layer_norm_eps = layer_norm_eps
    self.hidden_act = hidden_act
    self.num_patches = num_patches
374
+
375
+
376
# `_flash_attention_forward` can only be imported when flash-attn 2 is installed.
if is_flash_attn_2_available():
    from transformers.modeling_flash_attention_utils import _flash_attention_forward


# Module-level logger, shared by all classes in this file.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "Siglip2VisionConfig"
384
+
385
+
386
+ @dataclass
387
+ class Siglip2VisionOutput(ModelOutput):
388
+ """
389
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
390
+
391
+ Args:
392
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
393
+ The image embeddings obtained by applying the projection layer to the pooler_output.
394
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
395
+ Sequence of hidden-states at the output of the last layer of the model.
396
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
397
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
398
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
399
+
400
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
401
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
402
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
403
+ sequence_length)`.
404
+
405
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
406
+ heads.
407
+ """
408
+
409
+ image_embeds: Optional[torch.FloatTensor] = None
410
+ last_hidden_state: torch.FloatTensor = None
411
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
412
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
413
+
414
+
415
class Siglip2VisionEmbeddings(nn.Module):
    """Patch + learned-position embeddings for Siglip2 with variable image shapes.

    Pixel values arrive already patchified as flat vectors of length
    ``num_channels * patch_size**2``; a single linear layer projects them to the
    hidden size, and a square grid of learned position embeddings is bilinearly
    resized to each image's own (height, width) patch grid.
    """

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.patch_size = config.patch_size

        # Linear patchifier: the input is a flattened patch, not a conv over raw pixels.
        self.patch_embedding = nn.Linear(
            in_features=config.num_channels * self.patch_size * self.patch_size,
            out_features=self.embed_dim,
        )

        self.num_patches = config.num_patches
        # Learned positions are stored flat but interpreted as a square grid.
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        max_length: int,
    ) -> torch.Tensor:
        """Bilinearly resize the position grid per image and pad to a fixed length.

        Args:
            positional_embeddings (`torch.Tensor`):
                Position embeddings of shape (height, width, embed_dim)
            spatial_shapes (`torch.LongTensor`):
                Per-image target patch grids, shape (batch_size, 2).
            max_length (`int`):
                Sequence length each resized grid is padded to.

        Returns:
            `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
        """
        num_images = spatial_shapes.shape[0]
        dim = positional_embeddings.shape[-1]
        original_dtype = positional_embeddings.dtype

        out = torch.empty(
            (num_images, max_length, dim),
            device=positional_embeddings.device,
            dtype=original_dtype,
        )

        # (height, width, dim) -> (1, dim, height, width): the layout F.interpolate expects.
        grid = positional_embeddings.permute(2, 0, 1).unsqueeze(0)

        # Antialiased interpolation has no bf16/fp16 CPU kernel; upcast on CPU only.
        if grid.device.type == "cpu":
            grid = grid.to(torch.float32)

        for idx in range(num_images):
            target_h, target_w = spatial_shapes[idx]
            resized = F.interpolate(
                grid,
                size=(target_h, target_w),
                mode="bilinear",
                align_corners=False,
                antialias=True,
            )

            # (1, dim, h, w) -> (h * w, dim), restored to the source dtype.
            resized = resized.reshape(dim, target_h * target_w).transpose(0, 1)
            resized = resized.to(original_dtype)

            valid = target_h * target_w
            out[idx, :valid] = resized
            # Padding positions are filled with the first (top-left) embedding.
            out[idx, valid:] = resized[0]

        return out

    def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor`):
                Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
            spatial_shapes (`torch.LongTensor`):
                Per-image patch grids of shape (batch_size, 2).
        """
        # Project flattened patches in the projection weight's dtype.
        weight_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=weight_dtype))

        # View the flat learned positions as a square grid, then resize per image.
        grid = self.position_embedding.weight.reshape(
            self.position_embedding_size, self.position_embedding_size, -1
        )
        pos_embeds = self.resize_positional_embeddings(
            grid, spatial_shapes, max_length=pixel_values.shape[1]
        )

        # Sum of content and position information, as in standard ViT embeddings.
        return patch_embeds + pos_embeds
514
+
515
+
516
class Siglip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, seq_len, _ = hidden_states.size()

        def split_heads(proj: torch.Tensor) -> torch.Tensor:
            # (bsz, seq, embed) -> (bsz, heads, seq, head_dim)
            return proj.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.q_proj(hidden_states))
        keys = split_heads(self.k_proj(hidden_states))
        values = split_heads(self.v_proj(hidden_states))

        kv_len = keys.shape[-2]
        scores = torch.matmul(queries, keys.transpose(2, 3)) * self.scale

        if scores.size() != (bsz, self.num_heads, seq_len, kv_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, seq_len, kv_len)}, but is"
                f" {scores.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, seq_len, kv_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, seq_len, kv_len)}, but is {attention_mask.size()}"
                )
            # Additive mask: padding positions hold large negative values.
            scores = scores + attention_mask

        # Softmax in fp32 for numerical stability, then back to the input dtype.
        probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(queries.dtype)
        probs = nn.functional.dropout(probs, p=self.dropout, training=self.training)
        context = torch.matmul(probs, values)

        if context.size() != (bsz, self.num_heads, seq_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, seq_len, self.head_dim)}, but is"
                f" {context.size()}"
            )

        # Merge heads back: (bsz, heads, seq, head_dim) -> (bsz, seq, embed).
        context = context.transpose(1, 2).contiguous()
        context = context.reshape(bsz, seq_len, self.embed_dim)

        return self.out_proj(context), probs
589
+
590
+
591
class Siglip2FlashAttention2(Siglip2Attention):
    """
    Siglip2Attention flash attention module. This module inherits from `Siglip2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Vision attention is bidirectional; never apply a causal mask.
    is_causal = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    # Adapted from transformers.models.llama.modeling_llama.LlamaFlashAttention2.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Flash attention never materializes the probability matrix, so the flag is
        # forcibly disabled and `attn_weights` below is always None.
        output_attentions = False

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim)

        # Dropout is applied only while training, matching the eager implementation.
        dropout_rate = self.dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32.

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                # Fall back to the dtype of the projection weights themselves.
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Delegates padding handling (via the 2D attention_mask) to the shared helper.
        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=dropout_rate,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        # output_attentions is always False here (see above), so this always sets None.
        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights
676
+
677
+
678
class Siglip2SdpaAttention(Siglip2Attention):
    """
    Siglip2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Siglip2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    is_causal = False

    # Adapted from Siglip2Attention.forward and transformers.models.llama.modeling_llama.LlamaSdpaAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        if output_attentions:
            # SDPA cannot return attention probabilities; fall back to the eager path.
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Siglip2Model is using Siglip2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
            )

        bsz, seq_len, _ = hidden_states.size()

        def split_heads(proj: torch.Tensor) -> torch.Tensor:
            # (bsz, seq, embed) -> (bsz, heads, seq, head_dim)
            return proj.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.q_proj(hidden_states))
        keys = split_heads(self.k_proj(hidden_states))
        values = split_heads(self.v_proj(hidden_states))

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if queries.device.type == "cuda" and attention_mask is not None:
            queries = queries.contiguous()
            keys = keys.contiguous()
            values = values.contiguous()

        # Explicit branch (not an inline conditional) keeps torch.compile's dynamic
        # shapes and full-graph options working when dispatching SDPA kernels.
        is_causal = True if self.is_causal and seq_len > 1 else False

        context = torch.nn.functional.scaled_dot_product_attention(
            queries,
            keys,
            values,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        # Merge heads back and apply the output projection.
        context = context.transpose(1, 2).contiguous()
        context = context.view(bsz, seq_len, self.embed_dim)

        return self.out_proj(context), None
742
+
743
+
744
class Siglip2MLP(nn.Module):
    """Transformer feed-forward block: hidden -> intermediate -> hidden with a nonlinearity."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Activation is looked up by name from the shared ACT2FN registry.
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand, apply the nonlinearity, then project back to the hidden size.
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
757
+
758
+
759
# Maps `config._attn_implementation` to the attention class used by the encoder layers.
SIGLIP2_ATTENTION_CLASSES = {
    "eager": Siglip2Attention,
    "flash_attention_2": Siglip2FlashAttention2,
    "sdpa": Siglip2SdpaAttention,
}
764
+
765
+
766
class Siglip2EncoderLayer(nn.Module):
    """Pre-norm transformer layer: self-attention and MLP, each wrapped in a residual."""

    def __init__(self, config: Siglip2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        # Attention backend is selected from the registry by config._attn_implementation.
        self.self_attn = SIGLIP2_ATTENTION_CLASSES[config._attn_implementation](config=config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    # Ignore copy
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Additive attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding
                elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether to also return the attention weights of this layer.
        """
        # Attention sub-block (pre-norm, then residual).
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_out

        # MLP sub-block (pre-norm, then residual).
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))

        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
813
+
814
+
815
class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2Config
    """

    def __init__(self, config: Siglip2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Pre-computed input embeddings (the vision patch embeddings).
            attention_mask (`torch.Tensor`, *optional*):
                Additive 4D attention mask (or a flash-attention 2D padding mask).
            output_attentions (`bool`, *optional*):
                Whether to collect per-layer attention weights.
            output_hidden_states (`bool`, *optional*):
                Whether to collect per-layer hidden states (including the input embeddings).
            return_dict (`bool`, *optional*):
                Whether to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Fall back to config defaults for any flag not passed explicitly.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for layer in self.layers:
            # Record the state *entering* each layer (so the input embeddings are included).
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # During training with checkpointing enabled, re-enter the layer through the
            # checkpointing function to trade compute for activation memory.
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Final state after the last layer.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
901
+
902
+
903
# Shared docstring fragment injected into the vision forwards via
# `add_start_docstrings_to_model_forward`.
SIGLIP2_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
919
+
920
+
921
class Siglip2VisionTransformer(nn.Module):
    """Vision backbone: patch embeddings -> transformer encoder -> final layernorm (+ optional pooling head)."""

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        # NOTE(review): when `config.vision_use_head` is truthy, `forward` below calls
        # `self.head`, but no `head` attribute is assigned in this __init__ — confirm the
        # pooling head is attached elsewhere or that vision_use_head is always False here.
        self.use_head = False if not hasattr(config, "vision_use_head") else config.vision_use_head
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        # Config defaults apply when flags are not passed explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values, spatial_shapes)

        # Flash attention 2 consumes the 2D padding mask directly; other backends need
        # the expanded additive 4D mask.
        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        else:
            encoder_attention_mask = attention_mask

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        # Pooler runs on the *original* 2D mask, not the expanded 4D one.
        pooler_output = self.head(last_hidden_state, attention_mask) if self.use_head else None
        if not return_dict:
            return (last_hidden_state, pooler_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
983
+
984
+
985
def _trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Standard normal cumulative distribution function.
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Sample uniformly in CDF space over [a, b], then invert the normal CDF.
    lower_cdf = norm_cdf((a - mean) / std)
    upper_cdf = norm_cdf((b - mean) / std)

    # Uniform over [2*lower-1, 2*upper-1] so erfinv maps the draw back into [a, b].
    tensor.uniform_(2 * lower_cdf - 1, 2 * upper_cdf - 1)
    tensor.erfinv_()

    # Scale and shift to the requested mean / std.
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Guard against floating-point drift just outside the bounds.
    tensor.clamp_(min=a, max=b)
1019
+
1020
+
1021
def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    """Fill *tensor* in place from a truncated normal distribution (TF/JAX convention).

    Unlike `torch.nn.init.trunc_normal_`, the bounds ``[a, b]`` apply to the
    *standard* normal draw (mean=0, std=1); the result is scaled by ``std`` and
    shifted by ``mean`` afterwards.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value (before scale/shift)
        b: the maximum cutoff value (before scale/shift)
    """
    with torch.no_grad():
        # Draw from the standard truncated normal first, then scale and shift,
        # matching the TF/JAX initializers.
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)
1045
+
1046
+
1047
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    """Initialize *tensor* in place with variance-scaling (TF-style) initialization.

    The variance of the drawn values is ``scale / denom`` where ``denom`` is the
    fan selected by *mode*.

    Args:
        tensor: n-dimensional tensor to fill (modified in place).
        scale: multiplicative factor for the variance.
        mode: which fan to normalize by — "fan_in", "fan_out", or "fan_avg".
        distribution: "truncated_normal", "normal", or "uniform".

    Raises:
        ValueError: if *mode* or *distribution* is not one of the supported values.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2
    else:
        # Fix: an unknown mode previously fell through and crashed later with a
        # NameError on `denom`; fail fast with a clear message instead.
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
1070
+
1071
+
1072
def lecun_normal_(tensor):
    """LeCun normal initialization: truncated normal scaled by 1/fan_in (in place)."""
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
1074
+
1075
+
1076
def default_flax_embed_init(tensor):
    """Flax's default embedding initialization: normal with variance 1/fan_in (in place)."""
    variance_scaling_(tensor, mode="fan_in", distribution="normal")
1078
+
1079
+
1080
class Siglip2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Siglip2Config
    base_model_prefix = "siglip2"
    supports_gradient_checkpointing = True

    # Modules that must not be split across devices by accelerate's auto device map.
    # NOTE(review): "Siglip2EncoderLayer" appears twice — harmless but redundant.
    _no_split_modules = [
        "Siglip2TextEmbeddings",
        "Siglip2EncoderLayer",
        "Siglip2VisionEmbeddings",
        "Siglip2EncoderLayer",
        "Siglip2MultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # Dispatch on the most specific type first; the generic nn.Linear /
        # nn.Embedding / nn.LayerNorm fallbacks come last.
        if isinstance(module, Siglip2VisionEmbeddings):
            # Position embeddings: normal with std 1/sqrt(hidden_size).
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, Siglip2Attention):
            # Xavier-uniform weights with zero biases for all four projections.
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, Siglip2MLP):
            # Xavier-uniform weights; near-zero (not exactly zero) biases.
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
1128
+
1129
+
1130
class Siglip2VisionModel(Siglip2PreTrainedModel):
    """Standalone wrapper exposing the Siglip2 vision transformer as a `PreTrainedModel`."""

    config_class = Siglip2VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__(config)

        self.vision_model = Siglip2VisionTransformer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The linear patch projection acts as the model's input embedding layer.
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Thin delegation: all work happens in the inner vision transformer; note the
        # `pixel_attention_mask` parameter is forwarded as `attention_mask`.
        return self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
VLMEvalKit-sudoku/llava/model/multimodal_encoder/modeling_swin_siglip2.py ADDED
@@ -0,0 +1,1319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_siglip2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The HuggingFace Inc. team.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+ import math
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from functools import partial, reduce
25
+ import torch.utils.checkpoint
26
+ from PIL import Image
27
+ from typing import Any, Optional, Tuple, Union, Dict
28
+ import os
29
+
30
+ import numpy as np
31
+ import torch
32
+ import torch.nn as nn
33
+ import torch.nn.functional as F
34
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
35
+ from torch.nn.init import _calculate_fan_in_and_fan_out
36
+
37
+ from transformers.activations import ACT2FN
38
+ from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask
39
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
40
+ from transformers.modeling_utils import PreTrainedModel
41
+ from transformers.utils import (
42
+ ModelOutput,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_2_available,
46
+ is_flash_attn_greater_or_equal_2_10,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from transformers.configuration_utils import PretrainedConfig
51
+ from transformers.image_processing_utils import BatchFeature, get_size_dict
52
+ from transformers.image_transforms import (
53
+ convert_to_rgb,
54
+ normalize,
55
+ rescale,
56
+ resize,
57
+ to_channel_dimension_format,
58
+ )
59
+ from transformers.image_utils import (
60
+ ChannelDimension,
61
+ PILImageResampling,
62
+ to_numpy_array,
63
+ )
64
+ from transformers.activations import ACT2FN
65
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
66
+ from transformers.modeling_utils import PreTrainedModel
67
+ from transformers import PretrainedConfig
68
+ from transformers.utils import ModelOutput
69
+ from llava.utils import rank0_print
70
+ from einops import rearrange
71
+
72
class SigLipImageProcessor:
    """Minimal SigLIP-style image preprocessor.

    Applies, in order: RGB conversion, numpy conversion, optional resize,
    optional rescale, optional normalization, then channel-dimension
    formatting, and packs the result into a `BatchFeature`.
    """

    def __init__(self, image_mean=(0.5, 0.5, 0.5), image_std=(0.5, 0.5, 0.5), size=(384, 384), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
        if crop_size is None:
            crop_size = {"height": 384, "width": 384}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.image_mean = image_mean
        self.image_std = image_std
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.data_format = data_format
        self.crop_size = crop_size

    def preprocess(self, images, do_resize=True, do_center_crop=True, do_rescale=True, do_normalize=True, return_tensors='pt'):
        """Preprocess one PIL image or a sequence of frames into pixel values.

        NOTE(review): `do_center_crop` (and `self.crop_size`) are accepted but
        never applied — cropping is currently a no-op; confirm intentional.
        """
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # Sequences (e.g. video frames) are coerced to numpy arrays first.
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)

        # Build the ordered transform pipeline.
        steps = [
            convert_to_rgb,
            to_numpy_array,
        ]
        if do_resize:
            steps.append(partial(resize, size=self.size, resample=self.resample, data_format=self.data_format))
        if do_rescale:
            steps.append(partial(rescale, scale=self.rescale_factor, data_format=self.data_format))
        if do_normalize:
            steps.append(partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format))
        steps.append(partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format))

        # Apply every step to every image, stage by stage.
        for step in steps:
            images = [step(image) for image in images]

        return BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
113
+
114
+
115
class Siglip2TextConfig(PretrainedConfig):
    r"""Configuration for [`Siglip2TextModel`].

    Instantiating with defaults yields a configuration similar to the text
    encoder of
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224).
    Inherits from [`PretrainedConfig`]; see its documentation for common
    options.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size.
        hidden_size (`int`, *optional*, defaults to 768): Encoder/pooler width.
        intermediate_size (`int`, *optional*, defaults to 3072): Feed-forward width.
        num_hidden_layers (`int`, *optional*, defaults to 12): Transformer depth.
        num_attention_heads (`int`, *optional*, defaults to 12): Attention heads per layer.
        max_position_embeddings (`int`, *optional*, defaults to 64): Maximum sequence length.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Activation in encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06): LayerNorm epsilon.
        attention_dropout (`float`, *optional*, defaults to 0.0): Attention dropout ratio.
        pad_token_id (`int`, *optional*, defaults to 1): Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406): Beginning-of-sequence token id.
        eos_token_id (`int`, *optional*, defaults to 49407): End-of-sequence token id.
        projection_size (`int`, *optional*, defaults to `hidden_size`): Projection head size.

    Example:

    ```python
    >>> from transformers import Siglip2TextConfig, Siglip2TextModel

    >>> configuration = Siglip2TextConfig()
    >>> model = Siglip2TextModel(configuration)
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        max_position_embeddings=64,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        # Pad id differs from `CLIPTokenizer`'s default; see
        # https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        projection_size=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Core architecture dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        # Numerics / regularization.
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        # Projection head falls back to the encoder width when unspecified.
        self.projection_size = hidden_size if projection_size is None else projection_size
205
+
206
+
207
class Siglip2VisionConfig(PretrainedConfig):
    r"""Configuration for [`Siglip2VisionModel`].

    Instantiating with defaults yields a configuration similar to the vision
    encoder of
    [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex).
    Inherits from [`PretrainedConfig`]; see its documentation for common
    options.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152): Encoder/pooler width.
        intermediate_size (`int`, *optional*, defaults to 4304): Feed-forward width.
        num_hidden_layers (`int`, *optional*, defaults to 27): Transformer depth.
        num_attention_heads (`int`, *optional*, defaults to 16): Attention heads per layer.
        num_channels (`int`, *optional*, defaults to 3): Input image channels.
        num_patches (`int`, *optional*, defaults to 256):
            Number of (`patch_size` x `patch_size`) patches the image is resized
            to fill (aspect-ratio preserving, padded in the patch dimension if
            fewer patches result).
        patch_size (`int`, *optional*, defaults to 16): Patch resolution.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Activation in encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06): LayerNorm epsilon.
        attention_dropout (`float`, *optional*, defaults to 0.0): Attention dropout ratio.
        image_size (`int`, *optional*, defaults to 384): Nominal input image size.

    Example:

    ```python
    >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel

    >>> configuration = Siglip2VisionConfig()
    >>> model = Siglip2VisionModel(configuration)
    >>> configuration = model.config
    ```"""

    model_type = "siglip2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=1152,
        intermediate_size=4304,
        num_hidden_layers=27,
        num_attention_heads=16,
        num_channels=3,
        num_patches=256,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        image_size=384,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Previously hard-coded to 384 (marked "#fixme"); now a parameter with
        # the same default, so existing callers see identical behavior while
        # checkpoints that carry `image_size` in kwargs are honored instead of
        # being silently overwritten.
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.num_patches = num_patches
288
+
289
+
290
class Siglip2Config(PretrainedConfig):
    r"""Composite configuration for [`Siglip2Model`], holding a text and a
    vision sub-configuration.

    Instantiating with defaults yields a configuration similar to
    [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224).
    Inherits from [`PretrainedConfig`]; see its documentation for common
    options.

    Args:
        text_config (`dict`, *optional*):
            Options used to initialize [`Siglip2TextConfig`].
        vision_config (`dict`, *optional*):
            Options used to initialize [`Siglip2VisionConfig`].
        kwargs (*optional*):
            Additional keyword arguments forwarded to `PretrainedConfig`.

    Example:

    ```python
    >>> from transformers import Siglip2Config, Siglip2Model

    >>> configuration = Siglip2Config()
    >>> model = Siglip2Model(configuration)
    >>> configuration = model.config

    >>> # Building from sub-configs:
    >>> from transformers import Siglip2TextConfig, Siglip2VisionConfig
    >>> config_text = Siglip2TextConfig()
    >>> config_vision = Siglip2VisionConfig()
    >>> config = Siglip2Config.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "siglip2"
    sub_configs = {"text_config": Siglip2TextConfig, "vision_config": Siglip2VisionConfig}

    def __init__(self, text_config=None, vision_config=None, **kwargs):
        super().__init__(**kwargs)

        # Fall back to default sub-configs when none are provided.
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `Siglip2TextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `Siglip2VisionConfig` with default values.")

        self.text_config = Siglip2TextConfig(**text_config)
        self.vision_config = Siglip2VisionConfig(**vision_config)

        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: Siglip2TextConfig, vision_config: Siglip2VisionConfig, **kwargs):
        r"""
        Instantiate a [`Siglip2Config`] (or a derived class) from siglip2 text model configuration and siglip2 vision
        model configuration.

        Returns:
            [`Siglip2Config`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

# NOTE(review): removed a duplicated, out-of-place copy of the
# Siglip2VisionConfig docstring, `model_type`/`base_config_key` attributes,
# and `__init__` that had been pasted directly after this class body. It
# shadowed `Siglip2Config.__init__` and `model_type` (leftover from the
# modular code generation), which broke
# `Siglip2Config(text_config=..., vision_config=...)`.
441
+
442
+ logger = logging.get_logger(__name__)
443
+
444
+ # General docstring
445
+ _CONFIG_FOR_DOC = "Siglip2VisionConfig"
446
+
447
+
448
@dataclass
class Siglip2VisionOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    # Annotated Optional because the dataclass default is None until populated.
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
475
+
476
+
477
class Siglip2VisionEmbeddings(nn.Module):
    """Patch + position embeddings for the variable-resolution SigLIP2 vision encoder.

    Patches are embedded with a linear projection over flattened patches, and
    the learned square grid of position embeddings is bilinearly resized to
    each image's spatial shape.
    """

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        # Linear patch embedding over flattened (C * p * p) patch vectors.
        self.patch_embedding = nn.Linear(
            in_features=config.num_channels * self.patch_size * self.patch_size,
            out_features=self.embed_dim,
        )

        # Position embeddings are stored as a flat table of `num_patches`
        # entries, interpreted as a square sqrt(num_patches) x sqrt(num_patches)
        # grid when resizing (assumes num_patches is a perfect square).
        self.num_patches = config.num_patches
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        max_length: int,
    ) -> torch.Tensor:
        """
        Resize positional embeddings to image-specific size and pad to a fixed size.

        Args:
            positional_embeddings (`torch.Tensor`):
                Position embeddings of shape (height, width, embed_dim)
            spatial_shapes (`torch.LongTensor`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
            max_length (`int`):
                Maximum length of the positional embeddings to pad resized positional embeddings to

        Returns:
            `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
        """
        batch_size = spatial_shapes.shape[0]
        embed_dim = positional_embeddings.shape[-1]
        source_dtype = positional_embeddings.dtype

        # Allocated uninitialized; every slot is overwritten in the loop below.
        resulted_positional_embeddings = torch.empty(
            (batch_size, max_length, embed_dim),
            device=positional_embeddings.device,
            dtype=source_dtype,
        )

        # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
        positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)

        # Upcast to float32 on CPU because antialias is not supported for bfloat16/float16 on CPU
        if positional_embeddings.device.type == "cpu":
            positional_embeddings = positional_embeddings.to(torch.float32)

        for i in range(batch_size):
            # (1, dim, height, width) -> (1, dim, target_height, target_width)
            height, width = spatial_shapes[i]
            resized_embeddings = F.interpolate(
                positional_embeddings,
                size=(height, width),
                mode="bilinear",
                align_corners=False,
                antialias=True,
            )

            # (1, dim, target_height, target_width) -> (target_height * target_width, dim)
            resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)

            # Cast to original dtype
            resized_embeddings = resized_embeddings.to(source_dtype)

            resulted_positional_embeddings[i, : height * width] = resized_embeddings
            # Padding positions reuse the first resized embedding; those
            # positions are expected to be masked out downstream.
            # NOTE(review): padding with entry [0] rather than zeros — confirm
            # this matches the upstream reference implementation.
            resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]

        return resulted_positional_embeddings

    def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor`):
                Dense images of shape (batch_size, num_channels, height, width);
                they are patchified internally via `rearrange` into
                (batch_size, num_patches, num_channels * patch_size * patch_size).
            spatial_shapes (`torch.LongTensor`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
        """

        # Patchify: (b, c, H, W) -> (b, h*w, p*p*c), then project each patch.
        pixel_values = rearrange(pixel_values, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=self.patch_size, p2=self.patch_size)
        # Match the projection weight dtype (e.g. under mixed precision).
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))

        # Reshape the flat position table to its square grid, then resize it
        # per image and pad to the common patch count.
        positional_embeddings = self.position_embedding.weight.reshape(
            self.position_embedding_size, self.position_embedding_size, -1
        )
        resized_positional_embeddings = self.resize_positional_embeddings(
            positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
        )
        # Add positional embeddings to patch embeddings
        embeddings = patch_embeds + resized_positional_embeddings
        return embeddings
579
+
580
+
581
class Siglip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' (eager path)."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        # Separate projections for keys, values, queries, and the output.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel.

        Returns the attended output and the (post-dropout) attention weights.
        """
        bsz, seq_len, _ = hidden_states.size()

        def split_heads(proj: torch.Tensor) -> torch.Tensor:
            # (b, s, e) -> (b, num_heads, s, head_dim)
            return proj.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.q_proj(hidden_states))
        keys = split_heads(self.k_proj(hidden_states))
        values = split_heads(self.v_proj(hidden_states))

        kv_len = keys.shape[-2]
        attn_weights = torch.matmul(queries, keys.transpose(2, 3)) * self.scale

        if attn_weights.size() != (bsz, self.num_heads, seq_len, kv_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, seq_len, kv_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, seq_len, kv_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, seq_len, kv_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # Softmax in float32 for numerical stability, then cast back.
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(queries.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, values)

        if attn_output.size() != (bsz, self.num_heads, seq_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, seq_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # (b, heads, s, head_dim) -> (b, s, e) and project out.
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(bsz, seq_len, self.embed_dim)
        return self.out_proj(attn_output), attn_weights
654
+
655
+
656
class Siglip2SdpaAttention(Siglip2Attention):
    """
    SigLIP2 attention backed by `torch.nn.functional.scaled_dot_product_attention` (SDPA).
    The weights are identical to `Siglip2Attention`; only the forward pass differs, so the two
    implementations are checkpoint-compatible.
    """

    is_causal = False

    # Adapted from Siglip2Attention.forward and transformers.models.llama.modeling_llama.LlamaSdpaAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # SDPA cannot return attention probabilities; fall back to the eager path when asked.
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "Siglip2Model is using Siglip2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
            )

        bsz, seq_len, _ = hidden_states.size()

        def _split_heads(x: torch.Tensor) -> torch.Tensor:
            # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
            return x.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        q = _split_heads(self.q_proj(hidden_states))
        k = _split_heads(self.k_proj(hidden_states))
        v = _split_heads(self.v_proj(hidden_states))

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous
        # inputs and a custom attn_mask. Reference: https://github.com/pytorch/pytorch/issues/112577
        if attention_mask is not None and q.device.type == "cuda":
            q, k, v = q.contiguous(), k.contiguous(), v.contiguous()

        # A plain statement-level branch (not an inline conditional) keeps torch.compile's
        # dynamic shapes and full-graph options working.
        if self.is_causal and seq_len > 1:
            is_causal = True
        else:
            is_causal = False

        context = torch.nn.functional.scaled_dot_product_attention(
            q,
            k,
            v,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        # Merge heads back and project out.
        context = context.transpose(1, 2).contiguous().view(bsz, seq_len, self.embed_dim)

        return self.out_proj(context), None
720
+
721
+
722
class Siglip2MLP(nn.Module):
    """Two-layer transformer feed-forward block: hidden -> intermediate -> hidden."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Activation is looked up by name from the config (e.g. "gelu_pytorch_tanh").
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Apply fc1 -> activation -> fc2."""
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
735
+
736
+
737
# Maps `config._attn_implementation` to the attention class used by Siglip2EncoderLayer.
# NOTE(review): no "flash_attention_2" entry here, although Siglip2VisionTransformer sets
# `_use_flash_attention_2` from the config — confirm whether FA2 is intentionally unsupported.
SIGLIP2_ATTENTION_CLASSES = {
    "eager": Siglip2Attention,
    "sdpa": Siglip2SdpaAttention,
}
741
+
742
+
743
+
744
class PatchMerging(nn.Module):
    r"""Swin-style patch merging layer.

    Merges each 2x2 neighborhood of patch tokens into one token: the four
    neighboring features are concatenated (4*dim), normalized, and projected
    back down to `dim` by a small two-layer MLP, halving each spatial side.

    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.norm = norm_layer(4 * dim)
        # Unlike the classic Swin reduction (Linear(4*dim, 2*dim)), this projects
        # back to `dim`, keeping the channel width constant across merges.
        self.reduction = nn.Sequential(
            nn.Linear(4 * dim, 4 * dim, bias=False),
            nn.GELU(),
            nn.Linear(4 * dim, dim, bias=False),
        )

    def forward(self, x, spatial_shapes):
        """Merge 2x2 token neighborhoods.

        Args:
            x: Token features of shape `(B, H*W, C)`.
            spatial_shapes: Integer tensor of (height, width) patch grids; rows are
                assumed identical within a batch — only `spatial_shapes[0]` is used.

        Returns:
            Tuple of merged features `(B, H*W/4, 4?->C)` and `spatial_shapes // 2`.
        """
        B, L, C = x.shape
        H, W = spatial_shapes[0]  # confirmed: rows are (h, w)
        assert H * W == L
        # A 2x2 merge needs even spatial sides; fail with a clear message instead of
        # an opaque reshape error.
        assert H % 2 == 0 and W % 2 == 0, f"PatchMerging requires even H/W, got ({H}, {W})"
        # Equivalent to einops rearrange 'b (h p1) (w p2) c -> b (h w) (p1 p2 c)'
        # with p1=p2=2, written with native tensor ops (no einops dependency).
        x = x.reshape(B, H, W, C)
        x = x.reshape(B, H // 2, 2, W // 2, 2, C)
        x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, (H // 2) * (W // 2), 4 * C)
        x = self.norm(x)
        x = self.reduction(x)
        return x, spatial_shapes // 2
779
+
780
class Siglip2EncoderLayer(nn.Module):
    # One pre-LN transformer block (self-attention + MLP) optionally followed by a
    # Swin-style PatchMerging stage that downsamples the token grid by 2x per side.
    def __init__(self, config: Siglip2Config, layer_index):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = SIGLIP2_ATTENTION_CLASSES[config._attn_implementation](config=config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        # layer_index records this layer's position so the config can mark which
        # layers carry a merger (downsampling) stage.
        self.layer_index = layer_index
        if layer_index in config.vision_config['merger_layer_index']:
            self.merger = PatchMerging(config.hidden_size)
        else:
            self.merger = None

    # Ignore copy
    def forward(
        self,
        hidden_states: torch.Tensor,
        spatial_shapes,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            spatial_shapes:
                Integer (height, width) patch-grid shapes, forwarded to (and halved by) the merger.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.

        Returns:
            `(hidden_states,)` for plain layers, `(hidden_states, new_spatial_shapes)` for
            merger layers; `attn_weights` is appended last when `output_attentions` is set.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        # Run the merger if this layer has one.
        if self.merger is not None:
            hidden_states, spatial_shapes = self.merger(hidden_states, spatial_shapes)
            outputs = (hidden_states, spatial_shapes)
        else:
            outputs = (hidden_states,)

        # NOTE(review): with both a merger and output_attentions the tuple becomes
        # (hidden, spatial_shapes, attn_weights) — consumers must disambiguate by length.
        if output_attentions:
            outputs += (attn_weights,)

        return outputs
839
+
840
+
841
class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2Config
    """

    def __init__(self, config: "Siglip2Config"):
        super().__init__()
        self.config = config
        # Each layer receives its index so merger (downsampling) layers can be placed by config.
        self.layers = nn.ModuleList([Siglip2EncoderLayer(config, layer_index=i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    def forward(
        self,
        inputs_embeds,
        spatial_shapes,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> "Union[Tuple, BaseModelOutput]":
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            spatial_shapes:
                Integer (height, width) patch-grid shapes; updated in place of the local variable
                whenever a merger layer halves the token grid.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    spatial_shapes,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    spatial_shapes,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            # Layer output is (hidden,) | (hidden, attn) | (hidden, spatial) | (hidden, spatial, attn):
            # a merger (Swin-style) layer additionally returns the updated spatial shapes, and the
            # attention weights, when requested, always come last.
            # BUGFIX: the previous code read layer_outputs[1] as attention weights whenever
            # output_attentions was set, which on merger layers recorded the spatial-shapes
            # tensor as "attentions" and silently dropped the halved spatial shapes.
            extras = layer_outputs[1:]
            if output_attentions:
                all_attentions = all_attentions + (extras[-1],)
                extras = extras[:-1]
            if extras:
                spatial_shapes = extras[0]

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
935
+
936
+
937
# Shared input docstring injected into the vision `forward` methods via
# `add_start_docstrings_to_model_forward`.
SIGLIP2_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
    """
953
+
954
+
955
class Siglip2VisionTransformer(nn.Module):
    """SigLIP2 vision backbone: embeddings -> (Swin-merging) encoder -> final LayerNorm -> head."""

    def __init__(self, config: "Siglip2VisionConfig"):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.use_head = False if not hasattr(config, "vision_use_head") else config.vision_use_head
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
        # BUGFIX: `forward` unconditionally calls `self.head`, but no head was ever created
        # here, so the module crashed unless one was attached externally (e.g.
        # NaFlexSigLip2SwinVisionTower.load_model sets `vision_model.head = nn.Identity()`).
        # Default to a parameter-free no-op; an external pooling head can still replace it.
        self.head = nn.Identity()

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values, spatial_shapes)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        # Pooling head (identity by default — see __init__).
        pooled_output = self.head(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1009
+
1010
+
1011
+ def _trunc_normal_(tensor, mean, std, a, b):
1012
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
1013
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
1014
+ def norm_cdf(x):
1015
+ # Computes standard normal cumulative distribution function
1016
+ return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
1017
+
1018
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
1019
+ warnings.warn(
1020
+ "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
1021
+ "The distribution of values may be incorrect.",
1022
+ stacklevel=2,
1023
+ )
1024
+
1025
+ # Values are generated by using a truncated uniform distribution and
1026
+ # then using the inverse CDF for the normal distribution.
1027
+ # Get upper and lower cdf values
1028
+ l = norm_cdf((a - mean) / std)
1029
+ u = norm_cdf((b - mean) / std)
1030
+
1031
+ # Uniformly fill tensor with values from [l, u], then translate to
1032
+ # [2l-1, 2u-1].
1033
+ tensor.uniform_(2 * l - 1, 2 * u - 1)
1034
+
1035
+ # Use inverse cdf transform for normal distribution to get truncated
1036
+ # standard normal
1037
+ tensor.erfinv_()
1038
+
1039
+ # Transform to proper mean, std
1040
+ tensor.mul_(std * math.sqrt(2.0))
1041
+ tensor.add_(mean)
1042
+
1043
+ # Clamp to ensure it's in the proper range
1044
+ tensor.clamp_(min=a, max=b)
1045
+
1046
+
1047
def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    r"""Fill *tensor* in place from a truncated normal distribution.

    Values are drawn from :math:`\mathcal{N}(0, 1)` truncated to :math:`[a, b]`,
    then scaled by *std* and shifted by *mean*. This matches the TensorFlow / JAX
    behavior, where the bounds apply *before* the scale/shift (unlike
    `torch.nn.init.trunc_normal_`, which truncates after).

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    """
    with torch.no_grad():
        # Sample a truncated standard normal, then apply the affine transform.
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)
1071
+
1072
+
1073
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    """Initialize *tensor* in place with variance scaling (JAX/TF-style).

    The target variance is `scale / denom`, where `denom` is the fan-in, fan-out,
    or their average, realized as a truncated-normal, normal, or uniform draw.

    Args:
        tensor: tensor to initialize in place.
        scale: variance scale factor.
        mode: one of "fan_in", "fan_out", "fan_avg".
        distribution: one of "truncated_normal", "normal", "uniform".

    Raises:
        ValueError: on an unknown `mode` or `distribution`.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2
    else:
        # BUGFIX: an unknown mode previously fell through and surfaced as a
        # confusing UnboundLocalError on `denom` below; fail fast instead,
        # mirroring the `distribution` validation.
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
1096
+
1097
+
1098
def lecun_normal_(tensor):
    """LeCun normal init: truncated normal scaled by 1/fan_in (in place)."""
    variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="truncated_normal")
1100
+
1101
+
1102
def default_flax_embed_init(tensor):
    """Flax default embedding init: normal scaled by 1/fan_in (in place)."""
    variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal")
1104
+
1105
+
1106
class Siglip2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Siglip2Config
    base_model_prefix = "siglip2"
    supports_gradient_checkpointing = True

    # FIX: "Siglip2EncoderLayer" was listed twice; deduplicated.
    _no_split_modules = [
        "Siglip2TextEmbeddings",
        "Siglip2EncoderLayer",
        "Siglip2VisionEmbeddings",
        "Siglip2MultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights (SigLIP-flavoured: xavier for attention/MLP, lecun-normal elsewhere)."""
        if isinstance(module, Siglip2VisionEmbeddings):
            # Position embeddings: normal with std 1/sqrt(width).
            width = self.config.hidden_size
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, Siglip2Attention):
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, Siglip2MLP):
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            # Near-zero (but not exactly zero) biases, matching the Flax reference.
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
1154
+
1155
+
1156
class Siglip2VisionModel(Siglip2PreTrainedModel):
    """Standalone SigLIP2 vision model: a thin pretrained-model wrapper around
    `Siglip2VisionTransformer`."""

    config_class = Siglip2VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__(config)
        self.vision_model = Siglip2VisionTransformer(config)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        """Return the patch-embedding module that embeds pixel inputs."""
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(SIGLIP2_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Siglip2VisionConfig)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Delegate everything to the inner vision transformer.
        return self.vision_model(
            pixel_values=pixel_values,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
1212
+
1213
+
1214
+
1215
class NaFlexSigLip2SwinVisionTower(nn.Module):
    """Vision tower wrapping `Siglip2VisionModel` for variable-resolution (NaFlex) inputs.

    Holds an (optionally lazily loaded) frozen SigLIP2 vision model and its image
    processor, and exposes the convenience properties expected by the surrounding
    LLaVA-style code.
    """

    def __init__(self, vision_tower, vision_tower_cfg, delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.config = Siglip2VisionConfig()

        self.vision_tower_name = vision_tower

        self.image_processor = SigLipImageProcessor()

        if not delay_load:
            rank0_print(f"Loading vision tower: {vision_tower}")
            self.load_model()
        elif getattr(vision_tower_cfg, "unfreeze_mm_vision_tower", False):
            # TODO: better detector is needed.
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `unfreeze_mm_vision_tower`: True.")
            self.load_model()
        elif hasattr(vision_tower_cfg, "mm_tunable_parts") and "mm_vision_tower" in vision_tower_cfg.mm_tunable_parts:
            rank0_print(f"The checkpoint seems to contain `vision_tower` weights: `mm_tunable_parts` contains `mm_vision_tower`.")
            self.load_model()
        else:
            self.cfg_only = self.config

    def load_model(self, device_map=None):
        """Load the pretrained SigLIP2 vision model, drop its pooling head, and freeze it."""
        if self.is_loaded:
            rank0_print("{} is already loaded, `load_model` called again, skipping.".format(self.vision_tower_name))
            return

        self.vision_tower = Siglip2VisionModel.from_pretrained(self.vision_tower_name, device_map=device_map)
        # Only patch-level features are needed downstream; replace the pooling head with a no-op.
        self.vision_tower.vision_model.head = nn.Identity()
        self.vision_tower.requires_grad_(False)
        self.is_loaded = True

    def forward(self, images, patch_sizes):
        """Encode a list of images at their native patch-grid sizes.

        Args:
            images: list of image tensors (one per sample).
            patch_sizes: per-image (height, width) patch-grid shapes, zipped with `images`.

        Returns:
            List of per-image feature tensors (last hidden state of the tower).

        Raises:
            NotImplementedError: for batched (non-list) input — no parallel path exists.
        """
        if type(images) is list:
            image_features = []
            for image, spatial_shape in zip(images, patch_sizes):
                # Downstream expects spatial shapes of shape (1, 2) per image: add batch dim.
                spatial_shape = torch.as_tensor(spatial_shape)[None]
                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), spatial_shapes=spatial_shape, output_hidden_states=True)
                # For siglip/siglip2 the last layer's hidden state is used as the feature
                # (hidden_states has ViT-depth + 1 entries since the patch embedding is included).
                image_feature = image_forward_out.last_hidden_state.to(image.dtype)
                image_features.append(image_feature)
            return image_features

        # BUGFIX: this branch previously did `print('no support for paralla')` followed by
        # `exit()` (killing the whole process) with unreachable code after it; raise a
        # proper, catchable error instead.
        raise NotImplementedError(
            "NaFlexSigLip2SwinVisionTower.forward only supports a list of images; "
            "there is no batched parallel path."
        )

    @property
    def dummy_feature(self):
        """Zero feature of shape (1, hidden_size) on the tower's device/dtype."""
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        # dtype of the first parameter (None if the tower has no parameters).
        for p in self.vision_tower.parameters():
            return p.dtype

    @property
    def device(self):
        # device of the first parameter (None if the tower has no parameters).
        for p in self.vision_tower.parameters():
            return p.device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2

    @property
    def num_patches_per_side(self):
        return self.config.image_size // self.config.patch_size

    @property
    def image_size(self):
        return self.config.image_size