sy1998 commited on
Commit
aae28d1
·
verified ·
1 Parent(s): bc73ec5

Delete lmms-eval-0.2.0.post1

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. lmms-eval-0.2.0.post1/.github/issue_template.md +0 -8
  2. lmms-eval-0.2.0.post1/.github/pull_request_template.md +0 -8
  3. lmms-eval-0.2.0.post1/.github/workflows/black.yml +0 -17
  4. lmms-eval-0.2.0.post1/.gitignore +0 -39
  5. lmms-eval-0.2.0.post1/.pre-commit-config.yaml +0 -6
  6. lmms-eval-0.2.0.post1/LICENSE +0 -56
  7. lmms-eval-0.2.0.post1/README.md +0 -258
  8. lmms-eval-0.2.0.post1/docs/README.md +0 -12
  9. lmms-eval-0.2.0.post1/docs/commands.md +0 -24
  10. lmms-eval-0.2.0.post1/docs/current_tasks.md +0 -122
  11. lmms-eval-0.2.0.post1/docs/model_guide.md +0 -78
  12. lmms-eval-0.2.0.post1/docs/task_guide.md +0 -113
  13. lmms-eval-0.2.0.post1/lmms_eval/__init__.py +0 -0
  14. lmms-eval-0.2.0.post1/lmms_eval/__main__.py +0 -345
  15. lmms-eval-0.2.0.post1/lmms_eval/api/__init__.py +0 -0
  16. lmms-eval-0.2.0.post1/lmms_eval/api/filter.py +0 -53
  17. lmms-eval-0.2.0.post1/lmms_eval/api/instance.py +0 -29
  18. lmms-eval-0.2.0.post1/lmms_eval/api/metrics.py +0 -443
  19. lmms-eval-0.2.0.post1/lmms_eval/api/model.py +0 -203
  20. lmms-eval-0.2.0.post1/lmms_eval/api/registry.py +0 -156
  21. lmms-eval-0.2.0.post1/lmms_eval/api/samplers.py +0 -94
  22. lmms-eval-0.2.0.post1/lmms_eval/api/task.py +0 -1297
  23. lmms-eval-0.2.0.post1/lmms_eval/evaluator.py +0 -630
  24. lmms-eval-0.2.0.post1/lmms_eval/filters/__init__.py +0 -45
  25. lmms-eval-0.2.0.post1/lmms_eval/filters/decontamination.py +0 -23
  26. lmms-eval-0.2.0.post1/lmms_eval/filters/extraction.py +0 -278
  27. lmms-eval-0.2.0.post1/lmms_eval/filters/selection.py +0 -48
  28. lmms-eval-0.2.0.post1/lmms_eval/filters/transformation.py +0 -48
  29. lmms-eval-0.2.0.post1/lmms_eval/logging_utils.py +0 -367
  30. lmms-eval-0.2.0.post1/lmms_eval/models/__init__.py +0 -40
  31. lmms-eval-0.2.0.post1/lmms_eval/models/batch_gpt4.py +0 -204
  32. lmms-eval-0.2.0.post1/lmms_eval/models/claude.py +0 -256
  33. lmms-eval-0.2.0.post1/lmms_eval/models/from_log.py +0 -116
  34. lmms-eval-0.2.0.post1/lmms_eval/models/fuyu.py +0 -261
  35. lmms-eval-0.2.0.post1/lmms_eval/models/gemini_api.py +0 -185
  36. lmms-eval-0.2.0.post1/lmms_eval/models/gpt4v.py +0 -191
  37. lmms-eval-0.2.0.post1/lmms_eval/models/idefics2.py +0 -231
  38. lmms-eval-0.2.0.post1/lmms_eval/models/instructblip.py +0 -229
  39. lmms-eval-0.2.0.post1/lmms_eval/models/internvl.py +0 -484
  40. lmms-eval-0.2.0.post1/lmms_eval/models/llama_vid.py +0 -271
  41. lmms-eval-0.2.0.post1/lmms_eval/models/llava.py +0 -419
  42. lmms-eval-0.2.0.post1/lmms_eval/models/llava_hf.py +0 -341
  43. lmms-eval-0.2.0.post1/lmms_eval/models/llava_sglang.py +0 -161
  44. lmms-eval-0.2.0.post1/lmms_eval/models/llava_vid.py +0 -404
  45. lmms-eval-0.2.0.post1/lmms_eval/models/longva.py +0 -462
  46. lmms-eval-0.2.0.post1/lmms_eval/models/minicpm_v.py +0 -222
  47. lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/__init__.py +0 -0
  48. lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/load_video.py +0 -55
  49. lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/qwen/qwen_generate_utils.py +0 -370
  50. lmms-eval-0.2.0.post1/lmms_eval/models/mplug_owl_video.py +0 -193
lmms-eval-0.2.0.post1/.github/issue_template.md DELETED
@@ -1,8 +0,0 @@
1
- Before you open an issue, please check if a similar issue already exists or has been closed before.
2
-
3
- ### When you open an issue, please be sure to include the following
4
-
5
- - [ ] A descriptive title: [xxx] XXXX
6
- - [ ] A detailed description
7
-
8
- Thank you for your contributions!
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/.github/pull_request_template.md DELETED
@@ -1,8 +0,0 @@
1
- Before you open a pull-request, please check if a similar issue already exists or has been closed before.
2
-
3
- ### When you open a pull-request, please be sure to include the following
4
-
5
- - [ ] A descriptive title: [xxx] XXXX
6
- - [ ] A detailed description
7
-
8
- Thank you for your contributions!
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/.github/workflows/black.yml DELETED
@@ -1,17 +0,0 @@
1
- name: Lint
2
-
3
- on: [push, pull_request]
4
-
5
- jobs:
6
- lint:
7
- runs-on: ubuntu-latest
8
- steps:
9
- - uses: actions/checkout@v3
10
- - name: Set up Python
11
- uses: actions/setup-python@v4
12
- with:
13
- python-version: '3.9'
14
- - name: Install specific version of Black
15
- run: pip install black==23.9.1
16
- - name: Run Black
17
- run: black --line-length=240 ./
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/.gitignore DELETED
@@ -1,39 +0,0 @@
1
- env
2
- *.pyc
3
- output/
4
- data/
5
- lm_cache
6
- .idea
7
- build
8
- dist
9
- *.egg-info
10
- venv
11
- .vscode/
12
- temp
13
- __pycache__
14
- .ipynb_checkpoints
15
- temp
16
- # IPython
17
- profile_default/
18
- ipython_config.py
19
- logs/
20
- scripts/
21
- wandb/
22
- SimSun.ttf
23
- submissions/
24
- lmms_eval/tasks/hallusion_bench/hallusion_output_vs_model.json
25
- lmms_eval/tasks/hallusion_bench/hallusion_output_vd_model.json
26
- zk.log
27
- cache_dir
28
- ckpt
29
- pretrained/
30
- LLaVA/
31
- *logs
32
- temp/
33
- InternVL/
34
- logs/
35
- data/
36
- llava-video/
37
- Video-MME/
38
- VATEX/
39
- lmms_eval/tasks/vatex/__pycache__/utils.cpython-310.pyc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/.pre-commit-config.yaml DELETED
@@ -1,6 +0,0 @@
1
- repos:
2
- - repo: https://github.com/psf/black
3
- rev: 23.12.1
4
- hooks:
5
- - id: black
6
- language_version: python3
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/LICENSE DELETED
@@ -1,56 +0,0 @@
1
- # For the main pipeline structure-related code, we maintain the original license provided with lm-evaluation-harness, which is the MIT License.
2
-
3
- MIT License
4
-
5
- Copyright (c) 2024 LMMs-Lab
6
-
7
- Permission is hereby granted, free of charge, to any person obtaining a copy
8
- of this software and associated documentation files (the "Software"), to deal
9
- in the Software without restriction, including without limitation the rights
10
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
- copies of the Software, and to permit persons to whom the Software is
12
- furnished to do so, subject to the following conditions:
13
-
14
- The above copyright notice and this permission notice shall be included in all
15
- copies or substantial portions of the Software.
16
-
17
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
- SOFTWARE.
24
-
25
- # For the multimodal models and datasets that we have added (defined as code in the lmms_eval/tasks and lmms_eval/models folders), we apply the Apache License.
26
-
27
- Apache 2.0 License
28
-
29
- Copyright (c) 2024 LMMs-Lab
30
-
31
- Licensed under the Apache License, Version 2.0 (the "License");
32
- you may not use this file except in compliance with the License.
33
- You may obtain a copy of the License at
34
-
35
- http://www.apache.org/licenses/LICENSE-2.0
36
-
37
- Unless required by applicable law or agreed to in writing, software
38
- distributed under the License is distributed on an "AS IS" BASIS,
39
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
40
- See the License for the specific language governing permissions and
41
- limitations under the License.
42
-
43
- When modifying the code, please include the following information about the original lmms-eval source:
44
- # Adopted from lmms-eval from https://github.com/EvolvingLMMs-Lab/lmms-eval. Below is the original copyright:
45
- #
46
- # Licensed under the Apache License, Version 2.0 (the "License");
47
- # you may not use this file except in compliance with the License.
48
- # You may obtain a copy of the License at
49
- #
50
- # http://www.apache.org/licenses/LICENSE-2.0
51
- #
52
- # Unless required by applicable law or agreed to in writing, software
53
- # distributed under the License is distributed on an "AS IS" BASIS,
54
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
55
- # See the License for the specific language governing permissions and
56
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/README.md DELETED
@@ -1,258 +0,0 @@
1
- <p align="center" width="80%">
2
- <img src="https://i.postimg.cc/g0QRgMVv/WX20240228-113337-2x.png" width="100%" height="70%">
3
- </p>
4
-
5
- # The Evaluation Suite of Large Multimodal Models
6
-
7
- > Accelerating the development of large multimodal models (LMMs) with `lmms-eval`
8
-
9
- 🏠 [LMMs-Lab Homepage](https://lmms-lab.github.io/) | 🎉 [Blog](https://lmms-lab.github.io/lmms-eval-blog/lmms-eval-0.1/) | 📚 [Documentation](docs/README.md) | 🤗 [Huggingface Datasets](https://huggingface.co/lmms-lab) | <a href="https://emoji.gg/emoji/1684-discord-thread"><img src="https://cdn3.emoji.gg/emojis/1684-discord-thread.png" width="14px" height="14px" alt="Discord_Thread"></a> [discord/lmms-eval](https://discord.gg/zdkwKUqrPy)
10
-
11
- ---
12
-
13
- ## Announcement
14
-
15
- - [2024-06] 🎬🎬 The `lmms-eval/v0.2` has been upgraded to support video evaluations for video models like LLaVA-NeXT Video and Gemini 1.5 Pro across tasks such as EgoSchema, PerceptionTest, VideoMME, and more. Please refer to the [blog](https://lmms-lab.github.io/posts/lmms-eval-0.2/) for more details
16
-
17
- - [2024-03] 📝📝 We have released the first version of `lmms-eval`, please refer to the [blog](https://lmms-lab.github.io/posts/lmms-eval-0.1/) for more details
18
-
19
- ## Why `lmms-eval`?
20
-
21
- <p align="center" width="80%">
22
- <img src="https://i.postimg.cc/L5kNJsJf/Blue-Purple-Futuristic-Modern-3-D-Tech-Company-Business-Presentation.png" width="100%" height="80%">
23
- </p>
24
-
25
- In today's world, we're on an exciting journey toward creating Artificial General Intelligence (AGI), much like the enthusiasm of the 1960s moon landing. This journey is powered by advanced large language models (LLMs) and large multimodal models (LMMs), which are complex systems capable of understanding, learning, and performing a wide variety of human tasks.
26
-
27
- To gauge how advanced these models are, we use a variety of evaluation benchmarks. These benchmarks are tools that help us understand the capabilities of these models, showing us how close we are to achieving AGI.
28
-
29
- However, finding and using these benchmarks is a big challenge. The necessary benchmarks and datasets are spread out and hidden in various places like Google Drive, Dropbox, and different school and research lab websites. It feels like we're on a treasure hunt, but the maps are scattered everywhere.
30
-
31
- In the field of language models, there has been a valuable precedent set by the work of [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). They offer integrated data and model interfaces, enabling rapid evaluation of language models and serving as the backend support framework for the [open-llm-leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), and has gradually become the underlying ecosystem of the era of foundation models.
32
-
33
- We humbly absorbed the exquisite and efficient design of [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) and introduce **lmms-eval**, an evaluation framework meticulously crafted for consistent and efficient evaluation of LMMs.
34
-
35
- ## Installation
36
-
37
- For formal usage, you can install the package from PyPI by running the following command:
38
- ```bash
39
- pip install lmms-eval
40
- ```
41
-
42
- For development, you can install the package by cloning the repository and running the following command:
43
- ```bash
44
- git clone https://github.com/EvolvingLMMs-Lab/lmms-eval
45
- cd lmms-eval
46
- pip install -e .
47
- ```
48
-
49
- If you wanted to test llava, you will have to clone their repo from [LLaVA](https://github.com/haotian-liu/LLaVA) and
50
- ```bash
51
- # for llava 1.5
52
- # git clone https://github.com/haotian-liu/LLaVA
53
- # cd LLaVA
54
- # pip install -e .
55
-
56
- # for llava-next (1.6)
57
- git clone https://github.com/LLaVA-VL/LLaVA-NeXT
58
- cd LLaVA-NeXT
59
- pip install -e .
60
- ```
61
-
62
- <details>
63
- <summary>Reproduction of LLaVA-1.5's paper results</summary>
64
-
65
- You can check the [environment install script](miscs/repr_scripts.sh) and [torch environment info](miscs/repr_torch_envs.txt) to **reproduce LLaVA-1.5's paper results**. We found torch/cuda versions difference would cause small variations in the results, we provide the [results check](miscs/llava_result_check.md) with different environments.
66
-
67
- </details>
68
-
69
- If you want to test on caption dataset such as `coco`, `refcoco`, and `nocaps`, you will need to have `java==1.8.0 ` to let pycocoeval api to work. If you don't have it, you can install by using conda
70
- ```
71
- conda install openjdk=8
72
- ```
73
- you can then check your java version by `java -version`
74
-
75
-
76
- <details>
77
- <summary>Comprehensive Evaluation Results of LLaVA Family Models</summary>
78
- <br>
79
-
80
- As demonstrated by the extensive table below, we aim to provide detailed information for readers to understand the datasets included in lmms-eval and some specific details about these datasets (we remain grateful for any corrections readers may have during our evaluation process).
81
-
82
- We provide a Google Sheet for the detailed results of the LLaVA series models on different datasets. You can access the sheet [here](https://docs.google.com/spreadsheets/d/1a5ImfdKATDI8T7Cwh6eH-bEsnQFzanFraFUgcS9KHWc/edit?usp=sharing). It's a live sheet, and we are updating it with new results.
83
-
84
- <p align="center" width="100%">
85
- <img src="https://i.postimg.cc/jdw497NS/WX20240307-162526-2x.png" width="100%" height="80%">
86
- </p>
87
-
88
- We also provide the raw data exported from Weights & Biases for the detailed results of the LLaVA series models on different datasets. You can access the raw data [here](https://docs.google.com/spreadsheets/d/1AvaEmuG4csSmXaHjgu4ei1KBMmNNW8wflOD_kkTDdv8/edit?usp=sharing).
89
-
90
- </details>
91
- <br>
92
-
93
-
94
- Our Development will be continuing on the main branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub.
95
-
96
- ## Multiple Usages
97
-
98
- **Evaluation of LLaVA on MME**
99
-
100
- ```bash
101
- python3 -m accelerate.commands.launch \
102
- --num_processes=8 \
103
- -m lmms_eval \
104
- --model llava \
105
- --model_args pretrained="liuhaotian/llava-v1.5-7b" \
106
- --tasks mme \
107
- --batch_size 1 \
108
- --log_samples \
109
- --log_samples_suffix llava_v1.5_mme \
110
- --output_path ./logs/
111
- ```
112
-
113
- **Evaluation of LLaVA on multiple datasets**
114
-
115
- ```bash
116
- python3 -m accelerate.commands.launch \
117
- --num_processes=8 \
118
- -m lmms_eval \
119
- --model llava \
120
- --model_args pretrained="liuhaotian/llava-v1.5-7b" \
121
- --tasks mme,mmbench_en \
122
- --batch_size 1 \
123
- --log_samples \
124
- --log_samples_suffix llava_v1.5_mme_mmbenchen \
125
- --output_path ./logs/
126
- ```
127
-
128
- **For other variants llava. Please change the `conv_template` in the `model_args`**
129
-
130
- > `conv_template` is an arg of the init function of llava in `lmms_eval/models/llava.py`, you could find the corresponding value at LLaVA's code, probably in a dict variable `conv_templates` in `llava/conversations.py`
131
-
132
- ```bash
133
- python3 -m accelerate.commands.launch \
134
- --num_processes=8 \
135
- -m lmms_eval \
136
- --model llava \
137
- --model_args pretrained="liuhaotian/llava-v1.6-mistral-7b,conv_template=mistral_instruct" \
138
- --tasks mme,mmbench_en \
139
- --batch_size 1 \
140
- --log_samples \
141
- --log_samples_suffix llava_v1.5_mme_mmbenchen \
142
- --output_path ./logs/
143
- ```
144
-
145
- **Evaluation of larger lmms (llava-v1.6-34b)**
146
-
147
- ```bash
148
- python3 -m accelerate.commands.launch \
149
- --num_processes=8 \
150
- -m lmms_eval \
151
- --model llava \
152
- --model_args pretrained="liuhaotian/llava-v1.6-34b,conv_template=mistral_direct" \
153
- --tasks mme,mmbench_en \
154
- --batch_size 1 \
155
- --log_samples \
156
- --log_samples_suffix llava_v1.5_mme_mmbenchen \
157
- --output_path ./logs/
158
- ```
159
-
160
- **Evaluation with a set of configurations, supporting evaluation of multiple models and datasets**
161
-
162
- ```bash
163
- python3 -m accelerate.commands.launch --num_processes=8 -m lmms_eval --config ./miscs/example_eval.yaml
164
- ```
165
-
166
- **Evaluation with naive model sharding for bigger model (llava-next-72b)**
167
-
168
- ```bash
169
- python3 -m lmms_eval \
170
- --model=llava \
171
- --model_args=pretrained=lmms-lab/llava-next-72b,conv_template=qwen_1_5,device_map=auto,model_name=llava_qwen \
172
- --tasks=pope,vizwiz_vqa_val,scienceqa_img \
173
- --batch_size=1 \
174
- --log_samples \
175
- --log_samples_suffix=llava_qwen \
176
- --output_path="./logs/" \
177
- --wandb_args=project=lmms-eval,job_type=eval,entity=llava-vl
178
- ```
179
-
180
- **Evaluation with SGLang for bigger model (llava-next-72b)**
181
-
182
- ```bash
183
- python3 -m lmms_eval \
184
- --model=llava_sglang \
185
- --model_args=pretrained=lmms-lab/llava-next-72b,tokenizer=lmms-lab/llavanext-qwen-tokenizer,conv_template=chatml-llava,tp_size=8,parallel=8 \
186
- --tasks=mme \
187
- --batch_size=1 \
188
- --log_samples \
189
- --log_samples_suffix=llava_qwen \
190
- --output_path=./logs/ \
191
- --verbosity=INFO
192
- ```
193
-
194
- ### Supported models
195
-
196
- Please check [supported models](lmms_eval/models/__init__.py) for more details.
197
-
198
- ### Supported tasks
199
-
200
- Please check [supported tasks](lmms_eval/docs/current_tasks.md) for more details.
201
-
202
- ## Add Customized Model and Dataset
203
-
204
- Please refer to our [documentation](docs/README.md).
205
-
206
- ## Acknowledgement
207
-
208
- lmms_eval is a fork of [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness). We recommend you to read through the [docs of lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs) for relevant information.
209
-
210
- ---
211
-
212
- Below are the changes we made to the original API:
213
- Build context now only passes in idx and processes the image and doc during the model responding phase. This is due to the fact that the dataset now contains lots of images and we can't store them in the doc like the original lm-eval-harness; otherwise the CPU memory would explode.
214
- - Instance.args (lmms_eval/api/instance.py) now contains a list of images to be inputted to lmms.
215
- lm-eval-harness supports all HF language models as a single model class. Currently this is not possible for lmms because the input/output format of lmms in HF is not yet unified. Therefore, we have to create a new class for each lmms model. This is not ideal and we will try to unify them in the future.
216
-
217
- ---
218
-
219
- During the initial stage of our project, we thank:
220
- - [Xiang Yue](https://xiangyue9607.github.io/), [Jingkang Yang](https://jingkang50.github.io/), [Dong Guo](https://www.linkedin.com/in/dongguoset/) and [Sheng Shen](https://sincerass.github.io/) for early discussion and testing.
221
-
222
- ---
223
-
224
- During the `v0.1` to `v0.2`, we thank the community support from pull requests (PRs):
225
-
226
- > Details are in [lmms-eval/v0.2.0 release notes](https://github.com/EvolvingLMMs-Lab/lmms-eval/releases/tag/untagged-9057ff0e9a72d5a5846f)
227
-
228
- **Datasets:**
229
-
230
- - VCR: Visual Caption Restoration (officially from the authors, MILA)
231
- - ConBench (officially from the authors, PKU/Bytedance)
232
- - MathVerse (officially from the authors, CUHK)
233
- - MM-UPD (officially from the authors, University of Tokyo)
234
- - WebSRC (from Hunter Heiden)
235
- ScreenSpot (from Hunter Heiden)
236
- - RealworldQA (from Fanyi Pu, NTU)
237
- - Multi-lingual LLaVA-W (from Gagan Bhatia, UBC)
238
-
239
- **Models:**
240
-
241
- - LLaVA-HF (officially from Huggingface)
242
- - Idefics-2 (from the lmms-lab team)
243
- - microsoft/Phi-3-Vision (officially from the authors, Microsoft)
244
- - LLaVA-SGlang (from the lmms-lab team)
245
-
246
- ## Citations
247
-
248
- ```shell
249
- @misc{lmms_eval2024,
250
- title={LMMs-Eval: Accelerating the Development of Large Multimodal Models},
251
- url={https://github.com/EvolvingLMMs-Lab/lmms-eval},
252
- author={Bo Li*, Peiyuan Zhang*, Kaichen Zhang*, Fanyi Pu*, Xinrun Du, Yuhao Dong, Haotian Liu, Yuanhan Zhang, Ge Zhang, Chunyuan Li and Ziwei Liu},
253
- publisher = {Zenodo},
254
- version = {v0.1.0},
255
- month={March},
256
- year={2024}
257
- }
258
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/docs/README.md DELETED
@@ -1,12 +0,0 @@
1
- # LMMs Eval Documentation
2
-
3
- Welcome to the docs for `lmms-eval`!
4
-
5
- Majority of this documentation is adapted from [lm-eval-harness](https://github.com/EleutherAI/lm-evaluation-harness/)
6
-
7
- ## Table of Contents
8
-
9
- * To learn about the command line flags, see the [commands](commands.md)
10
- * To learn how to add a new model, see the [Model Guide](model_guide.md).
11
- * For a crash course on adding new tasks to the library, see our [Task Guide](task_guide.md).
12
- * If you need to upload your datasets into correct HF format with viewer supported, please refer to [tools](https://github.com/EvolvingLMMs-Lab/lmms-eval/tree/pufanyi/hf_dataset_docs/tools)
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/docs/commands.md DELETED
@@ -1,24 +0,0 @@
1
- # User Guide
2
- This document details the interface exposed by `lmms_eval` and provides details on what flags are available to users.
3
-
4
- ## Command-line Interface
5
-
6
-
7
- Equivalently, running the library can be done via the `lmms_eval` entrypoint at the command line.
8
-
9
- This mode supports a number of command-line arguments, the details of which can be also be seen via running with `-h` or `--help`:
10
-
11
- * `--model` : Selects which model type or provider is evaluated. Must be a model registered under lmms_eval/models. For example, `--model qwen_vl` or `--model llava`.
12
-
13
- * `--model_args` : Controls parameters passed to the model constructor. Accepts a string containing comma-separated keyword arguments to the model class of the format `"arg1=val1,arg2=val2,..."`, such as, for example `--model_args pretrained=liuhaotian/llava-v1.5-7b,batch_size=1`. For a full list of what keyword arguments, see the initialization of the corresponding model class in `lmms_eval/models/`.
14
-
15
- * `--tasks` : Determines which tasks or task groups are evaluated. Accepts a comma-separated list of task names or task group names. Must be solely comprised of valid tasks/groups. You can use `--tasks list` to see all the available tasks. If you add your own tasks but not shown on the list, you can try to set `--verbosity=DEBUG` to view the error message. You can also use `--tasks list_with_num` to check every tasks and the number of question each task contains. However, `list_with_num` will download all the available datasets and may require lots of memory and time.
16
-
17
- * `--batch_size` : Sets the batch size used for evaluation. Can be a positive integer or `"auto"` to automatically select the largest batch size that will fit in memory, speeding up evaluation. One can pass `--batch_size auto:N` to re-select the maximum batch size `N` times during evaluation. This can help accelerate evaluation further, since `lm-eval` sorts documents in descending order of context length.
18
-
19
- * `--output_path` : A string of the form `dir/file.jsonl` or `dir/`. Provides a path where high-level results will be saved, either into the file named or into the directory named. If `--log_samples` is passed as well, then per-document outputs and metrics will be saved into the directory as well.
20
-
21
- * `--log_samples` : If this flag is passed, then the model's outputs, and the text fed into the model, will be saved at per-document granularity. Must be used with `--output_path`.
22
-
23
- * `--limit` : Accepts an integer, or a float between 0.0 and 1.0 . If passed, will limit the number of documents to evaluate to the first X documents (if an integer) per task or first X% of documents per task. Useful for debugging, especially on costly API models.
24
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/docs/current_tasks.md DELETED
@@ -1,122 +0,0 @@
1
- # Current Tasks
2
-
3
- > () indicates the task name in the lmms_eval. The task name is also used to specify the dataset in the configuration file.
4
- > The following is manually updated documentation. You could use `lmms_eval task --list` to list all supported tasks and their task names.
5
-
6
- - AI2D (ai2d)
7
- - ChartQA (chartqa)
8
- - CMMMU (cmmmu)
9
- - CMMMU Validation (cmmmu_val)
10
- - CMMMU Test (cmmmu_test)
11
- - COCO Caption (coco_cap)
12
- - COCO 2014 Caption (coco2014_cap)
13
- - COCO 2014 Caption Validation (coco2014_cap_val)
14
- - COCO 2014 Caption Test (coco2014_cap_test)
15
- - COCO 2017 Caption (coco2017_cap)
16
- - COCO 2017 Caption MiniVal (coco2017_cap_val)
17
- - COCO 2017 Caption MiniTest (coco2017_cap_test)
18
- - [ConBench](https://github.com/foundation-multimodal-models/ConBench) (conbench)
19
- - DOCVQA (docvqa)
20
- - DOCVQA Validation (docvqa_val)
21
- - DOCVQA Test (docvqa_test)
22
- - Ferret (ferret)
23
- - Flickr30K (flickr30k)
24
- - Ferret Test (ferret_test)
25
- - GQA (gqa)
26
- - HallusionBenchmark (hallusion_bench_image)
27
- - Infographic VQA (info_vqa)
28
- - Infographic VQA Validation (info_vqa_val)
29
- - Infographic VQA Test (info_vqa_test)
30
- - LLaVA-Bench (llava_in_the_wild)
31
- - LLaVA-Bench-COCO (llava_bench_coco)
32
- - MathVerse (mathverse)
33
- - MathVerse Text Dominant (mathverse_testmini_text_dominant)
34
- - MathVerse Text Only (mathverse_testmini_text_only)
35
- - MathVerse Text Lite (mathverse_testmini_text_lite)
36
- - MathVerse Vision Dominant (mathverse_testmini_vision_dominant)
37
- - MathVerse Vision Intensive (mathverse_testmini_vision_intensive)
38
- - MathVerse Vision Only (mathverse_testmini_vision_only)
39
- - MathVista (mathvista)
40
- - MathVista Validation (mathvista_testmini)
41
- - MathVista Test (mathvista_test)
42
- - MMBench (mmbench)
43
- - MMBench English (mmbench_en)
44
- - MMBench English Dev (mmbench_en_dev)
45
- - MMBench English Test (mmbench_en_test)
46
- - MMBench Chinese (mmbench_cn)
47
- - MMBench Chinese Dev (mmbench_cn_dev)
48
- - MMBench Chinese Test (mmbench_cn_test)
49
- - MME (mme)
50
- - MMMU (mmmu)
51
- - MMMU Validation (mmmu_val)
52
- - MMMU Test (mmmu_test)
53
- - MMUPD (mmupd)
54
- - MMUPD Base (mmupd_base)
55
- - MMAAD Base (mmaad_base)
56
- - MMIASD Base (mmiasd_base)
57
- - MMIVQD Base (mmivqd_base)
58
- - MMUPD Option (mmupd_option)
59
- - MMAAD Option (mmaad_option)
60
- - MMIASD Option (mmiasd_option)
61
- - MMIVQD Option (mmivqd_option)
62
- - MMUPD Instruction (mmupd_instruction)
63
- - MMAAD Instruction (mmaad_instruction)
64
- - MMIASD Instruction (mmiasd_instruction)
65
- - MMIVQD Instruction (mmivqd_instruction)
66
- - MMVet (mmvet)
67
- - Multi-DocVQA (multidocvqa)
68
- - Multi-DocVQA Validation (multidocvqa_val)
69
- - Multi-DocVQA Test (multidocvqa_test)
70
- - NoCaps (nocaps)
71
- - NoCaps Validation (nocaps_val)
72
- - NoCaps Test (nocaps_test)
73
- - OKVQA (ok_vqa)
74
- - OKVQA Validation 2014 (ok_vqa_val2014)
75
- - POPE (pope)
76
- - RefCOCO (refcoco)
77
- - refcoco_seg_test
78
- - refcoco_seg_val
79
- - refcoco_seg_testA
80
- - refcoco_seg_testB
81
- - refcoco_bbox_test
82
- - refcoco_bbox_val
83
- - refcoco_bbox_testA
84
- - refcoco_bbox_testB
85
- - RefCOCO+ (refcoco+)
86
- - refcoco+_seg
87
- - refcoco+_seg_val
88
- - refcoco+_seg_testA
89
- - refcoco+_seg_testB
90
- - refcoco+_bbox
91
- - refcoco+_bbox_val
92
- - refcoco+_bbox_testA
93
- - refcoco+_bbox_testB
94
- - RefCOCOg (refcocog)
95
- - refcocog_seg_test
96
- - refcocog_seg_val
97
- - refcocog_bbox_test
98
- - refcocog_bbox_val
99
- - ScienceQA (scienceqa_full)
100
- - ScienceQA Full (scienceqa)
101
- - ScienceQA IMG (scienceqa_img)
102
- - ScreenSpot (screenspot)
103
- - ScreenSpot REC / Grounding (screenspot_rec)
104
- - ScreenSpot REG / Instruction Generation (screenspot_reg)
105
- - SeedBench (seedbench)
106
- - SeedBench 2 (seedbench_2)
107
- - ST-VQA (stvqa)
108
- - TextCaps (textcaps)
109
- - TextCaps Validation (textcaps_val)
110
- - TextCaps Test (textcaps_test)
111
- - TextVQA (textvqa)
112
- - TextVQA Validation (textvqa_val)
113
- - TextVQA Test (textvqa_test)
114
- - VizWizVQA (vizwiz_vqa)
115
- - VizWizVQA Validation (vizwiz_vqa_val)
116
- - VizWizVQA Test (vizwiz_vqa_test)
117
- - VQAv2 (vqav2)
118
- - VQAv2 Validation (vqav2_val)
119
- - VQAv2 Test (vqav2_test)
120
- - WebSRC (websrc)
121
- - WebSRC Validation (websrc_val)
122
- - WebSRC Test (websrc_test)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/docs/model_guide.md DELETED
@@ -1,78 +0,0 @@
1
- # New Model Guide
2
- In order to properly evaluate a given LM, we require implementation of a wrapper class subclassing the `lmms_eval.api.model.lmms` class, that defines how the lmms_eval should interface with your model. This guide walks through how to write this `lmms` subclass via adding it to the library!
3
-
4
- ## Setup
5
-
6
- To get started contributing, go ahead and fork the main repo, clone it, create a branch with the name of your task, and install the project requirements in your environment:
7
-
8
- ```sh
9
- # After forking...
10
- git clone https://github.com/<YOUR-USERNAME>/lmms-eval.git
11
- cd lmms-eval
12
- git checkout -b <model-type>
13
- pip install -e .
14
- ```
15
-
16
- Now, we'll create a new file where we'll be adding our model:
17
-
18
- ```sh
19
- touch lmms_eval/models/<my_model_filename>.py
20
- ```
21
-
22
- **As a rule of thumb, we recommend you to use `lmms_eval/models/qwen_vl.py` and `lmms_eval/models/instructblip.py` as reference implementations for your model. You can copy and paste the contents of one of these files into your new file to get started.**
23
-
24
- ## Interface
25
-
26
- All models must subclass the `lmms_eval.api.model.lmms` class.
27
-
28
- The lmms class enforces a common interface via which we can extract responses from a model:
29
-
30
- ```python
31
- class MyCustomLM(lmms):
32
- #...
33
- def loglikelihood(self, requests: list[Instance]) -> list[tuple[float, bool]]:
34
- #...
35
-
36
- def generate_until(self, requests: list[Instance]) -> list[str]:
37
- #...
38
- #...
39
- ```
40
- Where `Instance` is a dataclass defined in [`lmms_eval.api.instance`](https://github.com/EvolvingLMMs-Lab/lmms-eval/tree/main/lmms_eval/api/instance.py) with property `args` of request-dependent type signature described below.
41
-
42
- We support three types of requests, consisting of different interactions / measurements with an autoregressive LM.
43
-
44
- All three request types take as input `requests` of type `list[Instance]` that have a matching `Instance.request_type` to the method name. Overall, you can check the [construct_requests](https://github.com/EvolvingLMMs-Lab/lmms-eval/blob/main/lmms_eval/api/task.py#L918) to see how the arguments are being constructed for different types of output type requests.
45
-
46
- - `generate_until`
47
- - Each request contains `Instance.args : Tuple[str, dict]` containing 1. an input string to the LM and 2. a dictionary of keyword arguments used to control generation parameters.
48
- - In each `Instance.args` there will be 6 elements which are `contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split`. `contexts` refers to the formatted question and is the text input for the LMM. Sometimes it might contain image tokens and may need to be handled differently for different models. `all_gen_kwargs` refers to the dict that contains all the generation configuration for the model. We use `doc_id`, `task`, and `split` to access the dataset and then you can use `doc_to_visual` which is a function reference to process the image. When you implement your own model, you should use these to write your own generate_until function.
49
- - Using this input and these generation parameters, text will be sampled from the language model (typically until a maximum output length or specific stopping string sequences--for example, `{"until": ["\n\n", "."], "max_gen_toks": 128}`).
50
- - The generated input+output text from the model will then be returned.
51
-
52
- - `loglikelihood`
53
- - Each request contains `Instance.args : Tuple[str, str]` containing 1. an input string to the LM and 2. a target string on which the loglikelihood of the LM producing this target, conditioned on the input, will be returned.
54
- - In each `Instance.args` there will be 6 elements which are ` contexts, doc_to_target, doc_to_visual, doc_id, task, split`. `contexts` refers to the formatted question and is the text input for the LMM. Sometimes it might contain image tokens and may need to be handled differently for different models. `doc_to_target` is a function reference that gets the answer from the doc. This will be the continuation of the answer and only tokens belonging to this part should be calculated for the loglikelihood.
55
- - Each request will have, as result, `(ll, is_greedy): Tuple[float, int]` returned, where `ll` is a floating point number representing the log probability of generating the target string conditioned on the input, and `is_greedy` being either the value `0` or `1`, with it being `1` if and only if the target string *would be generated by greedy sampling from the LM* (that is, if the target string is the *most likely* N-token string to be output by the LM given the input. )
56
-
57
-
58
-
59
-
60
- ## Registration
61
-
62
- Congrats on implementing your model! Now it's time to test it out.
63
-
64
- To make your model usable via the command line interface to `lmms_eval`, you'll need to tell `lmms_eval` what your model's name is.
65
-
66
- This is done via a *decorator*, `lmms_eval.api.registry.register_model`. Using `register_model()`, one can both tell the package what the model's name(s) to be used are when invoking it with `python -m lmms_eval --model <name>` and alert `lmms_eval` to the model's existence.
67
-
68
- ```python
69
- from lmms_eval.api.registry import register_model
70
-
71
- @register_model("<name1>", "<name2>")
72
- class MyCustomLM(LM):
73
- ```
74
-
75
- The final step is to import your model in `lmms_eval/models/__init__.py`:
76
- ```python
77
- from .my_model_filename import MyCustomLM
78
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/docs/task_guide.md DELETED
@@ -1,113 +0,0 @@
1
- # Task Configuration
2
-
3
- The `lmms_eval` is meant to be an extensible and flexible framework within which many different evaluation tasks can be defined. All tasks in the new version of the harness are built around a YAML configuration file format.
4
-
5
- These YAML configuration files, along with the current codebase commit hash, are intended to be shareable such that providing the YAML config enables another researcher to precisely replicate the evaluation setup used by another, in the case that the prompt or setup differs from standard `lmms_eval` task implementations.
6
-
7
- While adding a standard evaluation task on a new dataset can be occasionally as simple as swapping out a Hugging Face dataset path in an existing file, more specialized evaluation setups also exist. Here we'll provide a crash course on the more advanced logic implementable in YAML form available to users.
8
-
9
- ## Good Reference Tasks
10
-
11
- Contributing a new task can be daunting! Luckily, much of the work has often been done for you in a different, similarly evaluated task. Good examples of task implementations to study include:
12
-
13
- Generation-based tasks:
14
-
15
- - MME (`lmms_eval/tasks/mme/mme.yaml`)
16
-
17
- ```yaml
18
- dataset_path: lmms-lab/MME
19
- dataset_kwargs:
20
- token: True
21
- task: "mme"
22
- test_split: test
23
- output_type: generate_until
24
- doc_to_visual: !function utils.mme_doc_to_visual
25
- doc_to_text: !function utils.mme_doc_to_text
26
- doc_to_target: "answer"
27
- generation_kwargs:
28
- max_new_tokens: 16
29
- temperature: 0
30
- top_p: 1.0
31
- num_beams: 1
32
- do_sample: false
33
- # The return value of process_results will be used by metrics
34
- process_results: !function utils.mme_process_results
35
- # Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
36
- metric_list:
37
- - metric: mme_percetion_score
38
- aggregation: !function utils.mme_aggregate_results
39
- higher_is_better: true
40
- - metric: mme_cognition_score
41
- aggregation: !function utils.mme_aggregate_results
42
- higher_is_better: true
43
- model_specific_prompt_kwargs:
44
- default:
45
- pre_prompt: ""
46
- post_prompt: "\nAnswer the question using a single word or phrase."
47
- qwen_vl:
48
- pre_prompt: ""
49
- post_prompt: " Answer:"
50
- metadata:
51
- - version: 0.0
52
- ```
53
-
54
- You can pay special attention to the `process_results` and `metric_list` fields, which are used to define how the model output is post-processed and scored.
55
- Also, the `model_specific_prompt_kwargs` field is used to define model-specific prompt configurations. The default is set to follow Llava.
56
-
57
- PPL-based tasks:
58
- - Seedbench (`lmms_eval/tasks/seedbench/seedbench_ppl.yaml`)
59
-
60
- ```yaml
61
- dataset_path: lmms-lab/SEED-Bench
62
- dataset_kwargs:
63
- token: True
64
- task: "seedbench_ppl"
65
- test_split: test
66
- output_type: multiple_choice
67
- doc_to_visual: !function utils.seed_doc_to_visual
68
- doc_to_text: !function utils.seed_doc_to_text_mc
69
- doc_to_choice : !function utils.seed_doc_to_choice
70
- doc_to_target: !function utils.seed_doc_to_mc_target
71
- # Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
72
- metric_list:
73
- - metric: acc
74
- metadata:
75
- - version: 0.0
76
- ```
77
-
78
- ## Configurations
79
-
80
- Tasks are configured via the `TaskConfig` object. Below, we describe all fields usable within the object, and their role in defining a task.
81
-
82
- ### Parameters
83
-
84
- Task naming + registration:
85
- - **task** (`str`, defaults to None) — name of the task.
86
- - **group** (`str`, *optional*) — name of the task group(s) a task belongs to. Enables one to run all tasks with a specified tag or group name at once.
87
-
88
- Dataset configuration options:
89
- - **dataset_path** (`str`) — The name of the dataset as listed by HF in the datasets Hub.
90
- - **dataset_name** (`str`, *optional*, defaults to None) — The name of what HF calls a “config” or sub-task of the benchmark. If your task does not contain any data instances, just leave this to default to None. (If you're familiar with the HF `datasets.load_dataset` function, these are just the first 2 arguments to it.)
91
- - **dataset_kwargs** (`dict`, *optional*) — Auxiliary arguments that `datasets.load_dataset` accepts. This can be used to specify arguments such as `data_files` or `data_dir` if you want to use local datafiles such as json or csv.
92
- - **training_split** (`str`, *optional*) — Split in the dataset to use as the training split.
93
- - **validation_split** (`str`, *optional*) — Split in the dataset to use as the validation split.
94
- - **test_split** (`str`, *optional*) — Split in the dataset to use as the test split.
95
- - **fewshot_split** (`str`, *optional*) — Split in the dataset to draw few-shot exemplars from. It is asserted that this is not None if num_fewshot > 0. **This function is not well tested so far**
96
- - **process_docs** (`Callable`, *optional*) — Optionally define a function to apply to each HF dataset split, to preprocess all documents before being fed into prompt template rendering or other evaluation steps. Can be used to rename dataset columns, or to process documents into a format closer to the expected format expected by a prompt template.
97
-
98
- Prompting / in-context formatting options:
99
- - **doc_to_text** (`Union[Callable, str]`, *optional*) — Column name or function to process a sample into the appropriate input for the model
100
- - **doc_to_visual** (`Union[Callable, str]`, *optional*) — Function to process a sample into the appropriate input images for the model.
101
- - **doc_to_target** (`Union[Callable, str]`, *optional*) — Column name or function to process a sample into the appropriate target output for the model. For multiple choice tasks, this should return an index into the list of answer choices.
102
- - **doc_to_choice** (`Union[Callable, str]`, *optional*) — Column name or function to process a sample into a list of possible string choices for `multiple_choice` tasks. Left undefined for `generate_until` tasks.
103
-
104
- Runtime configuration options:
105
- - **num_fewshot** (`int`, *optional*, defaults to 0) — Number of few-shot examples before the input. **This function is not well tested so far**
106
- - **batch_size** (`int`, *optional*, defaults to 1) — Batch size.
107
-
108
- **So far some models (such as qwen) may not support batch size > 1. Some models (such as llava) will generate different scores for different batch sizes. We recommend setting batch size to 1 for final benchmarking runs.**
109
-
110
- Scoring details:
111
- - **metric_list** (`str`, *optional*, defaults to None) — A list of metrics to use for evaluation.
112
- - **output_type** (`str`, *optional*, defaults to "generate_until") — Selects the type of model output for the given task. Options are `generate_until`, `loglikelihood`, and `multiple_choice`.
113
- - **generation_kwargs** (`dict`, *optional*) — Auxiliary arguments for the `generate` function from HF transformers library. Advanced keyword arguments may not be supported for non-HF LM classes.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/__init__.py DELETED
File without changes
lmms-eval-0.2.0.post1/lmms_eval/__main__.py DELETED
@@ -1,345 +0,0 @@
1
- import os
2
- import yaml
3
- import sys
4
- import json
5
-
6
- import traceback
7
- import argparse
8
- import numpy as np
9
- import datetime
10
-
11
- import warnings
12
- import traceback
13
-
14
- warnings.simplefilter("ignore", category=DeprecationWarning)
15
-
16
- from accelerate import Accelerator
17
- from accelerate.utils import InitProcessGroupKwargs
18
- from pathlib import Path
19
- from typing import Union
20
- import hashlib
21
-
22
- from lmms_eval import evaluator, utils
23
- from lmms_eval.tasks import initialize_tasks, include_path, get_task_dict
24
- from lmms_eval.api.registry import ALL_TASKS
25
- from lmms_eval.logging_utils import WandbLogger
26
- from loguru import logger as eval_logger
27
-
28
-
29
- def _handle_non_serializable(o):
30
- if isinstance(o, np.int64) or isinstance(o, np.int32):
31
- return int(o)
32
- elif isinstance(o, set):
33
- return list(o)
34
- else:
35
- return str(o)
36
-
37
-
38
- def parse_eval_args() -> argparse.Namespace:
39
- parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
40
- parser.add_argument("--config", default="", help="Path to a yaml file specifying all eval arguments, will ignore cli arguments if specified")
41
- parser.add_argument("--model", default="hf", help="Name of model e.g. `hf`")
42
- parser.add_argument(
43
- "--tasks",
44
- default=None,
45
- help="To get full list of tasks, use the command lmms-eval --tasks list",
46
- )
47
- parser.add_argument(
48
- "--model_args",
49
- default="",
50
- help="String arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
51
- )
52
- parser.add_argument(
53
- "--num_fewshot",
54
- type=int,
55
- default=None,
56
- help="Number of examples in few-shot context",
57
- )
58
- parser.add_argument("--batch_size", type=str, default=1)
59
- parser.add_argument(
60
- "--device",
61
- type=str,
62
- default=None,
63
- help="Device to use (e.g. cuda, cuda:0, cpu)",
64
- )
65
- parser.add_argument(
66
- "--output_path",
67
- default=None,
68
- type=str,
69
- metavar="= [dir/file.jsonl] [DIR]",
70
- help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
71
- )
72
- parser.add_argument(
73
- "--limit",
74
- type=float,
75
- default=None,
76
- help="Limit the number of examples per task. " "If <1, limit is a percentage of the total number of examples.",
77
- )
78
- parser.add_argument(
79
- "--check_integrity",
80
- action="store_true",
81
- help="Whether to run the relevant part of the test suite for the tasks",
82
- )
83
- parser.add_argument(
84
- "--show_task_to_terminal",
85
- action="store_true",
86
- default=False,
87
- help="Prints the prompt for the first few documents",
88
- )
89
- parser.add_argument(
90
- "--log_samples",
91
- action="store_true",
92
- default=False,
93
- help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis",
94
- )
95
- parser.add_argument(
96
- "--wandb_log_samples",
97
- action="store_true",
98
- default=False,
99
- help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis to Weights and Biases",
100
- )
101
- parser.add_argument(
102
- "--log_samples_suffix",
103
- type=str,
104
- default="model_outputs",
105
- help="Specify a suffix for the log_samples file name.",
106
- )
107
- parser.add_argument(
108
- "--predict_only",
109
- "-x",
110
- action="store_true",
111
- default=False,
112
- help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
113
- )
114
- parser.add_argument(
115
- "--show_config",
116
- action="store_true",
117
- default=False,
118
- help="If True, shows the the full config of all tasks at the end of the evaluation.",
119
- )
120
- parser.add_argument(
121
- "--include_path",
122
- type=str,
123
- default=None,
124
- help="Additional path to include if there are external tasks to include.",
125
- )
126
- parser.add_argument(
127
- "--gen_kwargs",
128
- default="",
129
- help=("String arguments for model generation on greedy_until tasks," " e.g. `temperature=0,top_k=0,top_p=0`"),
130
- )
131
- parser.add_argument(
132
- "--verbosity",
133
- type=str,
134
- default="INFO",
135
- help="Log error when tasks are not registered.",
136
- )
137
- parser.add_argument(
138
- "--wandb_args",
139
- default="",
140
- help="Comma separated string arguments passed to wandb.init, e.g. `project=lmms-eval,job_type=eval",
141
- )
142
- parser.add_argument(
143
- "--timezone",
144
- default="Asia/Singapore",
145
- help="Timezone for datetime string, e.g. Asia/Singapore, America/New_York, America/Los_Angeles",
146
- )
147
- args = parser.parse_args()
148
- return args
149
-
150
-
151
- def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
152
- if not args:
153
- args = parse_eval_args()
154
-
155
- # Check if no arguments were passed after parsing
156
- if len(sys.argv) == 1:
157
- print("┌───────────────────────────────────────────────────────────────────────────────┐")
158
- print("│ Please provide arguments to evaluate the model. e.g. │")
159
- print("│ `lmms-eval --model llava --model_path liuhaotian/llava-v1.6-7b --tasks okvqa` │")
160
- print("│ Use `lmms-eval --help` for more information. │")
161
- print("└───────────────────────────────────────────────────────────────────────────────┘")
162
- sys.exit(1)
163
-
164
- # reset logger
165
- eval_logger.remove()
166
- eval_logger.add(sys.stdout, colorize=True, level=args.verbosity)
167
- eval_logger.add(sys.stderr, level=args.verbosity)
168
- eval_logger.info(f"Verbosity set to {args.verbosity}")
169
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
170
-
171
- args_list = []
172
- results_list = []
173
- if args.config:
174
- if not os.path.exists(args.config):
175
- raise ValueError(f"Config file does not exist: {args.config}")
176
-
177
- with open(args.config, "r") as file:
178
- config_args = yaml.safe_load(file)
179
- config_args = [config_args] if type(config_args) != list else config_args
180
- # multiple configs, create args list first
181
- for config in config_args:
182
- args_copy = argparse.Namespace(**vars(args))
183
- for key, value in config.items():
184
- setattr(args_copy, key, value)
185
- args_list.append(args_copy)
186
- else:
187
- args_list.append(args)
188
-
189
- # initialize Accelerator
190
- kwargs_handler = InitProcessGroupKwargs(timeout=datetime.timedelta(seconds=60000))
191
- accelerator = Accelerator(kwargs_handlers=[kwargs_handler])
192
- if accelerator.is_main_process:
193
- is_main_process = True
194
- else:
195
- is_main_process = False
196
-
197
- for args in args_list:
198
- try:
199
- if is_main_process and args.wandb_args: # thoughtfully we should only init wandb once, instead of multiple ranks to avoid network traffics and unwanted behaviors.
200
- wandb_logger = WandbLogger(args)
201
-
202
- results, samples = cli_evaluate_single(args)
203
- results_list.append(results)
204
-
205
- accelerator.wait_for_everyone()
206
- if is_main_process and args.wandb_args:
207
- wandb_logger.post_init(results)
208
- wandb_logger.log_eval_result()
209
- if args.wandb_log_samples and samples is not None:
210
- wandb_logger.log_eval_samples(samples)
211
-
212
- wandb_logger.finish()
213
-
214
- except Exception as e:
215
- traceback.print_exc()
216
- eval_logger.error(f"Error during evaluation: {e}")
217
- traceback.print_exc()
218
- results_list.append(None)
219
-
220
- for args, results in zip(args_list, results_list):
221
- # cli_evaluate will return none if the process is not the main process (rank 0)
222
- if results is not None:
223
- print_results(args, results)
224
-
225
-
226
- def cli_evaluate_single(args: Union[argparse.Namespace, None] = None) -> None:
227
- initialize_tasks(args.verbosity)
228
-
229
- if args.predict_only:
230
- args.log_samples = True
231
- if (args.log_samples or args.predict_only) and not args.output_path:
232
- raise ValueError("Specify --output_path if providing --log_samples or --predict_only")
233
- if args.limit:
234
- eval_logger.warning(" --limit SHOULD ONLY BE USED FOR TESTING." "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
235
- if args.include_path is not None:
236
- eval_logger.info(f"Including path: {args.include_path}")
237
- include_path(args.include_path)
238
-
239
- if args.tasks is None:
240
- task_names = ALL_TASKS
241
- elif args.tasks == "list":
242
- eval_logger.info("Available Tasks:\n - {}".format(f"\n - ".join(sorted(ALL_TASKS))))
243
- sys.exit()
244
- elif args.tasks == "list_with_num":
245
- log_message = (
246
- "\n" + "=" * 70 + "\n" + "\n\tYou are trying to check all the numbers in each task." + "\n\tThis action will download the complete dataset." + "\n\tIf the results are not clear initially, call this again." + "\n\n" + "=" * 70
247
- )
248
- eval_logger.info(log_message)
249
- for task_name in sorted(ALL_TASKS):
250
- try:
251
- task_dict = get_task_dict([task_name], model_name="llava")
252
- task_obj = task_dict[task_name]
253
- if type(task_obj) == tuple:
254
- group, task_obj = task_obj
255
- if task_obj is None:
256
- continue
257
- eval_logger.info(f"\nTask : {task_obj.config.task}\n - #num : {len(task_obj.test_docs()) if task_obj.has_test_docs() else len(task_obj.validation_docs())}")
258
- except Exception as e:
259
- eval_logger.debug(f"\nTask : {task_name} fail to load \n Exception : \n {e}")
260
- sys.exit()
261
- else:
262
- tasks_list = args.tasks.split(",")
263
- eval_logger.info(f"Evaluating on {len(tasks_list)} tasks.")
264
- task_names = utils.pattern_match(tasks_list, ALL_TASKS)
265
- task_missing = [task for task in tasks_list if task not in task_names and "*" not in task] # we don't want errors if a wildcard ("*") task name was used
266
-
267
- if task_missing:
268
- missing = ", ".join(task_missing)
269
- eval_logger.error(
270
- f"Tasks were not found: {missing}. Try `lmms-eval --tasks list` for list of available tasks",
271
- )
272
- # eval_logger.warn(f"Tasks {missing} were not found. Try `lmms-eval --tasks list` for list of available tasks.")
273
-
274
- eval_logger.info(f"Selected Tasks: {task_names}")
275
-
276
- # set datetime before evaluation
277
- datetime_str = utils.get_datetime_str(timezone=args.timezone)
278
- if args.output_path:
279
- if args.log_samples_suffix and len(args.log_samples_suffix) > 15:
280
- eval_logger.warning("The suffix for log_samples is too long. It is recommended to keep it under 15 characters.")
281
- args.log_samples_suffix = args.log_samples_suffix[:5] + "..." + args.log_samples_suffix[-5:]
282
-
283
- hash_input = f"{args.model_args}".encode("utf-8")
284
- hash_output = hashlib.sha256(hash_input).hexdigest()[:6]
285
- path = Path(args.output_path)
286
- path = path.expanduser().resolve().joinpath(f"{datetime_str}_{args.log_samples_suffix}_{args.model}_model_args_{hash_output}")
287
- args.output_path = path
288
-
289
- elif args.log_samples and not args.output_path:
290
- assert args.output_path, "Specify --output_path"
291
-
292
- results = evaluator.simple_evaluate(
293
- model=args.model,
294
- model_args=args.model_args,
295
- tasks=task_names,
296
- num_fewshot=args.num_fewshot,
297
- batch_size=args.batch_size,
298
- device=args.device,
299
- limit=args.limit,
300
- check_integrity=args.check_integrity,
301
- show_task_to_terminal=args.show_task_to_terminal,
302
- log_samples=args.log_samples,
303
- gen_kwargs=args.gen_kwargs,
304
- cli_args=args,
305
- predict_only=args.predict_only,
306
- )
307
-
308
- if results is not None:
309
- if args.log_samples:
310
- samples = results.pop("samples")
311
- else:
312
- samples = None
313
- dumped = json.dumps(results, indent=4, default=_handle_non_serializable)
314
- if args.show_config:
315
- print(dumped)
316
-
317
- if args.output_path:
318
- args.output_path.mkdir(parents=True, exist_ok=True)
319
- result_file_path = path.joinpath("results.json")
320
- if result_file_path.exists():
321
- eval_logger.warning(f"Output file {result_file_path} already exists and will be overwritten.")
322
-
323
- result_file_path.open("w").write(dumped)
324
- if args.log_samples:
325
- for task_name, config in results["configs"].items():
326
- filename = args.output_path.joinpath(f"{task_name}.json")
327
- # Structure the data with 'args' and 'logs' keys
328
- data_to_dump = {"args": vars(args), "model_configs": config, "logs": sorted(samples[task_name], key=lambda x: x["doc_id"]), "time": datetime_str}
329
- samples_dumped = json.dumps(data_to_dump, indent=4, default=_handle_non_serializable, ensure_ascii=False)
330
- filename.open("w", encoding="utf-8").write(samples_dumped)
331
- eval_logger.info(f"Saved samples to {filename}")
332
-
333
- return results, samples
334
- return None, None
335
-
336
-
337
- def print_results(args, results):
338
- print(f"{args.model} ({args.model_args}),\ngen_kwargs: ({args.gen_kwargs}),\nlimit: {args.limit},\nnum_fewshot: {args.num_fewshot},\nbatch_size: {args.batch_size}")
339
- print(evaluator.make_table(results))
340
- if "groups" in results:
341
- print(evaluator.make_table(results, "groups"))
342
-
343
-
344
- if __name__ == "__main__":
345
- cli_evaluate()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/__init__.py DELETED
File without changes
lmms-eval-0.2.0.post1/lmms_eval/api/filter.py DELETED
@@ -1,53 +0,0 @@
1
- from dataclasses import dataclass
2
- from typing import List
3
-
4
- from lmms_eval.api.instance import Instance
5
- from datasets import Dataset
6
-
7
-
8
- class Filter:
9
- """
10
- Filter classes operate on a per-task level.
11
- They take all model outputs (`instance.resps` for all `task.instances`)
12
- across all instances of a task, and perform operations.
13
- In a single run, one can configure any number of separate filters or lists of filters.
14
-
15
- """
16
-
17
- def __init__(self, *args, **kwargs) -> None:
18
- """
19
- Can define custom behavior here, if an individual instantiation of a Filter class should have state.
20
- """
21
-
22
- def apply(self, resps, docs):
23
- """
24
- Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects.
25
- Should return the list of (filtered) response lists *in the same order as they were input*, e.g.
26
- if pass in [<inst.resps for instance 0>, <inst.resps for instance 1>] should return
27
- [<filtered resps for instance 0>, <filtered resps for instance 1>]
28
- """
29
- return resps
30
-
31
-
32
- @dataclass
33
- class FilterEnsemble:
34
- """
35
- FilterEnsemble creates a pipeline applying multiple filters.
36
- Its intended usage is to stack multiple post-processing steps in order.
37
- `task.apply_filters` should use a list of FilterEnsemble classes that it stores, to apply each
38
- pipeline separately.
39
- """
40
-
41
- name: str
42
- filters: List[Filter]
43
-
44
- def apply(self, instances: List[Instance], docs: List[Dataset]) -> None:
45
- resps = [inst.resps for inst in instances] # operate just on the model responses
46
- for f in self.filters:
47
- # apply filters in sequence
48
- resps = f.apply(resps, docs)
49
-
50
- # add the end results after filtering to filtered_requests of their respective source instances.
51
- # has key `self.name`: each FilterEnsemble applied in a given run should use a different name.
52
- for inst, resp in zip(instances, resps):
53
- inst.filtered_resps[self.name] = resp
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/instance.py DELETED
@@ -1,29 +0,0 @@
1
- from dataclasses import dataclass, field
2
- from typing import Literal, Tuple
3
-
4
-
5
- @dataclass
6
- class Instance:
7
- request_type: Literal["loglikelihood", "generate_until"]
8
- arguments: tuple
9
- idx: int
10
- metadata: Tuple[str, int, int] = field(default_factory=lambda: (None, None, None)) # TODO: better typehints here
11
- resps: list = field(default_factory=list)
12
- filtered_resps: dict = field(default_factory=dict)
13
-
14
- # initialized after init
15
- task_name: str = None
16
- doc_id: str = None
17
- repeats: str = None
18
- doc: dict = None
19
-
20
- def __post_init__(self) -> None:
21
- # unpack metadata field
22
- self.task_name, self.doc_id, self.repeats = self.metadata
23
-
24
- @property
25
- def args(self):
26
- """
27
- Returns (string,) where `string` is the string to calculate loglikelihood over
28
- """
29
- return self.arguments if isinstance(self.arguments, tuple) else (self.arguments,)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/metrics.py DELETED
@@ -1,443 +0,0 @@
1
- import math
2
- from collections.abc import Iterable
3
-
4
- import numpy as np
5
- import sacrebleu
6
- import sklearn.metrics
7
- import random
8
- import evaluate
9
- import torch
10
-
11
- from lmms_eval.api.registry import register_metric, register_aggregation
12
- from loguru import logger as eval_logger
13
-
14
-
15
- # Register Aggregations First
16
- @register_aggregation("bypass")
17
- def bypass_agg(arr):
18
- return 999
19
-
20
-
21
@register_aggregation("mean")
def mean(arr):
    # Arithmetic mean of per-item scores.
    return sum(arr) / len(arr)
24
-
25
-
26
@register_aggregation("median")
def median(arr):
    """Return the median element of `arr`.

    The input is sorted first: the previous implementation indexed the raw
    list, which only yields the true median when callers happen to pre-sort
    their scores. For even-length input this keeps the historical convention
    of returning the upper-middle element (no averaging of the two middles).
    """
    return sorted(arr)[len(arr) // 2]
29
-
30
-
31
- # Certain metrics must be calculated across all documents in a benchmark.
32
- # We use them as aggregation metrics, paired with no-op passthrough metric fns.
33
- @register_aggregation("perplexity")
34
- def perplexity(items):
35
- # return math.exp(-mean(items))
36
- items = torch.exp(torch.tensor(items)).tolist()
37
- return sum(items) / len(items)
38
-
39
-
40
- @register_aggregation("weighted_perplexity")
41
- def weighted_perplexity(items):
42
- return math.exp(-weighted_mean(items))
43
-
44
-
45
- @register_aggregation("bits_per_byte")
46
- def bits_per_byte(items):
47
- return -weighted_mean(items) / math.log(2)
48
-
49
-
50
@register_aggregation("f1")
def f1_score(items):
    """Aggregate (gold, prediction) pairs into a binary F1 score via scikit-learn."""
    golds, preds = zip(*items)
    fscore = sklearn.metrics.f1_score(golds, preds)
    return np.max(fscore)
58
-
59
-
60
- @register_aggregation("matthews_corrcoef")
61
- def matthews_corrcoef(items):
62
- unzipped_list = list(zip(*items))
63
- golds = unzipped_list[0]
64
- preds = unzipped_list[1]
65
- # print(preds)
66
- return sklearn.metrics.matthews_corrcoef(golds, preds)
67
-
68
-
69
- @register_aggregation("bleu")
70
- def bleu(items):
71
- """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
72
- for evaluating a generated sentence to a reference sentence. It counts matching
73
- n-grams in the candidate translation to n-grams in the reference text, where
74
- 1-gram or unigram would be each token and a bigram comparison would be each
75
- word pair. The comparison is made regardless of word order
76
- Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
77
- Paper: https://www.aclweb.org/anthology/P02-1040/
78
-
79
- Higher is better
80
- """
81
- refs = list(zip(*items))[0]
82
- preds = list(zip(*items))[1]
83
- refs, preds = _sacreformat(refs, preds)
84
- return sacrebleu.corpus_bleu(preds, refs).score
85
-
86
-
87
- @register_aggregation("chrf")
88
- def chrf(items):
89
- """chrF++ is a tool for automatic evaluation of machine translation output
90
- based on character n-gram precision and recall enhanced with word n-grams.
91
- Source: https://github.com/m-popovic/chrF
92
- Paper: https://www.aclweb.org/anthology/W15-3049.pdf
93
-
94
- Higher is better # TODO I think
95
- """
96
- refs = list(zip(*items))[0]
97
- preds = list(zip(*items))[1]
98
- refs, preds = _sacreformat(refs, preds)
99
- return sacrebleu.corpus_chrf(preds, refs).score
100
-
101
-
102
- @register_aggregation("ter")
103
- def ter(items):
104
- """Translation Error Rate is an error metric for machine translation that
105
- measures the number of edits required to change a system output into one
106
- of the references
107
- Source: http://www.cs.umd.edu/~snover/tercom/
108
- Paper: http://mt-archive.info/AMTA-2006-Snover.pdf
109
-
110
- Lower is better
111
- """
112
- refs = list(zip(*items))[0]
113
- preds = list(zip(*items))[1]
114
- refs, preds = _sacreformat(refs, preds)
115
- return sacrebleu.corpus_ter(preds, refs).score
116
-
117
-
118
- @register_metric(
119
- metric="acc",
120
- higher_is_better=True,
121
- output_type=["loglikelihood", "multiple_choice"],
122
- aggregation="mean",
123
- )
124
- def acc_fn(items): # This is a passthrough function
125
- return items
126
-
127
-
128
- @register_metric(
129
- metric="acc_norm",
130
- higher_is_better=True,
131
- output_type=["loglikelihood", "multiple_choice"],
132
- aggregation="mean",
133
- )
134
- def acc_norm_fn(items): # This is a passthrough function
135
- return items
136
-
137
-
138
- @register_metric(
139
- metric="acc_mutual_info",
140
- higher_is_better=True,
141
- output_type="multiple_choice",
142
- aggregation="mean",
143
- )
144
- def acc_mutual_info_fn(items): # This is a passthrough function
145
- return items
146
-
147
-
148
- exact_match = evaluate.load("exact_match")
149
-
150
-
151
- @register_metric(
152
- metric="exact_match",
153
- higher_is_better=True,
154
- output_type="generate_until",
155
- aggregation="mean",
156
- )
157
- def exact_match_fn(**kwargs):
158
- return exact_match.compute(**kwargs)
159
-
160
-
161
- @register_metric(
162
- metric="perplexity",
163
- higher_is_better=False,
164
- output_type="loglikelihood",
165
- aggregation="perplexity",
166
- )
167
- def perplexity_fn(items): # This is a passthrough function
168
- return items
169
-
170
-
171
def levenshtein_distance(s1, s2):
    """Classic dynamic-programming edit distance (insert/delete/substitute).

    Only one row of the DP table is kept at a time, so memory usage is
    O(min(len(s1), len(s2))).
    """
    # Keep the shorter string as the row dimension to minimise memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1

    previous_row = list(range(len(s1) + 1))
    for row_idx, ch2 in enumerate(s2):
        current_row = [row_idx + 1]
        for col_idx, ch1 in enumerate(s1):
            if ch1 == ch2:
                # Characters match: no additional edit needed.
                current_row.append(previous_row[col_idx])
            else:
                # 1 + min(substitute, delete, insert)
                current_row.append(1 + min(previous_row[col_idx], previous_row[col_idx + 1], current_row[-1]))
        previous_row = current_row
    return previous_row[-1]
185
-
186
-
187
@register_metric(
    metric="anls",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="mean",
)
def anls(
    references,
    predictions,
    thresh_hold=0.5,
):
    """Average Normalized Levenshtein Similarity (ANLS) for one question.

    https://github.com/QwenLM/Qwen-VL/blob/master/eval_mm/infographicsvqa_eval.py

    Scores the first prediction against every reference answer, keeps the best
    (lowest normalized edit distance), and zeroes the score when similarity
    falls below `thresh_hold`, per the standard ANLS protocol.
    """
    if not references or not predictions:
        # Previously `min(values)` raised ValueError on an empty reference
        # list; with nothing to compare against, the question scores 0.
        return {"anls": 0}

    # Normalizing the prediction is loop-invariant; do it once.
    det_answer = " ".join(predictions[0].strip().lower().split())
    values = []
    for answer in references:
        # preprocess the ground-truth answer the same way
        gt_answer = " ".join(answer.strip().lower().split())
        dist = levenshtein_distance(gt_answer, det_answer)
        # NOTE(review): length uses the *unnormalized* strings, mirroring the
        # upstream Qwen-VL reference implementation.
        length = max(len(answer.upper()), len(predictions[0].upper()))
        values.append(0.0 if length == 0 else float(dist) / float(length))

    question_result = 1 - min(values)

    if question_result < thresh_hold:
        question_result = 0
    return {"anls": question_result}
215
-
216
-
217
def pop_stddev(arr):
    """Population standard deviation (divides by N, not N-1)."""
    mu = mean(arr)
    return math.sqrt(sum((x - mu) ** 2 for x in arr) / len(arr))
220
-
221
-
222
def sample_stddev(arr):
    """Sample standard deviation with Bessel's correction (divides by N-1)."""
    mu = mean(arr)
    return math.sqrt(sum((x - mu) ** 2 for x in arr) / (len(arr) - 1))
225
-
226
-
227
def mean_stderr(arr):
    """Standard error of the mean: sample stddev scaled by sqrt(N)."""
    return sample_stddev(arr) / math.sqrt(len(arr))
229
-
230
-
231
- @register_metric(
232
- metric="bypass",
233
- higher_is_better=True,
234
- output_type=["loglikelihood", "multiple_choice", "generate_until"],
235
- aggregation="bypass",
236
- )
237
- def bypass(items):
238
- return items
239
-
240
-
241
- @register_metric(
242
- metric="mcc",
243
- higher_is_better=True,
244
- output_type="multiple_choice",
245
- aggregation="matthews_corrcoef",
246
- )
247
- def mcc_fn(items): # This is a passthrough function
248
- return items
249
-
250
-
251
- @register_metric(
252
- metric="f1",
253
- higher_is_better=True,
254
- output_type="multiple_choice",
255
- aggregation="f1",
256
- )
257
- def f1_fn(items): # This is a passthrough function
258
- return items
259
-
260
-
261
- @register_metric(
262
- metric="bleu",
263
- higher_is_better=True,
264
- output_type="generate_until",
265
- aggregation="bleu",
266
- )
267
- def bleu_fn(items): # This is a passthrough function
268
- return items
269
-
270
-
271
- @register_metric(
272
- metric="chrf",
273
- higher_is_better=True,
274
- output_type="generate_until",
275
- aggregation="chrf",
276
- )
277
- def chrf_fn(items): # This is a passthrough function
278
- return items
279
-
280
-
281
@register_metric(
    metric="ter",
    # TER measures the number of edits needed to match a reference, so fewer
    # is better — the `ter` aggregation's docstring says "Lower is better".
    # This was wrongly registered as higher_is_better=True.
    higher_is_better=False,
    output_type="generate_until",
    aggregation="ter",
)
def ter_fn(items):  # This is a passthrough function
    """Passthrough for Translation Error Rate; the `ter` aggregation does the work."""
    return items
289
-
290
-
291
@register_metric(
    metric="acc_all",
    higher_is_better=True,
    output_type="loglikelihood",
    aggregation="mean",
)
def acc_all(items):
    # Only count as correct if all answers are labeled correctly for each question
    # `items` is a list of (pred, doc) pairs; each doc carries MultiRC-style
    # nested indices under doc["idx"] identifying its (paragraph, question).
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]

    for doc, pred in zip(docs, preds):
        paragraph_id = doc["idx"]["paragraph"]
        question_id = doc["idx"]["question"]
        if (paragraph_id, question_id) not in question_scoring_dict:
            question_scoring_dict[(paragraph_id, question_id)] = []

        # doc["label"] == 1 marks a candidate answer as correct.
        gold_label = doc["label"] == 1

        question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
    # A question scores 1 only when every one of its candidate answers was
    # judged correctly; the metric is the mean over questions.
    acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
    return acc
314
-
315
-
316
- def acc_all_stderr(items):
317
- # Only count as correct if all answers are labeled correctly for each question
318
- question_scoring_dict = {}
319
- preds = list(zip(*items))[0]
320
- docs = list(zip(*items))[1]
321
-
322
- for doc, pred in zip(docs, preds):
323
- question_id = doc["idx"]["question"]
324
- if question_id not in question_scoring_dict:
325
- question_scoring_dict[question_id] = []
326
-
327
- gold_label = doc["label"] == 1
328
- question_scoring_dict[question_id].append(gold_label == pred)
329
-
330
- acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
331
- return acc
332
-
333
-
334
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth."""
    return max(metric_fn(prediction, ground_truth) for ground_truth in ground_truths)
341
-
342
-
343
def weighted_mean(items):
    """`items` is a list of (numerator, denominator) pairs; return the ratio of sums."""
    numerators, denominators = zip(*items)
    return sum(numerators) / sum(denominators)
346
-
347
-
348
def is_non_str_iterable(obj):
    """True for any iterable that is not a plain string."""
    return not isinstance(obj, str) and isinstance(obj, Iterable)
350
-
351
-
352
def _sacreformat(refs, preds):
    """Format refs and preds for sacrebleu corpus calculation. It is very particular"""
    # Sacrebleu expects (List[str], List[List[str])
    # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])

    # Note [ref1_stream] is the first reference for each pred.
    # So lists are size N and (M, N) for N preds and M possible refs for each pred
    # This is a different order of dimensions that I would expect

    # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
    # Must become List[List[str]] with the inner list corresponding to preds
    if not is_non_str_iterable(refs):
        refs = list(refs)
    if not is_non_str_iterable(refs[0]):
        refs = [[ref] for ref in refs]
    # Transpose: outer dimension becomes "reference stream" rather than "prediction".
    refs = list(zip(*refs))
    # Note the number of refs in each ref list much match the number of preds

    # We expect preds to be List[str] or List[List[str]]. Must become List[str]
    if not is_non_str_iterable(preds):
        preds = list(preds)
    if is_non_str_iterable(preds[0]):
        # A nested pred list is only tolerated when each entry is a singleton.
        assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
        preds = [pred[0] for pred in preds]

    return refs, preds
378
-
379
-
380
- # stderr stuff
381
-
382
-
383
- class _bootstrap_internal:
384
- def __init__(self, f, n) -> None:
385
- self.f = f
386
- self.n = n
387
-
388
- def __call__(self, v):
389
- i, xs = v
390
- rnd = random.Random()
391
- rnd.seed(i)
392
- res = []
393
- for _ in range(self.n):
394
- res.append(self.f(rnd.choices(xs, k=len(xs))))
395
- return res
396
-
397
-
398
def bootstrap_stderr(f, xs, iters):
    """Estimate the standard error of statistic `f` over sample `xs` by bootstrap.

    Resampling is fanned out across all CPU cores in chunks of up to 1000.
    This gives a biased estimate of the stderr (i.e. w/ the mean, it gives
    something equivalent to stderr calculated without Bessel's correction in
    the stddev) — negligible for the large sample sizes used in practice.
    """
    import multiprocessing as mp

    from tqdm import tqdm

    chunk_size = min(1000, iters)
    res = []
    print("bootstrapping for stddev:", f.__name__)
    # Context-manage the pool so workers are always cleaned up, even if `f`
    # raises mid-run (the previous version leaked the pool on error).
    with mp.Pool(mp.cpu_count()) as pool:
        for bootstrap in tqdm(
            pool.imap(
                _bootstrap_internal(f, chunk_size),
                [(i, xs) for i in range(iters // chunk_size)],
            ),
            total=iters // chunk_size,
        ):
            # sample w/ replacement; each chunk contributes `chunk_size` stats
            res.extend(bootstrap)

    return sample_stddev(res)
425
-
426
-
427
def stderr_for_metric(metric, bootstrap_iters):
    # Metrics whose stderr has no simple closed form get a bootstrap estimator.
    bootstrappable = [
        median,
        matthews_corrcoef,
        f1_score,
        perplexity,
        bleu,
        chrf,
        ter,
    ]

    if metric in bootstrappable:
        return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)

    # Closed-form stderr where one is known; otherwise None (no stderr reported).
    stderr = {mean: mean_stderr, acc_all: acc_all_stderr}

    return stderr.get(metric, None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/model.py DELETED
@@ -1,203 +0,0 @@
1
- import abc
2
- import os
3
-
4
- from typing import Union, List, Tuple, Optional, Type, TypeVar
5
- from sqlitedict import SqliteDict
6
- import json
7
- import hashlib
8
- from lmms_eval.api.instance import Instance
9
- from tqdm import tqdm
10
- from lmms_eval import utils
11
-
12
-
13
- from loguru import logger as eval_logger
14
-
15
- T = TypeVar("T", bound="lmms")
16
-
17
-
18
- class lmms(abc.ABC):
19
- def __init__(self) -> None:
20
- """Defines the interface that should be implemented by all lmms subclasses.
21
- lmmss are assumed to take image-text as input and yield strings as output
22
- (inputs/outputs should be tokenization-agnostic.)
23
- """
24
- # set rank and world size to a single process, by default.
25
- self._rank = 0
26
- self._world_size = 1
27
- self.cache_hook = CacheHook(None)
28
- self.task_dict = {}
29
-
30
- @abc.abstractmethod
31
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
32
- """Compute log-likelihood of generating a continuation from a context.
33
- Downstream tasks should attempt to use loglikelihood instead of other
34
- LMM calls whenever possible.
35
-
36
- :param requests: list[Instance]
37
- A list of Instance objects, with property `args` which returns a tuple (context, continuation).
38
- `context: str`
39
- Context string. Implementations of LMM must be able to handle an
40
- empty context string.
41
- `continuation: str`
42
- The continuation over which log likelihood will be calculated. If
43
- there is a word boundary, the space should be in the continuation.
44
- For example, context="hello" continuation=" world" is correct.
45
- 'visual_list: list[dict]'
46
- Visual input to the model. Can be None.
47
-
48
- :return: list[tuple[float, bool]]
49
- A list of pairs (logprob, isgreedy)
50
- `logprob: float`
51
- The log probability of `continuation`.
52
- `isgreedy`:
53
- Whether `continuation` would be generated by greedy sampling from `context`.
54
- """
55
- pass
56
-
57
- # TODO: Add an optional max length
58
- @abc.abstractmethod
59
- def generate_until(self, requests) -> List[str]:
60
- """Generate greedily until a stopping sequence
61
-
62
- :param requests: list[Instance]
63
- A list of Instance objects with property `args` which returns a tuple (context, until).
64
- context: str
65
- Context string
66
- generation_kwargs: dict
67
- Generation Kwargs
68
- 'visual_list: list[dict]'
69
- Visual input to the model. Can be None.
70
- :return: list[str]
71
- A list of strings continuation
72
- continuation: str
73
- The generated continuation.
74
- """
75
- pass
76
-
77
- @classmethod
78
- def create_from_arg_string(cls: Type[T], arg_string: str, additional_config: Optional[dict] = None) -> T:
79
- """
80
- Creates an instance of the LMM class using the given argument string and additional config.
81
-
82
- Parameters:
83
- - arg_string: A string containing arguments in the format key1=value1,key2=value2.
84
- - additional_config: Optional dictionary containing additional configuration parameters.
85
-
86
- Returns:
87
- - Instance of the LMM class.
88
- """
89
- additional_config = {} if additional_config is None else additional_config
90
- args = utils.simple_parse_args_string(arg_string)
91
- args2 = {k: v for k, v in additional_config.items() if v is not None}
92
- return cls(**args, **args2)
93
-
94
- @property
95
- def rank(self):
96
- # used in the case of parallelism. Hardcoded to
97
- # ensure no errors arise using API models which do
98
- # not support multi-device parallelism nor expect it.
99
- return self._rank
100
-
101
- @property
102
- def world_size(self):
103
- # used in the case of parallelism. Hardcoded to
104
- # ensure no errors arise using API models which do
105
- # not support multi-device parallelism nor expect it.
106
- return self._world_size
107
-
108
- def set_cache_hook(self, cache_hook) -> None:
109
- self.cache_hook = cache_hook
110
-
111
-
112
- ### SQLite-based caching of LMM responses
113
def hash_args(attr, args):
    """Stable cache key: SHA-256 hex digest of the JSON dump of [attr, *args]."""
    payload = json.dumps([attr] + list(args))
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()
116
-
117
-
118
class CacheHook:
    """Lets a wrapped model record its responses into a CachingLMM's store.

    Constructed with ``None`` (no caching LMM), every call becomes a no-op.
    """

    def __init__(self, cachinglm) -> None:
        self.dbdict = cachinglm.dbdict if cachinglm is not None else None

    def add_partial(self, attr, req, res) -> None:
        """Store one (request -> response) pair under its hashed key."""
        if self.dbdict is None:
            return
        self.dbdict[hash_args(attr, req)] = res
131
-
132
-
133
class CachingLMM:
    def __init__(self, lm, cache_db) -> None:
        """LMM wrapper that returns cached results if they exist, and uses the underlying LMM if not.

        :param lm: LMM
            Underlying LMM
        :param cache_db: str
            Path to cache db
        """
        self.lm = lm
        self.cache_db = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr):
        # Only invoked for attributes not found on CachingLMM itself: proxies
        # everything to the wrapped LMM, wrapping callables in caching logic.
        lm_attr = getattr(self.lm, attr)
        if not callable(lm_attr):
            return lm_attr

        def fn(requests):
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(f"Loading '{attr}' responses from cache '{self.cache_db}' where possible...")
            for req in tqdm(requests):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests.")
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    # None is a placeholder; it gets filled from `rem_res` below.
                    res.append(None)
                    remaining_reqs.append(req)

            # actually run the LMM on the requests that do not have cached results
            rem_res = getattr(self.lm, attr)(remaining_reqs)

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                # advance past slots already filled from the cache
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
                self.dbdict.commit()

            return res

        return fn

    def get_cache_hook(self):
        # Hook handed to the wrapped model so it can write into this cache.
        return CacheHook(self)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/registry.py DELETED
@@ -1,156 +0,0 @@
1
- from lmms_eval.api.model import lmms
2
-
3
- from typing import Callable, Dict
4
- import evaluate as hf_evaluate
5
-
6
- from loguru import logger as eval_logger
7
-
8
- MODEL_REGISTRY = {}
9
-
10
-
11
def register_model(*names):
    """Class decorator registering an lmms subclass under one or more aliases."""
    # either pass a list or a single alias.
    # function receives them as a tuple of strings

    def decorate(cls):
        for name in names:
            # Registration is validated eagerly so a bad alias fails at import.
            assert issubclass(cls, lmms), f"Model '{name}' ({cls.__name__}) must extend lmms class"
            assert name not in MODEL_REGISTRY, f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead."
            MODEL_REGISTRY[name] = cls
        return cls

    return decorate
25
-
26
-
27
def get_model(model_name):
    """Return the registered model class for `model_name`, or raise ValueError."""
    try:
        return MODEL_REGISTRY[model_name]
    except KeyError:
        supported = ", ".join(MODEL_REGISTRY.keys())
        raise ValueError(f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {supported}")
32
-
33
-
34
- TASK_REGISTRY = {} # Key: task name, Value: task ConfigurableTask class
35
- GROUP_REGISTRY = {} # Key: group name, Value: list of task names or group names
36
- ALL_TASKS = set() # Set of all task names and group names
37
- func2task_index = {} # Key: task ConfigurableTask class, Value: task name
38
-
39
-
40
- def register_task(name):
41
- def decorate(fn):
42
- assert name not in TASK_REGISTRY, f"task named '{name}' conflicts with existing registered task!"
43
-
44
- TASK_REGISTRY[name] = fn
45
- ALL_TASKS.add(name)
46
- func2task_index[fn.__name__] = name
47
- return fn
48
-
49
- return decorate
50
-
51
-
52
- def register_group(name):
53
- def decorate(fn):
54
- func_name = func2task_index[fn.__name__]
55
- if name in GROUP_REGISTRY:
56
- GROUP_REGISTRY[name].append(func_name)
57
- else:
58
- GROUP_REGISTRY[name] = [func_name]
59
- ALL_TASKS.add(name)
60
- return fn
61
-
62
- return decorate
63
-
64
-
65
- OUTPUT_TYPE_REGISTRY = {}
66
- METRIC_REGISTRY = {}
67
- METRIC_AGGREGATION_REGISTRY = {}
68
- AGGREGATION_REGISTRY = {}
69
- HIGHER_IS_BETTER_REGISTRY = {}
70
-
71
- DEFAULT_METRIC_REGISTRY = {
72
- "loglikelihood": [
73
- "perplexity",
74
- "acc",
75
- ],
76
- "multiple_choice": ["acc", "acc_norm"],
77
- "generate_until": ["exact_match"],
78
- }
79
-
80
-
81
def register_metric(**args):
    """Decorator factory registering a metric function plus its metadata.

    Recognized keys in ``args``: ``metric`` (required; the metric's name),
    ``higher_is_better``, ``aggregation``, and ``output_type``.
    """

    # TODO: do we want to enforce a certain interface to registered metrics?
    def decorate(fn):
        assert "metric" in args
        name = args["metric"]

        for key, registry in [
            ("metric", METRIC_REGISTRY),
            ("higher_is_better", HIGHER_IS_BETTER_REGISTRY),
            ("aggregation", METRIC_AGGREGATION_REGISTRY),
        ]:
            if key in args:
                value = args[key]
                # All three registries are keyed by the metric *name*. The old
                # check was `value not in registry`, which tested the metadata
                # value (e.g. True/False or "mean") against the keys, so
                # duplicate registrations were never actually caught.
                assert name not in registry, f"{key} named '{name}' conflicts with existing registered {key}!"

                if key == "metric":
                    registry[name] = fn
                elif key == "aggregation":
                    # Resolve the aggregation name to its registered function.
                    registry[name] = AGGREGATION_REGISTRY[value]
                else:
                    registry[name] = value

        return fn

    return decorate
106
-
107
-
108
def get_metric(name: str, hf_evaluate_metric=False) -> Callable:
    # Resolve a metric by name: prefer the local registry, then fall back to
    # the HF Evaluate hub. NOTE(review): on total failure this only logs an
    # error and implicitly returns None — callers must tolerate a None metric.
    if not hf_evaluate_metric:
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        else:
            eval_logger.warning(f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library...")

    try:
        metric_object = hf_evaluate.load(name)
        return metric_object.compute
    except Exception:
        eval_logger.error(
            f"{name} not found in the evaluate library! Please check https://huggingface.co/evaluate-metric",
        )
122
-
123
-
124
def register_aggregation(name):
    """Decorator registering an aggregation function under `name`."""

    def decorate(fn):
        assert name not in AGGREGATION_REGISTRY, f"aggregation named '{name}' conflicts with existing registered aggregation!"
        AGGREGATION_REGISTRY[name] = fn
        return fn

    return decorate
132
-
133
-
134
def get_aggregation(name):
    """Fetch a registered aggregation function; warn (and return None) if absent."""
    if name in AGGREGATION_REGISTRY:
        return AGGREGATION_REGISTRY[name]
    eval_logger.warning(
        "{} not a registered aggregation metric!".format(name),
    )
141
-
142
-
143
- def get_metric_aggregation(name):
144
- try:
145
- return METRIC_AGGREGATION_REGISTRY[name]
146
- except KeyError:
147
- eval_logger.warning(
148
- "{} metric is not assigned a default aggregation!".format(name),
149
- )
150
-
151
-
152
- def is_higher_better(metric_name):
153
- try:
154
- return HIGHER_IS_BETTER_REGISTRY[metric_name]
155
- except KeyError:
156
- eval_logger.warning(f"higher_is_better not specified for metric '{metric_name}'!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/samplers.py DELETED
@@ -1,94 +0,0 @@
1
class ContextSampler:
    """Draws few-shot example docs and renders them into a prompt prefix."""

    def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None:
        self.rnd = rnd
        assert self.rnd, "must pass rnd to FewShotSampler!"

        self.task = task
        self.config = task._config

        # Delimiters between a question and its answer / between examples.
        self.target_delimiter = self.config.target_delimiter
        self.fewshot_delimiter = self.config.fewshot_delimiter

        self.doc_to_text = self.task.doc_to_text
        self.doc_to_target = self.task.doc_to_target
        self.doc_to_choice = self.task.doc_to_choice

        self.docs = docs  # HF dataset split, provided by task._fewshot_docs()
        if fewshot_indices:  # subset few-shot docs from
            self.docs = self.docs.select(fewshot_indices)

    def get_context(self, doc, num_fewshot):
        # draw an extra fewshot sample if using same split as evaluating on
        n_samples = num_fewshot + 1 if self.config.fewshot_split == self.config.test_split else num_fewshot

        # draw `n_samples` docs from fewshot_docs
        fewshotex = self.sample(n_samples)

        # get rid of the doc that's the one we're evaluating, if it's in the fewshot
        # TODO: should we just stop people from using fewshot from same split as evaluating?
        selected_docs = [x for x in fewshotex if x != doc][:num_fewshot]

        # Render each selected doc as "<question><target_delimiter><answer>".
        # doc_to_text / doc_to_target may return either a rendered string or an
        # index into doc_to_choice; both cases are handled inline below.
        labeled_examples = (
            self.fewshot_delimiter.join(
                [
                    # TODO: is separating doc_to_text and doc_to_target by one space always desired?
                    (self.doc_to_text(doc) if (self.config.doc_to_choice is None or type(self.doc_to_text(doc)) is str) else self.doc_to_choice(doc)[self.doc_to_text(doc)])
                    + self.target_delimiter
                    + (
                        str(self.doc_to_target(doc)[0])
                        if type(self.doc_to_target(doc)) is list
                        else self.doc_to_target(doc) if (self.config.doc_to_choice is None or type(self.doc_to_target(doc)) is str) else str(self.doc_to_choice(doc)[self.doc_to_target(doc)])
                    )
                    for doc in selected_docs
                ]
            )
            + self.fewshot_delimiter
        )

        return labeled_examples

    def sample(self, n):
        """
        Draw `n` samples from our fewshot docs. This method should be overridden by subclasses.
        """

        return self.rnd.sample(self.docs, n)
56
-
57
-
58
class FirstNSampler(ContextSampler):
    def sample(self, n):
        """
        Draw the first `n` samples in order from the specified split.
        Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU.
        """
        # NOTE(review): this was annotated `-> None` upstream, but it clearly
        # returns a slice of the docs; annotation removed as misleading.
        assert n <= len(self.docs), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available."
        return self.docs[:n]
66
-
67
-
68
- class BalancedSampler(ContextSampler):
69
- def sample(self, n) -> None:
70
- """
71
- TODO: this should return approximately class-balanced samples from our fewshot examples.
72
- TODO: what order should they be in? maybe random?
73
- """
74
-
75
- pass
76
-
77
-
78
- class ManualSampler(ContextSampler):
79
- def sample(self, n) -> None:
80
- """ """
81
- pass
82
-
83
-
84
- SAMPLER_REGISTRY = {
85
- "default": ContextSampler,
86
- "first_n": FirstNSampler,
87
- }
88
-
89
-
90
def get_sampler(name):
    # Resolve a fewshot sampling strategy by name from SAMPLER_REGISTRY.
    try:
        return SAMPLER_REGISTRY[name]
    except KeyError:
        raise ValueError(f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/api/task.py DELETED
@@ -1,1297 +0,0 @@
1
- import abc
2
- import ast
3
- import itertools
4
- import json
5
-
6
- import os
7
- import random
8
- import re
9
- import shutil
10
- import subprocess
11
- from collections.abc import Callable
12
- from dataclasses import dataclass, field, asdict
13
- from glob import glob
14
- from typing import Any, List, Union
15
-
16
- import datasets
17
- import numpy as np
18
- from PIL import ImageFile
19
- from datasets import DownloadConfig, Image, Sequence
20
- from huggingface_hub import snapshot_download
21
- from tenacity import retry, stop_after_attempt, wait_fixed, stop_after_delay
22
- from tqdm import tqdm
23
-
24
- from accelerate import Accelerator
25
- from lmms_eval import utils
26
- from lmms_eval.api import samplers
27
- from lmms_eval.api.instance import Instance
28
- from lmms_eval.api.registry import (
29
- AGGREGATION_REGISTRY,
30
- DEFAULT_METRIC_REGISTRY,
31
- METRIC_REGISTRY,
32
- OUTPUT_TYPE_REGISTRY,
33
- get_aggregation,
34
- get_metric,
35
- get_metric_aggregation,
36
- is_higher_better,
37
- )
38
- from lmms_eval.filters import build_filter_ensemble
39
-
40
- from loguru import logger as eval_logger
41
-
42
- # HuggingfaceM4/NoCaps contains truncated image in test split
43
- # Include this inside code block to avoid error
44
- ImageFile.LOAD_TRUNCATED_IMAGES = True
45
-
46
- ALL_OUTPUT_TYPES = [
47
- "loglikelihood",
48
- "multiple_choice",
49
- "generate_until",
50
- ]
51
-
52
-
53
- @dataclass
54
- class TaskConfig(dict):
55
- # task naming/registry
56
- task: str = None
57
- task_alias: str = None
58
- group: Union[str, list] = None
59
- group_alias: Union[str, list] = None
60
- # HF dataset options.
61
- # which dataset to use,
62
- # and what splits for what purpose
63
- dataset_path: str = None
64
- dataset_name: str = None
65
- dataset_kwargs: dict = None
66
- training_split: str = None
67
- validation_split: str = None
68
- test_split: str = None
69
- fewshot_split: str = None # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?)
70
- # formatting / prompting options.
71
- # see docs/advanced_task_guide.md for more info
72
- process_docs: Callable = None
73
- doc_to_visual: Union[Callable, str] = None
74
- doc_to_text: Union[Callable, str] = None
75
- doc_to_target: Union[Callable, str] = None
76
- doc_to_choice: Union[Callable, str, dict, list] = None
77
- process_results: Union[Callable, str] = None
78
- use_prompt: str = None
79
- description: str = ""
80
- target_delimiter: str = " "
81
- fewshot_delimiter: str = "\n\n"
82
- fewshot_config: dict = None
83
- # runtime configuration options
84
- num_fewshot: int = None
85
- # scoring options
86
- metric_list: list = None
87
- output_type: str = "generate_until"
88
- generation_kwargs: dict = None
89
- repeats: int = 1
90
- filter_list: Union[str, list] = None
91
- should_decontaminate: bool = False
92
- doc_to_decontamination_query: str = None
93
-
94
- metadata: Union[str, list] = None # by default, not used in the code. allows for users to pass arbitrary info to tasks
95
-
96
- model_specific_prompt_kwargs: dict = None
97
- model_specific_generation_kwargs: dict = None
98
- model_specific_target_kwargs: dict = None
99
-
100
- def __post_init__(self) -> None:
101
- if self.dataset_path and os.path.exists(os.path.dirname(self.dataset_path)):
102
- import inspect
103
- from importlib import import_module
104
-
105
- # self.dataset_path = inspect.getfile(import_module(self.dataset_path))
106
-
107
- if self.generation_kwargs is not None:
108
- if self.output_type != "generate_until":
109
- eval_logger.warning(f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!")
110
- assert self.output_type != "generate_until"
111
-
112
- if "temperature" in self.generation_kwargs:
113
- self.generation_kwargs["temperature"] = float(self.generation_kwargs["temperature"])
114
-
115
- if "until" not in self.generation_kwargs:
116
- self.generation_kwargs["until"] = [self.fewshot_delimiter]
117
- else:
118
- if self.output_type == "generate_until":
119
- # ensure that we greedily generate in absence of explicit arguments otherwise
120
- self.generation_kwargs = {
121
- "until": None if self.fewshot_delimiter is None else [self.fewshot_delimiter],
122
- "do_sample": False,
123
- }
124
-
125
- # TODO: how to make TaskConfigs be de- and re-serializable, even when using the !function constructor?
126
-
127
- def __getitem__(self, item):
128
- return getattr(self, item)
129
-
130
- def __setitem__(self, item, value):
131
- return setattr(self, item, value)
132
-
133
- def to_dict(self):
134
- """dumps the current config as a dictionary object, as a printable format.
135
- null fields will not be printed.
136
- Used for dumping results alongside full task configuration
137
-
138
- :return: dict
139
- A printable dictionary version of the TaskConfig object.
140
-
141
- # TODO: should any default value in the TaskConfig not be printed?
142
- """
143
- cfg_dict = asdict(self)
144
- # remove values that are `None`
145
- for k, v in list(cfg_dict.items()):
146
- if v is None:
147
- cfg_dict.pop(k)
148
- elif isinstance(v, Callable):
149
- # TODO: this should handle Promptsource template objects as a separate case?
150
- cfg_dict[k] = str(v)
151
- return cfg_dict
152
-
153
-
154
- class Task(abc.ABC):
155
- """A task represents an entire benchmark including its dataset, problems,
156
- answers, and evaluation methods. See BoolQ for a simple example implementation
157
-
158
- A `doc` can be any python object which represents one instance of evaluation.
159
- This is usually a dictionary e.g.
160
- {"question": ..., "answer": ...} or
161
- {"question": ..., question, answer)
162
- """
163
-
164
- VERSION = None
165
-
166
- # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
167
- # or a path to a custom `datasets` loading script.
168
- DATASET_PATH: str = None
169
-
170
- # The name of a subset within `DATASET_PATH`.
171
- DATASET_NAME: str = None
172
-
173
- OUTPUT_TYPE: str = None
174
-
175
- def __init__(
176
- self,
177
- data_dir=None,
178
- cache_dir=None,
179
- download_mode=None,
180
- config=None,
181
- ) -> None:
182
- """
183
- :param data_dir: str
184
- Stores the path to a local folder containing the `Task`'s data files.
185
- Use this to specify the path to manually downloaded data (usually when
186
- the dataset is not publicly accessible).
187
- :param cache_dir: str
188
- The directory to read/write the `Task` dataset. This follows the
189
- HuggingFace `datasets` API with the default cache directory located at:
190
- `~/.cache/huggingface/datasets`
191
- NOTE: You can change the cache location globally for a given process
192
- to another directory:
193
- `export HF_DATASETS_CACHE="/path/to/another/directory"`
194
- :param download_mode: datasets.DownloadMode
195
- How to treat pre-existing `Task` downloads and data.
196
- - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
197
- Reuse download and reuse dataset.
198
- - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
199
- Reuse download with fresh dataset.
200
- - `datasets.DownloadMode.FORCE_REDOWNLOAD`
201
- Fresh download and fresh dataset.
202
- """
203
- self.download(data_dir, cache_dir, download_mode)
204
- self._training_docs = None
205
- self._fewshot_docs = None
206
- self._instances = None
207
-
208
- self._config = TaskConfig({**config}) if config else TaskConfig()
209
-
210
- self._filters = [build_filter_ensemble("none", [["take_first", None]])]
211
-
212
- def download(self, data_dir=None, cache_dir=None, download_mode=None) -> None:
213
- """Downloads and returns the task dataset.
214
- Override this method to download the dataset from a custom API.
215
-
216
- :param data_dir: str
217
- Stores the path to a local folder containing the `Task`'s data files.
218
- Use this to specify the path to manually downloaded data (usually when
219
- the dataset is not publicly accessible).
220
- :param cache_dir: str
221
- The directory to read/write the `Task` dataset. This follows the
222
- HuggingFace `datasets` API with the default cache directory located at:
223
- `~/.cache/huggingface/datasets`
224
- NOTE: You can change the cache location globally for a given process
225
- by setting the shell environment variable, `HF_DATASETS_CACHE`,
226
- to another directory:
227
- `export HF_DATASETS_CACHE="/path/to/another/directory"`
228
- :param download_mode: datasets.DownloadMode
229
- How to treat pre-existing `Task` downloads and data.
230
- - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
231
- Reuse download and reuse dataset.
232
- - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
233
- Reuse download with fresh dataset.
234
- - `datasets.DownloadMode.FORCE_REDOWNLOAD`
235
- Fresh download and fresh dataset.
236
- """
237
- self.dataset = datasets.load_dataset(
238
- path=self.DATASET_PATH,
239
- name=self.DATASET_NAME,
240
- data_dir=data_dir,
241
- cache_dir=cache_dir,
242
- download_mode=download_mode,
243
- )
244
- self.dataset_no_image = datasets.load_dataset(
245
- path=self.DATASET_PATH,
246
- name=self.DATASET_NAME,
247
- data_dir=data_dir,
248
- cache_dir=cache_dir,
249
- download_mode=download_mode,
250
- )
251
- for doc_name in self.dataset_no_image:
252
- remove_cols = []
253
- features = self.dataset_no_image[doc_name].features
254
- # If it is an Image instance or a Sequence of Image instance. Remove it
255
- for feature in features:
256
- if isinstance(features[feature], Image):
257
- remove_cols.append(feature)
258
- elif isinstance(features[feature], Sequence) and isinstance(features[feature].feature, Image):
259
- remove_cols.append(feature)
260
- for remove_col in remove_cols:
261
- self.dataset_no_image[doc_name] = self.dataset_no_image[doc_name].remove_columns(remove_col)
262
-
263
- @property
264
- def config(self):
265
- """Returns the TaskConfig associated with this class."""
266
- return self._config
267
-
268
- @abc.abstractmethod
269
- def has_training_docs(self):
270
- """Whether the task has a training set"""
271
- pass
272
-
273
- @abc.abstractmethod
274
- def has_validation_docs(self):
275
- """Whether the task has a validation set"""
276
- pass
277
-
278
- @abc.abstractmethod
279
- def has_test_docs(self):
280
- """Whether the task has a test set"""
281
- pass
282
-
283
- def training_docs(self):
284
- """
285
- :return: Iterable[obj]
286
- A iterable of any object, that doc_to_text can handle
287
- """
288
- return []
289
-
290
- def validation_docs(self):
291
- """
292
- :return: Iterable[obj]
293
- A iterable of any object, that doc_to_text can handle
294
- """
295
- return []
296
-
297
- def test_docs(self):
298
- """
299
- :return: Iterable[obj]
300
- A iterable of any object, that doc_to_text can handle
301
- """
302
- return []
303
-
304
- def fewshot_docs(self):
305
- """
306
- :return: Iterable[obj]
307
- A iterable of any object, that doc_to_text can handle
308
- """
309
- if self.has_training_docs():
310
- return self.training_docs()
311
- elif self.has_validation_docs():
312
- return self.validation_docs()
313
- else:
314
- if self.config.num_fewshot is not None:
315
- eval_logger.warning("has_training_docs and has_validation_docs are False" ", using test_docs as fewshot_docs but this is not recommended.")
316
- return self.test_docs()
317
-
318
- def _process_doc(self, doc):
319
- """
320
- Override this to process (detokenize, strip, replace, etc.) individual
321
- documents. This can be used in a map over documents of a data split.
322
- E.g. `map(self._process_doc, self.dataset["validation"])`
323
-
324
- :return: dict
325
- The processed version of the specified `doc`.
326
- """
327
- return doc
328
-
329
- @property
330
- def instances(self):
331
- """After calling `task.build_all_requests()`, tasks
332
- maintain a list of the dataset instances which will be evaluated.
333
- """
334
- return self._instances
335
-
336
- def fewshot_examples(self, k, rnd):
337
- if self._training_docs is None:
338
- self._training_docs = list(self.training_docs())
339
-
340
- return rnd.sample(self._training_docs, k)
341
-
342
- def doc_to_decontamination_query(self, doc) -> None:
343
- print("Override doc_to_decontamination_query with document specific decontamination query.")
344
- assert False
345
-
346
- @abc.abstractmethod
347
- def doc_to_text(self, doc):
348
- pass
349
-
350
- @abc.abstractmethod
351
- def doc_to_target(self, doc):
352
- pass
353
-
354
- # @profile
355
- def build_all_requests(self, limit=None, rank=None, world_size=None) -> None:
356
- """Build a set of Instances for a task, and store them in task.instances"""
357
- if self.has_test_docs():
358
- docs = self.test_docs()
359
- split = self.config.test_split
360
- elif self.has_validation_docs():
361
- docs = self.validation_docs()
362
- split = self.config.validation_split
363
- else:
364
- assert False, f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
365
-
366
- eval_logger.info(f"Building contexts for task {self.CONFIG.task} on rank {rank}...")
367
- instances = []
368
- doc_id_iterator = utils.create_iterator([i for i in range(len(docs))], rank, world_size, limit)
369
- doc_id_iterator, doc_id_iterator_counting = itertools.tee(doc_id_iterator)
370
- total_docs = sum(1 for _ in doc_id_iterator_counting)
371
- pbar = tqdm(total=total_docs, desc=f"Building context", disable=(rank != 0))
372
- for doc_id in doc_id_iterator:
373
- # sample fewshot context #TODO: need to offset doc_id by rank now!
374
- fewshot_ctx = self.fewshot_context(doc_id, 0 if self.config.num_fewshot is None else self.config.num_fewshot, self.config.training_split if self.has_training_docs() else split)
375
-
376
- # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
377
- inst = self.construct_requests(doc_id=doc_id, ctx=fewshot_ctx, metadata=(self.config["task"], doc_id, self.config.repeats), split=split)
378
-
379
- if not isinstance(inst, list):
380
- inst = [inst]
381
-
382
- instances.extend(inst)
383
- pbar.update(1)
384
-
385
- pbar.close()
386
- self._instances = instances
387
- assert len(self._instances) != 0, "task.build_requests() did not find any docs!"
388
-
389
- @abc.abstractmethod
390
- def construct_requests(self, doc_id, ctx, **kwargs):
391
- """Uses RequestFactory to construct Requests and returns an iterable of
392
- Requests which will be sent to the LMM.
393
-
394
- :param doc_id: int
395
- The index of a document within `self.test_docs()` or `self.validation_docs()`,
396
- whichever is the main split used.
397
- :param ctx: str
398
- The context string, generated by fewshot_context. This includes the natural
399
- language description, as well as the few shot examples, and the question
400
- part of the document for `doc`.
401
- :param repeats: int
402
- TODO: update this docstring
403
- The number of times each instance in a dataset is inferred on. Defaults to 1,
404
- can be increased for techniques like majority voting.
405
- """
406
- pass
407
-
408
- @abc.abstractmethod
409
- def process_results(self, doc, results):
410
- """Take a single document and the LMM results and evaluates, returning a
411
- dict where keys are the names of submetrics and values are the values of
412
- the metric for that one document
413
-
414
- :param doc:
415
- The document as returned from training_docs, validation_docs, or test_docs.
416
- :param results:
417
- The results of the requests created in construct_requests.
418
- """
419
- pass
420
-
421
- @abc.abstractmethod
422
- def aggregation(self):
423
- """
424
- :returns: {str: [metric_score] -> float}
425
- A dictionary where keys are the names of submetrics and values are
426
- functions that aggregate a list of metric scores
427
- """
428
- pass
429
-
430
- @abc.abstractmethod
431
- def higher_is_better(self):
432
- """
433
- :returns: {str: bool}
434
- A dictionary where keys are the names of submetrics and values are
435
- whether a higher value of the submetric is better
436
- """
437
- pass
438
-
439
- @classmethod
440
- def count_bytes(cls, doc):
441
- """Used for byte-level perplexity metrics in rolling loglikelihood"""
442
- return len(doc.encode("utf-8"))
443
-
444
- @utils.positional_deprecated
445
- def fewshot_context(
446
- self,
447
- doc_id,
448
- num_fewshot,
449
- split,
450
- rnd=random.Random(1234),
451
- description=None,
452
- ):
453
- """Returns a fewshot context string that is made up of a prepended description
454
- (if provided), the `num_fewshot` number of examples, and an appended prompt example.
455
-
456
- :param doc_id: int
457
- The document id as returned from training_docs, validation_docs, or test_docs.
458
- :param num_fewshot: int
459
- The number of fewshot examples to provide in the returned context string.
460
- :param split: str
461
- The split of the document to retrieve from the dataset
462
- :param rnd: random.Random
463
- The pseudo-random number generator used to randomly sample examples.
464
- WARNING: This is currently a required arg although it's optionalized with a default `None`.
465
- :param description: str
466
- The task's description that will be prepended to the fewshot examples.
467
- :returns: str
468
- The fewshot context.
469
- """
470
- assert rnd is not None, "A `random.Random` generator argument must be provided to `rnd`"
471
-
472
- description = description if description else ""
473
- doc = self.dataset_no_image[split][doc_id]
474
-
475
- if num_fewshot == 0:
476
- labeled_examples = ""
477
- else:
478
- # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
479
- if self.has_training_docs():
480
- fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
481
- else:
482
- if self._fewshot_docs is None:
483
- self._fewshot_docs = list(self.validation_docs() if self.has_validation_docs() else self.test_docs())
484
-
485
- fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)
486
-
487
- # get rid of the doc that's the one we're evaluating, if it's in the fewshot
488
- fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
489
-
490
- labeled_examples = "\n\n".join([self.doc_to_text(doc) + self.doc_to_target(doc) for doc in fewshotex]) + "\n\n"
491
-
492
- example = self.doc_to_text(doc)
493
- return description + labeled_examples + example
494
-
495
- def apply_filters(self):
496
- if hasattr(self, "_filters"):
497
- for f in self._filters:
498
- f.apply(self._instances, None)
499
- else:
500
- eval_logger.warning("No filter defined, passing through instances")
501
- return self._instances
502
-
503
- def dump_config(self) -> dict:
504
- """Returns a dictionary representing the task's config.
505
-
506
- :returns: str
507
- The fewshot context.
508
- """
509
- # TODO: this should only return the overrides applied to a non-YAML task's configuration.
510
- # (num_fewshot)
511
- return self.config.to_dict()
512
-
513
- def override_metric(self, metric_name: str) -> None:
514
- """
515
- Override the default metrics used for evaluation with custom metrics.
516
-
517
- Parameters:
518
- - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
519
- """
520
- (
521
- self._metric_fn_list,
522
- self._aggregation_list,
523
- self._metric_fn_kwargs,
524
- self._higher_is_better,
525
- ) = ({}, {}, {}, {})
526
- self._metric_fn_list[metric_name] = get_metric(metric_name)
527
- self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
528
- self._higher_is_better[metric_name] = is_higher_better(metric_name)
529
- self._metric_fn_kwargs[metric_name] = {}
530
- if not isinstance(self, ConfigurableTask):
531
- self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
532
- self.aggregation = lambda: {metric_name: get_metric_aggregation(metric_name)}
533
- setattr(self._config, "metric_list", [{"metric": metric_name}])
534
- setattr(self._config, "process_results", None)
535
-
536
-
537
- class ConfigurableTask(Task):
538
- VERSION = "Yaml"
539
- OUTPUT_TYPE = None
540
- CONFIG = None
541
-
542
- def __init__(self, model_name) -> None: # TODO no super() call here
543
- # Get pre-configured attributes
544
- self._config = self.CONFIG
545
- # different model requires different prompt, we have to take those into account.
546
-
547
- self.model_name = model_name
548
- self._prepare_model_specific_config()
549
-
550
- assert self.config.output_type in ALL_OUTPUT_TYPES
551
- self.OUTPUT_TYPE = self.config.output_type
552
-
553
- self.DATASET_PATH = self.config.dataset_path
554
-
555
- if self.config.dataset_name is not None:
556
- self.DATASET_NAME = self.config.dataset_name
557
-
558
- self._prepare_metric_and_aggregation()
559
-
560
- self.download(self.config.dataset_kwargs)
561
- self._training_docs = None
562
- self._fewshot_docs = None
563
-
564
- if self.config.filter_list is not None:
565
- self._filters = []
566
- for filter_config in self.config.filter_list:
567
- for filter_pipeline in filter_config:
568
- filter_name = filter_config["name"]
569
- filter_functions = filter_config["filter"]
570
- components = []
571
- for function in filter_functions:
572
- kwargs = {key: function[key] for key in function if key != "function"}
573
- components.append([function["function"], kwargs])
574
- filter_pipeline = build_filter_ensemble(filter_name, components)
575
- self._filters.append(filter_pipeline)
576
- else:
577
- self._filters = [build_filter_ensemble("none", [["take_first", None]])]
578
- if self.config.fewshot_config is not None:
579
- self.sampler = samplers.get_sampler(self.config.fewshot_config.get("sampler", "default") if self.config.fewshot_config else "default")(list(self.fewshot_docs()), self, rnd=random.Random(1234))
580
-
581
- if self.has_test_docs():
582
- self.task_docs = self.test_docs()
583
- elif self.has_validation_docs():
584
- self.task_docs = self.validation_docs()
585
- else:
586
- assert False, f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
587
-
588
- # Test One Doc
589
- self.features = list(self.task_docs.features.keys())
590
- self.multiple_input = 0
591
- self.multiple_target = 0
592
- test_doc = self.task_docs[0]
593
- test_text = self.doc_to_text(test_doc)
594
- test_target = self.doc_to_target(test_doc)
595
-
596
- if self.config.doc_to_choice is not None:
597
- test_choice = self.doc_to_choice(test_doc)
598
- if type(test_choice) is not list:
599
- eval_logger.error("doc_to_choice must return list")
600
- else:
601
- num_choice = len(test_choice)
602
-
603
- if type(test_text) is int:
604
- self.multiple_input = num_choice
605
- else:
606
- test_choice = None
607
-
608
- if type(test_target) is list:
609
- self.multiple_target = len(test_target)
610
- else:
611
- if (type(test_target) is int) and (test_choice is not None):
612
- test_target = test_choice[test_target]
613
- else:
614
- test_target = str(test_target)
615
-
616
- if test_choice is not None:
617
- check_choices = test_choice
618
- else:
619
- check_choices = [test_target]
620
- if self.config.doc_to_choice is not None:
621
- for choice in check_choices:
622
- choice_has_whitespace = True if choice[0].isspace() else False
623
- delimiter_has_whitespace = True if self.config.target_delimiter.rstrip() != self.config.target_delimiter else False
624
-
625
- if delimiter_has_whitespace and choice_has_whitespace:
626
- eval_logger.warning(f'Both target_delimiter and target choice: "{choice}" have whitespace')
627
- elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
628
- eval_logger.warning(f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace')
629
-
630
- def _prepare_model_specific_config(self):
631
- self.model_specific_prompt_kwargs = self.config.model_specific_prompt_kwargs
632
- if self.model_specific_prompt_kwargs is not None:
633
- if self.model_name in self.model_specific_prompt_kwargs:
634
- self.model_specific_prompt_kwargs = self.model_specific_prompt_kwargs[self.model_name]
635
- else:
636
- self.model_specific_prompt_kwargs = self.model_specific_prompt_kwargs.get("default", None)
637
-
638
- self.model_specific_target_kwargs = self.config.model_specific_target_kwargs
639
- if self.model_specific_target_kwargs is not None:
640
- if self.model_name in self.model_specific_target_kwargs:
641
- self.model_specific_target_kwargs = self.model_specific_target_kwargs[self.model_name]
642
- else:
643
- self.model_specific_target_kwargs = self.model_specific_target_kwargs.get("default", None)
644
- self.model_specific_generation_kwargs = self.config.model_specific_generation_kwargs
645
- if self.model_specific_generation_kwargs is not None:
646
- if self.model_name in self.model_specific_generation_kwargs:
647
- self.model_specific_generation_kwargs = self.model_specific_generation_kwargs[self.model_name]
648
- else:
649
- self.model_specific_generation_kwargs = self.model_specific_generation_kwargs.get("default", {})
650
-
651
- self.config.generation_kwargs.update(self.model_specific_generation_kwargs)
652
-
653
- def _prepare_metric_and_aggregation(self):
654
- self._metric_fn_list = {}
655
- self._metric_fn_kwargs = {}
656
- self._aggregation_list = {}
657
- self._higher_is_better = {}
658
-
659
- if self.config.metric_list is None:
660
- # TODO: handle this in TaskConfig.__post_init__ ?
661
- _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
662
-
663
- for metric_name in _metric_list:
664
- self._metric_fn_list[metric_name] = METRIC_REGISTRY[metric_name]
665
- self._metric_fn_kwargs[metric_name] = {}
666
- self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
667
- self._higher_is_better[metric_name] = is_higher_better(metric_name)
668
- else:
669
- for metric_config in self.config.metric_list:
670
- assert "metric" in metric_config
671
- metric_name = metric_config["metric"]
672
- kwargs = {key: metric_config[key] for key in metric_config if key not in ["metric", "aggregation", "higher_is_better"]}
673
-
674
- if self.config.process_results is not None:
675
- self._metric_fn_list[metric_name] = None
676
- self._metric_fn_kwargs[metric_name] = {}
677
- elif callable(metric_name):
678
- metric_fn = metric_name.__call__
679
- metric_name = metric_name.__name__
680
- self._metric_fn_list[metric_name] = metric_fn
681
- self._metric_fn_kwargs[metric_name] = kwargs
682
- else:
683
- self._metric_fn_list[metric_name] = METRIC_REGISTRY[metric_name]
684
- self._metric_fn_kwargs[metric_name] = kwargs
685
-
686
- if "aggregation" in metric_config:
687
- agg_name = metric_config["aggregation"]
688
- if type(agg_name) == str:
689
- self._aggregation_list[metric_name] = get_aggregation(agg_name)
690
- elif callable(agg_name):
691
- self._aggregation_list[metric_name] = metric_config["aggregation"]
692
- else:
693
- INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
694
- metric_agg = get_metric_aggregation(metric_name)
695
- eval_logger.warning(f"[Task: {self._config.task}] metric {metric_name} is defined, but aggregation is not. " f"using default " f"aggregation={INV_AGG_REGISTRY[metric_agg]}")
696
- self._aggregation_list[metric_name] = metric_agg
697
-
698
- if "higher_is_better" in metric_config:
699
- self._higher_is_better[metric_name] = metric_config["higher_is_better"]
700
- else:
701
- eval_logger.warning(f"[Task: {self._config.task}] metric {metric_name} is defined, but higher_is_better is not. " f"using default " f"higher_is_better={is_higher_better(metric_name)}")
702
- self._higher_is_better[metric_name] = is_higher_better(metric_name)
703
-
704
- @retry(stop=(stop_after_attempt(5) | stop_after_delay(60)), wait=wait_fixed(2))
705
- def download(self, dataset_kwargs=None) -> None:
706
- # If the dataset is a video dataset,
707
- # Recursively search whether their is a zip and unzip it to the huggingface home
708
- download_config = DownloadConfig()
709
- download_config.max_retries = dataset_kwargs.get("max_retries", 10) if dataset_kwargs is not None else 10
710
- download_config.num_proc = dataset_kwargs.get("num_proc", 8) if dataset_kwargs is not None else 8
711
- download_config.local_files_only = dataset_kwargs.get("local_files_only", False) if dataset_kwargs is not None else False
712
- if dataset_kwargs is not None:
713
- if "From_YouTube" in dataset_kwargs:
714
-
715
- def _download_from_youtube(path):
716
- try:
717
- for video in tqdm(self.all_dataset[split]):
718
- video_id = video["videoID"]
719
- target_path = os.path.join(path, f"{video_id}.mp4")
720
- assert shutil.which("yt-dlp") is not None, "yt-dlp must be installed and available in the system's PATH"
721
- command = f"yt-dlp -o {target_path} -f mp4 https://www.youtube.com/watch?v={video_id}"
722
- subprocess.run(command, shell=True)
723
- with open(os.path.join(cache_path, f"{task}_download_status.json"), "w") as f:
724
- f.write(json.dumps({task: "downloaded"}))
725
- except Exception as e:
726
- eval_logger.error(f"Error while downloading {task} data: {e}")
727
- with open(os.path.join(cache_path, f"{task}_download_status.json"), "w") as f:
728
- f.write(json.dumps({task: "not downloaded"}))
729
-
730
- hf_home = os.getenv("HF_HOME", "~/.cache/huggingface/")
731
- accelerator = Accelerator()
732
- if accelerator.is_main_process:
733
- dataset_kwargs.pop("From_YouTube")
734
- self.all_dataset = datasets.load_dataset(
735
- path=self.DATASET_PATH,
736
- name=self.DATASET_NAME,
737
- download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
738
- **dataset_kwargs if dataset_kwargs is not None else {},
739
- )
740
- dataset_kwargs["From_YouTube"] = True
741
- cache_path = snapshot_download(repo_id=self.DATASET_PATH, repo_type="dataset") # download_parquet
742
- split = vars(self.config)["test_split"]
743
- task = vars(self.config)["task"]
744
-
745
- video_path = os.path.join(hf_home, task)
746
- if os.path.exists(os.path.join(cache_path, f"{task}_download_status.json")):
747
- download_status = json.load(open(os.path.join(cache_path, f"{task}_download_status.json"), "r"))
748
- if download_status[task] == "downloaded":
749
- eval_logger.info(f"Data for {task} already download!")
750
- else:
751
- eval_logger.info(f"Start downloading YouTube data to {video_path}...")
752
- _download_from_youtube(video_path)
753
- else:
754
- eval_logger.info(f"Start downloading YouTube data to {video_path}...")
755
- _download_from_youtube(video_path)
756
-
757
- accelerator.wait_for_everyone()
758
- if "builder_script" in dataset_kwargs:
759
- builder_script = dataset_kwargs["builder_script"]
760
- self.DATASET_PATH = os.path.join(cache_path, builder_script)
761
- dataset_kwargs.pop("builder_script")
762
-
763
- downloaded_video_ids = [i.split(".mp4")[0] for i in os.listdir(os.path.expanduser(video_path)) if i.endswith(".mp4")]
764
- # Filtered the existing dataset with the downloaded video ids
765
- self.dataset = datasets.DatasetDict({split: self.all_dataset[split].filter(lambda x: x["videoID"] in downloaded_video_ids)})
766
-
767
- self.dataset_no_image = self.dataset
768
- dataset_kwargs.pop("From_YouTube")
769
- return
770
-
771
- if "video" in dataset_kwargs and dataset_kwargs["video"]:
772
- hf_home = os.getenv("HF_HOME", "~/.cache/huggingface/")
773
- cache_dir = dataset_kwargs["cache_dir"]
774
- cache_dir = os.path.join(hf_home, cache_dir)
775
- accelerator = Accelerator()
776
- if accelerator.is_main_process:
777
- force_download = dataset_kwargs.get("force_download", False)
778
- force_unzip = dataset_kwargs.get("force_unzip", False)
779
- cache_path = snapshot_download(repo_id=self.DATASET_PATH, repo_type="dataset", force_download=force_download, etag_timeout=60)
780
- zip_files = glob(os.path.join(cache_path, "**/*.zip"), recursive=True)
781
- tar_files = glob(os.path.join(cache_path, "**/*.tar*"), recursive=True)
782
-
783
- def unzip_video_data(zip_file):
784
- import zipfile
785
-
786
- with zipfile.ZipFile(zip_file, "r") as zip_ref:
787
- zip_ref.extractall(cache_dir)
788
- eval_logger.info(f"Extracted all files from {zip_file} to {cache_dir}")
789
-
790
- def untar_video_data(tar_file):
791
- import tarfile
792
-
793
- with tarfile.open(tar_file, "r") as tar_ref:
794
- tar_ref.extractall(cache_dir)
795
- eval_logger.info(f"Extracted all files from {tar_file} to {cache_dir}")
796
-
797
- def concat_tar_parts(tar_parts, output_tar):
798
- with open(output_tar, "wb") as out_tar:
799
- from tqdm import tqdm
800
-
801
- for part in tqdm(sorted(tar_parts)):
802
- with open(part, "rb") as part_file:
803
- out_tar.write(part_file.read())
804
- eval_logger.info(f"Concatenated parts {tar_parts} into {output_tar}")
805
-
806
- # Unzip zip files if needed
807
- if force_unzip or (not os.path.exists(cache_dir) and len(zip_files) > 0):
808
- for zip_file in zip_files:
809
- unzip_video_data(zip_file)
810
-
811
- # Concatenate and extract tar files if needed
812
- if force_unzip or (not os.path.exists(cache_dir) and len(tar_files) > 0):
813
- tar_parts_dict = {}
814
-
815
- # Group tar parts together
816
- for tar_file in tar_files:
817
- base_name = tar_file.split(".tar")[0]
818
- if base_name not in tar_parts_dict:
819
- tar_parts_dict[base_name] = []
820
- tar_parts_dict[base_name].append(tar_file)
821
-
822
- # Concatenate and untar split parts
823
- for base_name, parts in tar_parts_dict.items():
824
- eval_logger.info(f"Extracting following tar files: {parts}")
825
- output_tar = base_name + ".tar"
826
- if not os.path.exists(output_tar):
827
- eval_logger.info(f"Start concatenating tar files")
828
-
829
- concat_tar_parts(parts, output_tar)
830
- eval_logger.info(f"Finish concatenating tar files")
831
-
832
- if not os.path.exists(os.path.join(cache_dir, os.path.basename(base_name))):
833
- untar_video_data(output_tar)
834
-
835
- accelerator.wait_for_everyone()
836
- dataset_kwargs.pop("cache_dir")
837
- dataset_kwargs.pop("video")
838
-
839
- if "builder_script" in dataset_kwargs:
840
- builder_script = dataset_kwargs["builder_script"]
841
- self.DATASET_PATH = os.path.join(cache_path, builder_script)
842
- dataset_kwargs.pop("builder_script")
843
-
844
- if "force_download" in dataset_kwargs:
845
- dataset_kwargs.pop("force_download")
846
-
847
- if "force_unzip" in dataset_kwargs:
848
- dataset_kwargs.pop("force_unzip")
849
-
850
- if "local_files_only" in dataset_kwargs:
851
- dataset_kwargs.pop("local_files_only")
852
-
853
- self.dataset = datasets.load_dataset(
854
- path=self.DATASET_PATH,
855
- name=self.DATASET_NAME,
856
- download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
857
- download_config=download_config,
858
- **dataset_kwargs if dataset_kwargs is not None else {},
859
- )
860
- self.dataset_no_image = datasets.load_dataset(
861
- path=self.DATASET_PATH,
862
- name=self.DATASET_NAME,
863
- download_mode=datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
864
- download_config=download_config,
865
- **dataset_kwargs if dataset_kwargs is not None else {},
866
- )
867
- for doc_name in self.dataset_no_image:
868
- remove_cols = []
869
- features = self.dataset_no_image[doc_name].features
870
- # If it is an Image instance or a Sequence of Image instance. Remove it
871
- for feature in features:
872
- if isinstance(features[feature], Image):
873
- remove_cols.append(feature)
874
- elif isinstance(features[feature], Sequence) and isinstance(features[feature].feature, Image):
875
- remove_cols.append(feature)
876
- for remove_col in remove_cols:
877
- self.dataset_no_image[doc_name] = self.dataset_no_image[doc_name].remove_columns(remove_col)
878
-
879
def has_training_docs(self) -> bool:
    """Report whether the task config declares a training split."""
    return self.config.training_split is not None
884
-
885
def has_validation_docs(self) -> bool:
    """Report whether the task config declares a validation split."""
    return self.config.validation_split is not None
890
-
891
def has_test_docs(self) -> bool:
    """Report whether the task config declares a test split."""
    return self.config.test_split is not None
896
-
897
def training_docs(self) -> datasets.Dataset:
    """Return the training split, optionally run through `process_docs`.

    Implicitly returns None when no training split is configured,
    matching the original behavior.
    """
    if not self.has_training_docs():
        return None
    docs = self.dataset[self.config.training_split]
    processor = self.config.process_docs
    return processor(docs) if processor is not None else docs
902
-
903
def validation_docs(self) -> datasets.Dataset:
    """Return the validation split, optionally run through `process_docs`.

    Implicitly returns None when no validation split is configured,
    matching the original behavior.
    """
    if not self.has_validation_docs():
        return None
    docs = self.dataset[self.config.validation_split]
    processor = self.config.process_docs
    return processor(docs) if processor is not None else docs
908
-
909
- def test_docs(self) -> datasets.Dataset:
910
- if self.has_test_docs():
911
- if self.config.process_docs is not None:
912
- return self.config.process_docs(self.dataset[self.config.test_split])
913
- return self.dataset[self.config.test_split]
914
-
915
def fewshot_docs(self):
    """Return the docs used for sampling few-shot examples.

    Prefers the explicitly configured `fewshot_split`; otherwise defers to
    the parent class rule, warning when the task requests few-shot examples
    without naming a split.
    """
    if self.config.fewshot_split is not None:
        return self.dataset[self.config.fewshot_split]
    else:
        if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
            eval_logger.warning(f"Task '{self.config.task}': " "num_fewshot > 0 but fewshot_split is None. " "using preconfigured rule.")
        return super().fewshot_docs()
922
-
923
@utils.positional_deprecated
def fewshot_context(self, doc_id, num_fewshot, split):
    """Returns a fewshot context string that is made up of a prepended description
    (if provided), the `num_fewshot` number of examples, and an appended prompt example.

    :param doc_id: str
        The document id as returned from training_docs, validation_docs, or test_docs.
    :param num_fewshot: int
        The number of fewshot examples to provide in the returned context string.
    :param split: str
        The dataset split the document is drawn from.
    :returns: str
        The fewshot context.
    """
    # Few-shot assembly works on the image-free copy of the dataset.
    doc = self.dataset_no_image[split][doc_id]
    if num_fewshot == 0:
        # always prepend the (possibly empty) task description
        labeled_examples = self.config.description
    else:
        labeled_examples = self.config.description + self.sampler.get_context(doc, num_fewshot)
    example = self.doc_to_text(doc)
    # doc_to_text may yield a string, a list of strings (multiple inputs),
    # or an int index into the choice list — handle each shape.
    if type(example) == str:
        return labeled_examples + example
    elif type(example) == list:
        return [labeled_examples + ex for ex in example]
    elif type(example) == int:
        if self.config.doc_to_choice is not None:
            choices = self.doc_to_choice(doc)
            return labeled_examples + choices[example]
        else:
            return labeled_examples + str(example)
952
-
953
def apply_filters(self):
    """Run every configured filter pipeline over the accumulated instances.

    Filters mutate the instances in place; when no `_filters` attribute
    exists, the instances are passed through unchanged with a warning.
    """
    if hasattr(self, "_filters"):
        for f in self._filters:
            f.apply(self._instances, self.task_docs)
    else:
        eval_logger.warning("No filter defined, passing through instances")
    return self._instances
960
-
961
def should_decontaminate(self):
    """Report whether decontamination queries should be produced for this task."""
    flag = self.config.should_decontaminate
    return flag
963
-
964
def doc_to_decontamination_query(self, doc):
    """Build the text used to check this document for train-set contamination.

    Falls back to `doc_to_text` when no dedicated query is configured; the
    configured value may be a feature/column name, a callable, or a template
    string whose rendering is parsed with `ast.literal_eval`.

    NOTE(review): returns None implicitly when decontamination is disabled.
    """
    if self.config.should_decontaminate:
        if self.config.doc_to_decontamination_query is None:
            return self.doc_to_text(doc)
        else:
            doc_to_decontamination_query = self.config.doc_to_decontamination_query
            if doc_to_decontamination_query in self.features:
                return doc[doc_to_decontamination_query]
            elif callable(doc_to_decontamination_query):
                return doc_to_decontamination_query(doc)
            else:
                return ast.literal_eval(utils.apply_template(self.config.doc_to_decontamination_query, doc))
976
-
977
- def _process_doc(self, doc):
978
- """
979
- Override this to process (detokenize, strip, replace, etc.) individual
980
- documents. This can be used in a map over documents of a data split.
981
- E.g. `map(self._process_doc, self.dataset["validation"])`
982
-
983
- :return: dict
984
- The processed version of the specified `doc`.
985
- """
986
- return doc
987
-
988
def doc_to_text(self, doc):
    """Render the textual prompt for a document.

    The configured `doc_to_text` may be:
      * an int      -- returned as-is (an index into the choice list),
      * a str       -- either a feature/column name or a template string,
      * a callable  -- called with the doc (plus model-specific kwargs if set),
      * a Promptsource-style template exposing `.apply`.

    :raises TypeError: when `doc_to_text` is none of the supported kinds.
    """
    doc_to_text = self.config.doc_to_text

    if type(doc_to_text) == int:
        return doc_to_text
    elif type(doc_to_text) == str:
        if doc_to_text in self.features:
            return doc[doc_to_text]
        else:
            text_string = utils.apply_template(doc_to_text, doc)
            # A purely numeric rendering refers to a choice index.
            if text_string.isdigit() and self._config.doc_to_choice is not None:
                return ast.literal_eval(text_string)
            else:
                return text_string
    elif callable(doc_to_text):
        return (
            doc_to_text(doc, self.model_specific_prompt_kwargs)
            if self.model_specific_prompt_kwargs is not None
            else doc_to_text(
                doc,
            )
        )
    # Used when applying a Promptsource template
    elif hasattr(doc_to_text, "apply"):
        applied_prompt = doc_to_text.apply(doc)
        if len(applied_prompt) == 2:
            return applied_prompt[0]
        else:
            eval_logger.warning("Applied prompt returns empty string")
            return self.config.fewshot_delimiter
    else:
        # BUG FIX: previously dumped the type via a stray debug `print` and
        # raised a bare TypeError; raise an informative exception instead.
        raise TypeError(f"Unsupported doc_to_text type: {type(doc_to_text)}")
1024
-
1025
def doc_to_target(self, doc: dict) -> Union[int, str, list]:
    """Resolve the gold target for a document.

    Mirrors `doc_to_text`: the configured `doc_to_target` may be an int
    (choice index), a feature/column name or template string, a literal
    list, a callable, or a Promptsource-style template exposing `.apply`.
    """
    doc_to_target = self.config.doc_to_target

    if type(doc_to_target) == int:
        return doc_to_target
    elif type(doc_to_target) == str:
        if doc_to_target in self.features:
            return doc[doc_to_target]
        else:
            target_string = utils.apply_template(doc_to_target, doc)
            # A purely numeric rendering is an index into the choice list.
            if target_string.isdigit() and self._config.doc_to_choice is not None:
                return ast.literal_eval(target_string)
            # A bracketed rendering is parsed as a Python list when possible.
            elif len(target_string) >= 2 and (target_string[0] == "[") and (target_string[-1] == "]"):
                try:
                    return ast.literal_eval(target_string)
                except (SyntaxError, ValueError):
                    return target_string
            else:
                return target_string
    elif type(doc_to_target) == list:
        return doc_to_target
    elif callable(doc_to_target):
        return doc_to_target(doc, self.model_specific_target_kwargs) if self.model_specific_target_kwargs is not None else doc_to_target(doc)
    # Used when applying a Promptsource template
    elif hasattr(doc_to_target, "apply"):
        applied_prompt = doc_to_target.apply(doc)
        if len(applied_prompt) == 2:
            return applied_prompt[1]
        else:
            eval_logger.warning("Applied prompt returns empty string")
            return self.config.fewshot_delimiter
    else:
        raise TypeError
1061
-
1062
def doc_to_visual(self, doc: dict) -> Union[int, str, list]:
    """Extract the visual input(s) for a document.

    When `doc_to_visual` is configured as a feature/column name, the single
    value is wrapped in a list so callers always receive a list; otherwise
    the configured callable is invoked with the document.
    """
    # BUG FIX: removed a dangling no-op expression statement
    # (`self.config.doc_to_visual` on its own line); also bind the config
    # value once and use isinstance for the type check.
    doc_to_visual = self.config.doc_to_visual
    if isinstance(doc_to_visual, str):
        assert doc_to_visual in self.features
        # Single visual column. Still return a list for consistency.
        return [doc[doc_to_visual]]
    else:
        assert callable(doc_to_visual)
        return doc_to_visual(doc)
1071
-
1072
def doc_to_choice(self, doc: Any) -> List[str]:
    """Return the list of answer choices for a document.

    The configured `doc_to_choice` may be a feature/column name, a template
    string evaluating to a list, a literal list, a dict (its values are
    used), a callable, or a Promptsource-style template.

    NOTE(review): when `doc_to_choice` is unset this only logs an error and
    implicitly returns None rather than raising.
    """
    if self.config.doc_to_choice is None:
        eval_logger.error("doc_to_choice was called but not set in config")
    else:
        doc_to_choice = self.config.doc_to_choice

        if type(doc_to_choice) == str:
            if doc_to_choice in self.features:
                return doc[doc_to_choice]
            else:
                return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
        elif type(doc_to_choice) == list:
            return doc_to_choice
        elif type(doc_to_choice) == dict:
            return list(doc_to_choice.values())
        elif callable(doc_to_choice):
            return doc_to_choice(doc)
        elif hasattr(doc_to_choice, "get_answer_choices_list"):
            return doc_to_choice.get_answer_choices_list(doc)
        else:
            raise TypeError
1093
-
1094
def construct_requests(self, doc_id: int, ctx: str, **kwargs) -> Union[List[Instance], Instance]:
    """Build the model request Instance(s) for one document.

    :param doc_id: index of the document within the given split
    :param ctx: the (possibly few-shot) context string
    :param kwargs: must include `split`; the rest is forwarded to Instance
    :returns: a single Instance (loglikelihood / generate_until) or a list
        of Instances (multiple_choice, one per choice, plus unconditional
        requests when `acc_mutual_info` is requested).
    """
    split = kwargs.pop("split")  # (idiom) single pop instead of get()+pop()
    if self.OUTPUT_TYPE == "loglikelihood":
        arguments = (ctx, self.doc_to_target, self.doc_to_visual, doc_id, self.config.task, split)
        # BUG FIX: this branch previously built `arguments` but fell off the
        # end of the function, returning None for loglikelihood tasks.
        return Instance(request_type=self.OUTPUT_TYPE, arguments=arguments, idx=0, **kwargs)
    elif self.OUTPUT_TYPE == "multiple_choice":
        doc = self.dataset[split][doc_id]
        choices = self.doc_to_choice(doc)
        target_delimiter = self.config.target_delimiter
        if self.multiple_input:
            # If there are multiple inputs, choices are placed in the ctx
            cont = self.doc_to_target(doc)
            arguments = [(ctx, f"{target_delimiter}{cont}", self.doc_to_visual, doc_id, self.config.task, split) for ctx in choices]
        else:
            # Otherwise they are placed in the continuation
            arguments = [(ctx, f"{target_delimiter}{cont}", self.doc_to_visual, doc_id, self.config.task, split) for cont in choices]
        request_list = [
            Instance(
                request_type="loglikelihood",
                arguments=arg,
                idx=i,
                **kwargs,
            )
            for i, arg in enumerate(arguments)
        ]
        # TODO: we should raise a warning telling users this will at most ~2x runtime.
        if "acc_mutual_info" in self._metric_fn_list.keys():
            # if we are calculating multiple choice accuracy using mutual
            # information instead of raw loglikelihood as metric, we need
            # unconditional lls:
            # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
            # i.e. normalize by subtracting each choice's unconditional logprob.
            request_list.extend(
                [
                    Instance(
                        request_type="loglikelihood",
                        arguments=("", "{}".format(choice)),
                        idx=i,
                        **kwargs,
                    )
                    for i, choice in enumerate(choices)
                ]
            )
        return request_list
    elif self.OUTPUT_TYPE == "generate_until":
        arguments = (ctx, self.config.generation_kwargs, self.doc_to_visual, doc_id, self.config.task, split)
        return Instance(request_type=self.OUTPUT_TYPE, arguments=arguments, idx=0, **kwargs)
1145
-
1146
# TODO: we add a full_docs interface here for some evaluations that needs to access the full datasets during process_results function. we may have better ways to handle this.
@retry(stop=(stop_after_attempt(5) | stop_after_delay(1200)), wait=wait_fixed(2))
def process_results(self, doc, results, full_docs=None):
    """Convert raw model responses for one document into per-metric inputs.

    Dispatches on `self.OUTPUT_TYPE`:
      * "loglikelihood"   -- results is [(ll, is_greedy)]
      * "multiple_choice" -- results is a list of (ll, is_greedy) per choice
      * "generate_until"  -- results is [generated_text]

    A task-configured `process_results` callable, when present, takes over
    entirely. Retried (tenacity) because some tasks score via external APIs.
    """
    if self.OUTPUT_TYPE == "generate_until":
        results[0] = results[0].strip()

    kwargs = {}
    if full_docs is not None:
        kwargs["full_docs"] = full_docs
    if callable(self.config.process_results):
        return self.config.process_results(doc, results, **kwargs)

    result_dict = {}
    use_metric = list(self._metric_fn_list.keys())
    if self.OUTPUT_TYPE == "loglikelihood":
        results = results[0]
        ll, is_greedy = results
        return {
            **({"perplexity": ll} if "perplexity" in use_metric else {}),
            **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
        }
    elif self.OUTPUT_TYPE == "multiple_choice":
        lls, is_greedy = zip(*results)

        # retrieve choices in List[str] form, to compute choice lengths, etc.
        choices = self.doc_to_choice(doc)
        completion_len = np.array([float(len(i)) for i in choices])

        if 2 * len(choices) == len(lls) and "acc_mutual_info" in self._metric_fn_list.keys():
            # then we are doing mutual info.
            # this stores the "dryrun" / unconditional answer loglikelihoods
            lls_unconditional = lls[1::2]
            assert len(lls_unconditional) == len(choices)
            # and this stores our "regular" conditional loglikelihoods
            lls = lls[::2]

        pred = np.argmax(lls)
        # length-normalized prediction for acc_norm
        pred_norm = np.argmax(lls / completion_len)

        if self.multiple_input:
            gold = self.doc_to_text(doc)
        else:
            gold = self.doc_to_target(doc)

        # Normalize gold to choice indices; out-of-range labels become -100.
        gold_index_error = False
        if type(gold) is list:
            gold = [i if i < len(choices) else -100 for i in gold]
            if -100 in gold:
                gold_index_error = True
        else:
            if type(gold) is int:
                gold = gold if gold < len(choices) else -100
            elif type(gold) is str:
                gold = choices.index(gold) if gold in choices else -100

            if gold == -100:
                gold_index_error = True

        if gold_index_error:
            eval_logger.warning(f"Label index was not in within range of available choices," f"Sample:\n\n{doc}\n\n")

        if self.multiple_target:
            acc = 1.0 if pred in gold else 0.0
            acc_norm = 1.0 if pred_norm in gold else 0.0
            exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
        else:
            acc = 1.0 if pred == gold else 0.0
            acc_norm = 1.0 if pred_norm == gold else 0.0
            # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
            exact_match = int(is_greedy[gold]) if gold != -100 else 0

        result_dict = {
            **({"acc": acc} if "acc" in use_metric else {}),
            **({"f1": (gold, pred)} if "f1" in use_metric else {}),
            **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
            **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
            **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
        }

        if "acc_mutual_info" in use_metric:
            lls_mutual_info = [ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)]
            acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
            result_dict["acc_mutual_info"] = acc_mutual_info

    elif self.OUTPUT_TYPE == "generate_until":
        gold = self.doc_to_target(doc)
        result = results[0]
        if self.config.doc_to_choice is not None:
            # If you set doc_to_choice,
            # it assumes that doc_to_target returns a number.
            choices = self.doc_to_choice(doc)
            gold = choices[gold]
        # we expect multiple_targets to be a list.
        elif self.multiple_target:
            gold = list(gold)
        elif type(gold) != type(result):
            # cast gold to the same type as result
            gold = type(result)(gold)

        for metric in self._metric_fn_list.keys():
            if self.multiple_target:
                # in the case where we have multiple targets,
                # return true if any are true
                # TODO: this may break for multipLe_target, non zero-or-1 metrics
                scores = []
                if not isinstance(gold, list):
                    # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
                    gold = [gold]
                for gold_option in gold:
                    try:
                        result_score = self._metric_fn_list[metric](
                            references=[gold_option],
                            predictions=[result],
                            **self._metric_fn_kwargs[metric],
                        )
                    except TypeError:  # TODO: this is hacky and I don't want to do it
                        result_score = self._metric_fn_list[metric]([gold_option, result])
                    if isinstance(result_score, dict):
                        # TODO: this handles the case where HF evaluate returns a dict.
                        result_score = result_score[metric]
                    scores.append(result_score)
                if any(scores):
                    result_score = 1.0
                else:
                    result_score = 0.0
            else:
                try:
                    result_score = self._metric_fn_list[metric](
                        references=[gold],
                        predictions=[result],
                        **self._metric_fn_kwargs[metric],
                    )
                except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                    result_score = self._metric_fn_list[metric]([gold, result])
                if isinstance(result_score, dict):
                    # TODO: this handles the case where HF evaluate returns a dict.
                    result_score = result_score[metric]
            result_dict[metric] = result_score
    else:
        raise ValueError(
            f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
            "'loglikelihood','generate_until' or 'multiple_choice'",
        )

    return result_dict
1292
-
1293
def aggregation(self):
    """Expose the metric-name -> aggregation-function mapping for this task."""
    agg = self._aggregation_list
    return agg
1295
-
1296
def higher_is_better(self):
    """Expose the metric-name -> higher-is-better mapping for this task."""
    hib = self._higher_is_better
    return hib
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/evaluator.py DELETED
@@ -1,630 +0,0 @@
1
- import random
2
- import itertools
3
- import json
4
- import collections
5
- import sys
6
- import inspect
7
- from tqdm import tqdm
8
-
9
- import torch
10
-
11
- import numpy as np
12
- from datasets import Image, Sequence
13
-
14
- import lmms_eval.api
15
- import lmms_eval.tasks
16
- import lmms_eval.models
17
- import lmms_eval.api.metrics
18
- import lmms_eval.api.registry
19
-
20
- from lmms_eval.utils import (
21
- positional_deprecated,
22
- run_task_tests,
23
- make_table,
24
- create_iterator,
25
- get_git_commit_hash,
26
- simple_parse_args_string,
27
- )
28
-
29
- from loguru import logger as eval_logger
30
-
31
-
32
@positional_deprecated
def simple_evaluate(
    model,
    model_args=None,
    tasks=[],
    num_fewshot=None,
    batch_size=None,
    device=None,
    limit=None,
    bootstrap_iters: int = 100000,
    check_integrity: bool = False,
    show_task_to_terminal: bool = False,
    log_samples: bool = True,
    gen_kwargs: str = None,
    cli_args=None,  # Bo: put args into more functions (cost 48 Bytes per call)
    predict_only: bool = False,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LMM]
        Name of model or LMM object, see lmms_eval.models.get_model
    :param model_args: Optional[str]
        String arguments for each model class, see LMM.create_from_arg_string.
        Ignored if `model` argument is a LMM object.
    :param tasks: list[Union[str, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param batch_size: int or str, optional
        Batch size for model
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing), If <1, limit is a percentage of the total number of examples.
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :param show_task_to_terminal: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :param gen_kwargs: str
        String arguments for model generation
        Ignored for all tasks with loglikelihood output_type
    :param cli_args:
        Parsed CLI namespace, forwarded to `evaluate` for logging purposes.
    :param predict_only: bool
        If True, bypass metric computation and only record model outputs.
    :return
        Dictionary of results (on rank 0) or None (on other ranks)
    """
    # Fixed seeds for reproducible evaluation runs.
    random.seed(0)
    np.random.seed(1234)
    torch.manual_seed(1234)  # TODO: this may affect training runs that are run with evaluation mid-run.

    assert tasks != [], "No tasks specified, or no tasks found. Please verify the task names."

    if gen_kwargs:
        gen_kwargs = simple_parse_args_string(gen_kwargs)
        eval_logger.warning(f"generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.")
        if gen_kwargs == "":
            gen_kwargs = None

    if model_args is None:
        model_args = ""
    lm = lmms_eval.api.registry.get_model(model).create_from_arg_string(
        model_args,
        {
            "batch_size": batch_size,
            "device": device,
        },
    )

    task_dict = lmms_eval.tasks.get_task_dict(tasks, model_name=model)
    for task_name in task_dict.keys():
        task_obj = task_dict[task_name]
        # Grouped tasks come back as (group, task) tuples; unwrap them.
        if type(task_obj) == tuple:
            group, task_obj = task_obj
            if task_obj is None:
                continue
        lm.task_dict[task_name] = task_obj.dataset

        config = task_obj._config
        # CLI-supplied generation kwargs override the task YAML settings.
        if config["output_type"] == "generate_until" and gen_kwargs:
            config["generation_kwargs"].update(gen_kwargs)

        if predict_only:
            log_samples = True
            eval_logger.info(f"Processing {task_name} in output-only mode. Metrics will not be calculated!")
            # we have to change the class properties post-hoc. This is pretty hacky.
            task_obj.override_metric(metric_name="bypass")

        if num_fewshot is not None:
            if config["num_fewshot"] == 0:
                eval_logger.info(f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored.")
            else:
                default_num_fewshot = config["num_fewshot"]
                eval_logger.warning(f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}")

                task_obj._config["num_fewshot"] = num_fewshot

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        bootstrap_iters=bootstrap_iters,
        show_task_to_terminal=show_task_to_terminal,
        log_samples=log_samples,
        cli_args=cli_args,
    )

    if lm.rank == 0:
        # add info about the model and few shot config
        results["model_configs"] = {
            "model": model if isinstance(model, str) else model.model.config._name_or_path,
            "model_args": model_args,
            "batch_size": batch_size,
            "device": device,
            "limit": limit,
            "bootstrap_iters": bootstrap_iters,
            "gen_kwargs": gen_kwargs,
        }
        results["git_hash"] = get_git_commit_hash()
        return results
    else:
        return None
158
-
159
-
160
# Suffix appended to metric names when reporting decontaminated variants.
decontaminate_suffix = "_decontaminate"
161
-
162
-
163
- @positional_deprecated
164
- def evaluate(
165
- lm,
166
- task_dict,
167
- limit=None,
168
- bootstrap_iters: int = 100000,
169
- show_task_to_terminal: bool = False,
170
- log_samples: bool = True,
171
- cli_args=None,
172
- ):
173
- """Instantiate and evaluate a model on a list of tasks.
174
-
175
- :param lm: obj
176
- Language Model
177
- :param task_dict: dict[str, Task]
178
- Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
179
- :param limit: int, optional
180
- Limit the number of examples per task (only use this for testing)
181
- :param bootstrap_iters:
182
- Number of iterations for bootstrap statistics
183
- :param show_task_to_terminal: bool
184
- If True, write out an example document and model input for checking task integrity
185
- :param log_samples: bool
186
- If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
187
- :return
188
- Dictionary of results
189
- """
190
-
191
- # stores the final result for each task, for each metric/filter pair.
192
- results = collections.defaultdict(dict)
193
- # Tracks each task's version.
194
- versions = collections.defaultdict(dict)
195
- # Tracks the YAML configs of all chosen tasks.
196
- configs = collections.defaultdict(dict)
197
- # logs info about each document evaluated.
198
- samples = collections.defaultdict(list)
199
- # tracks all Instances/requests a model must generate output on.
200
- requests = collections.defaultdict(list)
201
- # Aggregated task scores presented with groups
202
- results_agg = collections.defaultdict(dict)
203
- # Aggregated groups scores only
204
- groups_agg = collections.defaultdict(dict)
205
- # stores the amount to pad out reqs per req. type so that
206
- # number of fwd passes per distributed rank is equal
207
- padding_requests = collections.defaultdict(int)
208
- # store the hierarchy to do proper ordering
209
- task_hierarchy = collections.defaultdict(list)
210
- # store the ordering of tasks and groups
211
- task_order = collections.defaultdict(int)
212
- task_group_alias = collections.defaultdict(dict)
213
- # store num-fewshot value per task
214
- num_fewshot = collections.defaultdict(int)
215
-
216
- # get lists of each type of request
217
- for task_name, task in task_dict.items():
218
- if type(task) == tuple:
219
- group_name, task = task
220
- task_hierarchy[group_name].append(task_name)
221
- versions[group_name] = "N/A"
222
-
223
- else:
224
- group_name = None
225
- task_hierarchy[task_name] = []
226
-
227
- if task is None:
228
- continue
229
-
230
- versions[task_name] = task.VERSION
231
- configs[task_name] = dict(task.dump_config())
232
-
233
- if "num_fewshot" in configs[task_name]:
234
- n_shot = configs[task_name]["num_fewshot"]
235
- else:
236
- n_shot = 0
237
- num_fewshot[task_name] = n_shot
238
-
239
- if "task_alias" in configs[task_name]:
240
- task_group_alias[task_name] = configs[task_name]["task_alias"]
241
-
242
- if ("group_alias" in configs[task_name]) and (group_name not in task_group_alias) and (group_name is not None):
243
- task_group_alias[group_name] = configs[task_name]["group_alias"]
244
-
245
- if limit is not None:
246
- if task.has_test_docs():
247
- task_docs = task.test_docs()
248
- elif task.has_validation_docs():
249
- task_docs = task.validation_docs()
250
- else:
251
- raise RuntimeError("Task has neither test_docs nor validation_docs")
252
- limit = int(len(task_docs) * limit) if limit < 1.0 else int(limit)
253
-
254
- task.build_all_requests(limit=limit, rank=lm.rank, world_size=lm.world_size)
255
-
256
- eval_logger.debug(f"Task: {task_name}; number of requests on rank {lm.rank}: {len(task.instances)}")
257
-
258
- if show_task_to_terminal:
259
- for inst in task.instances:
260
- # print the prompt for the first few documents
261
- if inst.doc_id < 1:
262
- eval_logger.info(
263
- f"Task: {task_name}; document {inst.doc_id}; context prompt (starting on next line):\
264
- \n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)"
265
- )
266
- eval_logger.info(f"Request: {str(inst)}")
267
-
268
- # aggregate Instances by LMM method requested to get output.
269
- for instance in task.instances:
270
- reqtype = instance.request_type
271
- requests[reqtype].append(instance)
272
-
273
- if lm.world_size > 1:
274
- instances_rnk = torch.tensor(len(task._instances), device=lm.device)
275
- gathered_item = lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
276
-
277
- # compute number of pseudobatches to pad with (FSDP/DDP require even batches among ranks)
278
- numpad = max(gathered_item) - gathered_item[lm.rank]
279
- padding_requests[task.OUTPUT_TYPE] += numpad
280
-
281
- ### Run LMM on inputs, get all outputs ###
282
- # execute each type of request
283
- for reqtype, reqs in requests.items():
284
- eval_logger.info("Running {} requests".format(reqtype))
285
- # create `K` copies of each request `req` based off `K = req.repeats`
286
- cloned_reqs = []
287
- for req in reqs:
288
- cloned_reqs.extend([req] * req.repeats)
289
-
290
- if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
291
- for _ in range(padding_requests[reqtype]):
292
- cloned_reqs.extend([req] * req.repeats)
293
-
294
- # run requests through model
295
- resps = getattr(lm, reqtype)(cloned_reqs) # Choiszt run generate until
296
-
297
- # put responses from model into a list of length K for each request.
298
- for x, req in zip(resps, cloned_reqs):
299
- req.resps.append(x)
300
-
301
- if lm.world_size > 1:
302
- lm.accelerator.wait_for_everyone()
303
-
304
- ### Postprocess outputs ###
305
- # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
306
- for task_name, task in task_dict.items():
307
- if type(task) == tuple:
308
- group, task = task
309
- if task is None:
310
- continue
311
- task.apply_filters()
312
-
313
- ### Collect values of metrics on all datapoints ###
314
- vals = collections.defaultdict(list)
315
-
316
- # unpack results and sort back in order and return control to Task
317
- for task_name, task in task_dict.items():
318
- if type(task) == tuple:
319
- group, task = task
320
- if task is None:
321
- continue
322
- # TODO: make it possible to use a different metric per filter
323
- # iterate over different filters used
324
- for key in task.instances[0].filtered_resps.keys():
325
- # hack: remove image columns to speed avoid loading images and speed up postprocessing
326
- # reason: doc_iterator will actually load image if it's in the doc.
327
- docs = task.test_docs() if task.has_test_docs() else task.validation_docs()
328
- if "d170" not in task_name and "dc100" not in task_name and "dc200" not in task_name and "llava_wilder" not in task_name and "livebench" not in task_name:
329
- remove_cols = []
330
- features = docs.features
331
- # If it is an Image instance or a Sequence of Image instance. Remove it
332
- for feature in features:
333
- if isinstance(features[feature], Image):
334
- remove_cols.append(feature)
335
- elif isinstance(features[feature], Sequence) and isinstance(features[feature].feature, Image):
336
- remove_cols.append(feature)
337
- if remove_cols:
338
- docs = docs.remove_columns(remove_cols)
339
-
340
- ####################### Processing with Full Docs Mode #######################
341
- if task_name in ["videochatgpt_consistency"]:
342
- full_docs = True
343
- else:
344
- full_docs = False
345
-
346
- doc_iterator = itertools.islice(enumerate(docs), lm.rank, limit, lm.world_size)
347
- # Instead of converting the iterator to a list, use `itertools.tee` to create a parallel iterator for counting
348
- # doc_iterator, doc_iterator_for_counting = itertools.tee(doc_iterator)
349
- # Don't use above one, this would crash if doc_iterator_for_counting contains too many objects and very slow
350
- doc_iterator_for_counting = itertools.islice(range(len(task.test_docs())), lm.rank, limit, lm.world_size) if task.has_test_docs() else itertools.islice(range(len(task.validation_docs())), lm.rank, limit, lm.world_size)
351
- total_docs = sum(1 for _ in doc_iterator_for_counting)
352
- pbar = tqdm(total=total_docs, desc=f"Postprocessing", disable=(lm.rank != 0))
353
- for doc_id, doc in doc_iterator:
354
- # subset instances to only this document id ; sort by idx
355
- requests = list(filter(lambda x: x.doc_id == doc_id, task.instances))
356
- requests.sort(key=lambda x: x.idx)
357
- if full_docs:
358
- metrics = task.process_results(doc, [req.filtered_resps[key] for req in requests], full_docs=docs)
359
- else:
360
- metrics = task.process_results(doc, [req.filtered_resps[key] for req in requests])
361
- if log_samples:
362
- target = task.doc_to_target(doc)
363
- example = {
364
- "doc_id": doc_id,
365
- "target": target,
366
- "doc": doc,
367
- "arguments": [tuple(a for a in req.args if isinstance(a, (int, str))) for req in requests], # do not include image
368
- "resps": [req.resps for req in requests],
369
- "filtered_resps": [req.filtered_resps[key] for req in requests],
370
- }
371
- example.update(metrics)
372
- samples[task_name].append(example)
373
- for metric, value in metrics.items():
374
- vals[(task_name, key, metric)].append(value)
375
- pbar.update(1)
376
-
377
- pbar.close()
378
-
379
- if lm.world_size > 1:
380
- # if multigpu, then gather data across all ranks
381
- # first gather logged samples across all ranks
382
- for task_name, task_samples in list(samples.items()):
383
- full_samples = [None] * lm.world_size
384
- torch.distributed.all_gather_object(full_samples, task_samples)
385
- samples[task_name] = list(itertools.chain.from_iterable(full_samples))
386
- # then collect metrics across all ranks
387
- vals_torch = collections.defaultdict(list)
388
- for (task_name, key, metric), items in vals.items():
389
- numitem = 0
390
- if type(items[0]) == tuple:
391
- numitem = len(items[0])
392
-
393
- if isinstance(items[0], (str, list, dict)):
394
- # handle the string case
395
- gathered_items = [None] * lm.accelerator.num_processes
396
- torch.distributed.all_gather_object(gathered_items, items)
397
-
398
- gathered_item = list(itertools.chain.from_iterable(gathered_items))
399
- else:
400
- # distributed gather requires all ranks to have same dimensions
401
- # so we pad out with float32 min value
402
- pad_value = torch.finfo(torch.float32).min
403
- metrics_tensor = torch.tensor(items, device=lm.device)
404
-
405
- original_dtype = metrics_tensor.dtype # store original dtype
406
- torch_device_tensor = lm.accelerator.pad_across_processes(metrics_tensor.to(torch.float32), pad_index=pad_value)
407
- gathered_item = lm.accelerator.gather(torch_device_tensor)
408
-
409
- if numitem > 0:
410
- gathered_filtered = gathered_item[gathered_item[:, 0] != pad_value]
411
- else:
412
- gathered_filtered = gathered_item[gathered_item != pad_value]
413
-
414
- gathered_item = gathered_filtered.to(original_dtype).cpu().detach().numpy().tolist()
415
- # reconvert if we were passed a tuple of values
416
- if numitem > 0:
417
- gathered_item = [tuple(g) for g in gathered_item]
418
-
419
- if lm.rank == 0:
420
- vals_torch[(task_name, key, metric)] = gathered_item
421
-
422
- vals = vals_torch
423
- # Ensure all ranks wait for rank 0 to finish aggregation
424
- torch.distributed.barrier()
425
-
426
- if lm.rank == 0:
427
- ### Get task ordering for correct sample-wide aggregation
428
- group_to_task = {}
429
- for group in task_hierarchy.keys():
430
- if group not in task_order:
431
- task_order[group] = 0
432
-
433
- if len(task_hierarchy[group]) > 0:
434
- group_to_task[group] = task_hierarchy[group].copy()
435
-
436
- for task in task_hierarchy[group]:
437
- if task in task_order:
438
- task_order[task] += 1
439
- else:
440
- task_order[task] = 1 + task_order[group]
441
-
442
- if task in task_hierarchy:
443
- group_to_task[group].remove(task)
444
- group_to_task[group].extend(task_hierarchy[task])
445
-
446
- task_to_group = {}
447
- for group in group_to_task:
448
- for task in group_to_task[group]:
449
- if task in task_to_group:
450
- task_to_group[task].append(group)
451
- else:
452
- task_to_group[task] = [group]
453
-
454
- ### Aggregate results over all datapoints ###
455
- # aggregate results ; run bootstrap CIs
456
- for (task_name, key, metric), items in vals.items():
457
- task = task_dict[task_name]
458
- metric_key = metric + "," + key
459
-
460
- if type(task) == tuple:
461
- group_name, task = task
462
- else:
463
- group_name = None
464
-
465
- if metric not in task.aggregation():
466
- continue
467
-
468
- agg_fn = task.aggregation()[metric]
469
-
470
- # Bo: for models that need to know the args to save to correct path
471
- if inspect.getfullargspec(agg_fn).args == ["results", "args"]:
472
- results[task_name][metric_key] = agg_fn(items, cli_args)
473
- else:
474
- # Bo: for models only need agg items
475
- results[task_name][metric_key] = agg_fn(items)
476
-
477
- results[task_name]["samples"] = len(items)
478
-
479
- # hotfix: bleu, chrf, ter seem to be really expensive to bootstrap
480
- # so we run them less iterations. still looking for a cleaner way to do this
481
- if bootstrap_iters > 0:
482
- stderr = lmms_eval.api.metrics.stderr_for_metric(
483
- metric=task.aggregation()[metric],
484
- bootstrap_iters=min(bootstrap_iters, 100) if metric in ["bleu", "chrf", "ter"] else bootstrap_iters,
485
- )
486
-
487
- if stderr is not None and len(items) > 1:
488
- results[task_name][metric + "_stderr" + "," + key] = stderr(items)
489
- else:
490
- results[task_name][metric + "_stderr" + "," + key] = "N/A"
491
-
492
- if bool(results):
493
- for group, task_list in reversed(task_hierarchy.items()):
494
- if task_list == []:
495
- total_size = results[group]["samples"]
496
- else:
497
- total_size = 0
498
-
499
- for task in task_list:
500
- metrics = results[task]
501
-
502
- current_size = metrics.pop("samples")
503
- # TODO: There should be a way for users
504
- # to toggle between weighted and
505
- # unweighted averaging
506
- # For unweighted averaging, use:
507
- # current_size = 1
508
-
509
- all_stderr = []
510
- for metric in [key for key in metrics.keys() if "_stderr" not in key]:
511
- stderr = "_stderr,".join(metric.split(","))
512
- stderr_score = results[task][stderr]
513
- var_score = stderr_score**2 if stderr_score != "N/A" else 0
514
- metric_score = results[task][metric]
515
-
516
- all_stderr.append(stderr)
517
-
518
- if metric_score is None:
519
- results[group][metric] = None
520
- results[group][stderr] = 0
521
- continue
522
-
523
- if metric in results[group]:
524
- if isinstance(results[group][metric], str) == False:
525
- results[group][metric] = (results[group][metric] * total_size + metric_score * current_size) / (total_size + current_size)
526
- # $$s_z^2 = \frac{(n-1) s_x^2 + (m-1) s_y^2}{n+m-1} + \frac{nm(\bar x - \bar y)^2}{(n+m)(n+m-1)}.$$
527
- results[group][stderr] = ((total_size - 1) * results[group][stderr] + (current_size - 1) * var_score) / (total_size + current_size - 1) + total_size * current_size / (
528
- (total_size + current_size) * (total_size + current_size - 1)
529
- ) * (results[group][metric] - metric_score) ** 2
530
- else:
531
- # accuracy = re.search(r'acc: ([\d.]+)%', results[group][metric]).group(1)
532
- # score = re.search(r'score: ([\d.]+)', results[group][metric]).group(1)
533
- # group_accuracy = float(accuracy)
534
- # group_score = float(score)
535
- # group_accuracy = (group_accuracy * total_size + metric_score * current_size) / total_size
536
- # group_score = (group_score * total_size + metric_score * current_size) / total_size
537
- # results[group][metric] = "Acc: " + str(group_accuracy) + " Score: " + str(group_score)
538
- results[group][metric] = "group_results"
539
- results[group][stderr] = 0
540
- else:
541
- results[group][metric] = metric_score
542
- results[group][stderr] = var_score
543
-
544
- total_size += current_size
545
-
546
- for stderr in all_stderr:
547
- results[group][stderr] = np.sqrt(results[group][stderr])
548
-
549
- results[group]["samples"] = total_size
550
-
551
- def print_tasks(task_hierarchy, task_order, task_version, task_group_alias):
552
- results_agg = collections.defaultdict(dict)
553
- groups_agg = collections.defaultdict(dict)
554
- for group_name, task_list in task_hierarchy.items():
555
- order = task_order[group_name]
556
- results_agg[group_name] = results[group_name].copy()
557
- results_agg[group_name]["tab"] = order
558
-
559
- if (order < max(task_order.values())) and (len(task_list) > 0):
560
- groups_agg[group_name] = results[group_name].copy()
561
- groups_agg[group_name]["tab"] = order
562
-
563
- if task_list != []:
564
- for task in sorted(task_list):
565
- if task in task_hierarchy:
566
- _task_hierarchy = {task: task_hierarchy[task]}
567
- else:
568
- _task_hierarchy = {task: []}
569
-
570
- _results_agg, _groups_agg, task_version = print_tasks(_task_hierarchy, task_order, task_version, task_group_alias)
571
-
572
- results_agg = {**results_agg, **_results_agg}
573
- groups_agg = {**groups_agg, **_groups_agg}
574
-
575
- return results_agg, groups_agg, task_version
576
-
577
- results_agg, groups_agg, versions = print_tasks(task_hierarchy, task_order, versions, task_group_alias)
578
-
579
- for task in results_agg:
580
- task_results = results_agg[task]
581
-
582
- if "samples" in task_results:
583
- task_results.pop("samples")
584
-
585
- tab_string = ""
586
- if "tab" in task_results:
587
- tab = task_results.pop("tab")
588
- tab_string = " " * tab + "- " if tab > 0 else ""
589
-
590
- if task in task_group_alias:
591
- task_alias = task_group_alias[task]
592
- results_agg[task]["alias"] = tab_string + task_alias
593
- else:
594
- results_agg[task]["alias"] = tab_string + task
595
-
596
- for group in groups_agg:
597
- group_results = groups_agg[group]
598
-
599
- if "samples" in group_results:
600
- group_results.pop("samples")
601
-
602
- tab_string = ""
603
- if "tab" in group_results:
604
- tab = group_results.pop("tab")
605
- tab_string = " " * tab + "- " if tab > 0 else ""
606
-
607
- if group in task_group_alias:
608
- group_alias = task_group_alias[group]
609
- groups_agg[group]["alias"] = tab_string + group_alias
610
- else:
611
- groups_agg[group]["alias"] = tab_string + group
612
-
613
- for group_name, task_list in task_hierarchy.items():
614
- if task_list != []:
615
- num_fewshot[group_name] = num_fewshot[task_list[0]]
616
-
617
- results_dict = {
618
- "results": dict(results_agg.items()),
619
- **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
620
- "configs": dict(sorted(configs.items())),
621
- "versions": dict(sorted(versions.items())),
622
- "n-shot": dict(sorted(num_fewshot.items())),
623
- }
624
- if log_samples:
625
- results_dict["samples"] = dict(samples)
626
-
627
- return results_dict
628
-
629
- else:
630
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/filters/__init__.py DELETED
@@ -1,45 +0,0 @@
1
- from lmms_eval.api.filter import FilterEnsemble, Filter
2
- from . import selection
3
- from . import extraction
4
- from . import transformation
5
-
6
-
7
- FILTER_REGISTRY = {
8
- "take_first": selection.TakeFirstFilter,
9
- "regex": extraction.RegexFilter,
10
- "majority_vote": selection.MajorityVoteFilter,
11
- "take_first_k": selection.TakeKFilter,
12
- "remove_whitespace": extraction.WhitespaceFilter,
13
- "lowercase": transformation.LowercaseFilter,
14
- "uppercase": transformation.UppercaseFilter,
15
- "map": transformation.MapFilter,
16
- "multi_choice_regex": extraction.MultiChoiceRegexFilter,
17
- # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function
18
- # that takes an input and returns a scalar and then should select the max reward,
19
- # or should implement different filters for different ways of handling a reward model's inference.
20
- # "arg_max": selection.ArgMaxFilter,
21
- }
22
-
23
-
24
- def get_filter(filter_name):
25
- if filter_name in FILTER_REGISTRY:
26
- return FILTER_REGISTRY[filter_name]
27
- else:
28
- return filter_name
29
-
30
-
31
- def build_filter_ensemble(filter_name, components):
32
- """
33
- Create a filtering pipeline.
34
- """
35
- filters = []
36
- for function, kwargs in components:
37
- if kwargs is None:
38
- f = get_filter(function)()
39
- else:
40
- # create a filter given its name in the registry
41
- f = get_filter(function)(**kwargs) # TODO: pass kwargs to filters properly
42
- # add the filter as a pipeline step
43
- filters.append(f)
44
-
45
- return FilterEnsemble(name=filter_name, filters=filters)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/filters/decontamination.py DELETED
@@ -1,23 +0,0 @@
1
- from lmms_eval.api.filter import Filter
2
-
3
-
4
- class DecontaminationFilter(Filter):
5
- """
6
- A filter which evaluates
7
- """
8
-
9
- name = "track_decontamination"
10
-
11
- def __init__(self, path) -> None:
12
- """
13
-
14
- TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path").
15
- should further cache result on a given (task_name, doc_id)
16
- """
17
- self._decontam_results = None
18
-
19
- def apply(self, resps, docs) -> None:
20
- """
21
- Return {"no_contamination", "only_contamination"} keys for the 2 different subsets
22
- """
23
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/filters/extraction.py DELETED
@@ -1,278 +0,0 @@
1
- import re
2
- import sys
3
- import unicodedata
4
- from lmms_eval.api.filter import Filter
5
-
6
-
7
- class WhitespaceFilter(Filter):
8
- """ """
9
-
10
- def __init__(self) -> None:
11
- pass
12
-
13
- def apply(self, resps, docs):
14
- def filter_set(inst):
15
- filtered_resp = []
16
- for resp in inst:
17
- if resp.startswith(" "):
18
- resp = resp[1:]
19
-
20
- filtered_resp.append(resp)
21
-
22
- return filtered_resp
23
-
24
- filtered_resps = [filter_set(resp) for resp in resps]
25
-
26
- return filtered_resps
27
-
28
-
29
- class RegexFilter(Filter):
30
- """ """
31
-
32
- def __init__(
33
- self,
34
- regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
35
- group_select=0,
36
- fallback: str = "[invalid]",
37
- ) -> None:
38
- """
39
- pass a string `regex` to run `re.compile(r"regex")` on.
40
- `fallback` defines the output returned if no matches for the regex are located.
41
- """
42
- self.regex_pattern = regex_pattern
43
- self.regex = re.compile(regex_pattern)
44
- self.group_select = group_select
45
- self.fallback = fallback
46
-
47
- def apply(self, resps, docs):
48
- # here, we assume we have a list, in which each element is
49
- # a list of model responses for some particular input/target pair.
50
- # so we process each of these (same input/target response sets)
51
- # independently (and keep them a list.)
52
- def filter_set(inst):
53
- filtered = []
54
- for resp in inst:
55
- match = self.regex.findall(resp)
56
- if match:
57
- match = match[self.group_select]
58
- if isinstance(match, tuple):
59
- match = [m for m in match if m][0]
60
- match = match.strip()
61
- else:
62
- match = self.fallback
63
- filtered.append(match)
64
- return filtered
65
-
66
- # print(resps)
67
- filtered_resps = list(map(lambda x: filter_set(x), resps))
68
- # print(filtered_resps)
69
-
70
- return filtered_resps
71
-
72
-
73
- class MultiChoiceRegexFilter(RegexFilter):
74
- """
75
- A filter used to extract a model's answer on multiple choice questions with
76
- letter answers. assumes each document has a "choices" field
77
- containing the list of answer choices and that the answer label symbols
78
- are of the form (A), (B), (C), ... or A, B, C.
79
- """
80
-
81
- def __init__(
82
- self,
83
- regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
84
- group_select=0,
85
- fallback: str = "[invalid]",
86
- ignore_case=False,
87
- ignore_punctuation=False,
88
- regexes_to_ignore=None,
89
- ) -> None:
90
- """
91
- regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
92
- - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
93
- - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
94
- group_select: Selects the (group_select)th match from the findall result.
95
- ignore_case: Ignores the case during step 1 matching
96
- ignore_punctuation: Remove the punctuation during step 1 matching
97
- regexes_to_ignore: Remove these regexes during step 1 matching
98
- """
99
- super().__init__(regex_pattern, group_select, fallback)
100
- self.ignore_case = ignore_case
101
- self.ignore_punctuation = ignore_punctuation
102
- self.regexes_to_ignore = regexes_to_ignore
103
-
104
- def apply(self, resps, docs):
105
- # here, we assume we have a list, in which each element is
106
- # a list of model responses for some particular input/target pair.
107
- # so we process each of these (same input/target response sets)
108
- # independently (and keep them a list.)
109
-
110
- def find_match(regex, resp, convert_dict={}):
111
- match = regex.findall(resp)
112
- if match:
113
- match = match[self.group_select]
114
- if isinstance(match, tuple):
115
- match = [m for m in match if m][0]
116
- match = match.strip()
117
- if match and match in convert_dict:
118
- match = convert_dict[match]
119
- return match
120
-
121
- punct_tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P"))
122
-
123
- def filter_ignores(st):
124
- if self.regexes_to_ignore is not None:
125
- for s in self.regexes_to_ignore:
126
- st = re.sub(s, "", st)
127
-
128
- if self.ignore_case:
129
- st = st.lower()
130
-
131
- if self.ignore_punctuation:
132
- # https://stackoverflow.com/a/266162
133
- st = st.translate(punct_tbl)
134
- return st
135
-
136
- filtered_resps = []
137
-
138
- for r, doc in zip(resps, docs):
139
- fallback_regexes = []
140
- choice_to_alpha = {}
141
- next_alpha = "A"
142
-
143
- without_paren_fallback_regexes = []
144
- without_paren_to_target = {}
145
-
146
- choices = doc["choices"]
147
- for c in choices:
148
- m = filter_ignores(c.strip())
149
- fallback_regexes.append(f"{re.escape(m)}")
150
- choice_to_alpha[m] = f"({next_alpha})"
151
-
152
- without_paren_fallback_regexes.append(next_alpha)
153
- without_paren_to_target[next_alpha] = f"({next_alpha})"
154
-
155
- next_alpha = chr(ord(next_alpha) + 1)
156
- fallback_regex = re.compile("|".join(fallback_regexes))
157
- without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
158
- without_paren_fallback_regex = re.compile(f":[\s]*({without_paren_fallback_regex})")
159
-
160
- filtered = []
161
- for resp in r:
162
- match = find_match(self.regex, resp)
163
- if not match:
164
- match = find_match(fallback_regex, filter_ignores(resp), choice_to_alpha)
165
- if not match:
166
- match = find_match(without_paren_fallback_regex, resp, without_paren_to_target)
167
- if not match:
168
- match = self.fallback
169
- filtered.append(match)
170
- filtered_resps.append(filtered)
171
-
172
- return filtered_resps
173
-
174
-
175
- class ExtendedRegexFilter(RegexFilter):
176
- punct_tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P"))
177
-
178
- def __init__(
179
- self,
180
- regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
181
- group_select=0,
182
- fallback: str = "[invalid]",
183
- ignore_case=False,
184
- ignore_punctuation=False,
185
- regexes_to_ignore=None,
186
- ) -> None:
187
- super().__init__(regex_pattern, group_select, fallback)
188
- self.ignore_case = ignore_case
189
- self.ignore_punctuation = ignore_punctuation
190
- self.regexes_to_ignore = regexes_to_ignore
191
-
192
- def filter_ignores(self, st):
193
- if self.regexes_to_ignore is not None:
194
- for s in self.regexes_to_ignore:
195
- st = re.sub(s, "", st)
196
-
197
- if self.ignore_case:
198
- st = st.lower()
199
-
200
- if self.ignore_punctuation:
201
- # https://stackoverflow.com/a/266162
202
- st = st.translate(self.punct_tbl)
203
- return st
204
-
205
- def find_match(self, regex, resp, convert_dict={}):
206
- match = regex.findall(resp)
207
- if match:
208
- match = match[self.group_select]
209
- if isinstance(match, tuple):
210
- match = [m for m in match if m][0]
211
- match = match.strip()
212
- if match and match in convert_dict:
213
- match = convert_dict[match]
214
- return match
215
-
216
-
217
- # Designed for the AI2D/RealworldQA dataset
218
- class SimpleMultiChoiceRegexFilter(ExtendedRegexFilter):
219
- def __init__(self, *args, **kwargs):
220
- """
221
- regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
222
- - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
223
- - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
224
- group_select: Selects the (group_select)th match from the findall result.
225
- ignore_case: Ignores the case during step 1 matching
226
- ignore_punctuation: Remove the punctuation during step 1 matching
227
- regexes_to_ignore: Remove these regexes during step 1 matching
228
- """
229
- super().__init__(*args, **kwargs)
230
-
231
- def apply(self, resps, docs):
232
- # here, we assume we have a list, in which each element is
233
- # a list of model responses for some particular input/target pair.
234
- # so we process each of these (same input/target response sets)
235
- # independently (and keep them a list.)
236
-
237
- filtered_resps = []
238
-
239
- for r, doc in zip(resps, docs):
240
- fallback_regexes = []
241
- choice_to_alpha = {}
242
- next_alpha = "A"
243
-
244
- without_paren_fallback_regexes = []
245
- without_paren_to_target = {}
246
-
247
- # Regex to extract multiple choice options from the question
248
- multiple_choices_regex = re.compile(r"\b([A-Z])\.\s+([^\n]*)")
249
- matches = multiple_choices_regex.findall(doc["question"])
250
-
251
- # Build regex patterns and mappings for each choice
252
- for m in matches:
253
- choice_text = m[1].strip()
254
- fallback_regexes.append(f"{re.escape(choice_text)}")
255
- choice_to_alpha[choice_text] = next_alpha
256
-
257
- next_alpha = chr(ord(next_alpha) + 1)
258
-
259
- # Compile regex to match any of the extracted choices
260
- fallback_regex = re.compile("|".join(fallback_regexes))
261
-
262
- # Process each response
263
- filtered = []
264
- for resp in r:
265
- # Remove any punctuation and extra spaces
266
- cleaned_resp = re.sub(r"[^\w\s]", "", resp).strip()
267
- # Try to match cleaned response with the choice text
268
- match = fallback_regex.search(cleaned_resp)
269
- if match and match.group() in choice_to_alpha:
270
- # Map the matched choice text back to its corresponding letter
271
- filtered.append(choice_to_alpha[match.group()])
272
- else:
273
- # If no match, return the cleaned response
274
- filtered.append(cleaned_resp)
275
-
276
- filtered_resps.append(filtered[0])
277
-
278
- return filtered_resps
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/filters/selection.py DELETED
@@ -1,48 +0,0 @@
1
- from collections import Counter
2
-
3
- from lmms_eval.api.filter import Filter
4
-
5
-
6
- class TakeFirstFilter(Filter):
7
- def __init__(self) -> None:
8
- """
9
- Can define custom behavior here, if an individual instantiation of a Filter class should have state.
10
- """
11
-
12
- def apply(self, resps, docs):
13
- """
14
- Assuming each entry of `resps` is a list of model responses, we discard all but the first response.
15
- """
16
- return map(lambda r: r[0], resps)
17
-
18
-
19
- class TakeKFilter(Filter):
20
- def __init__(self, *args, **kwargs) -> None:
21
- self.k = kwargs.pop("k")
22
-
23
- super().__init__(*args, **kwargs)
24
-
25
- def apply(self, resps, docs):
26
- # check we have at least k responses per doc, else we can't take the first k
27
- assert len(resps[0]) >= self.k, f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats ."
28
- return map(lambda r: r[: self.k], resps)
29
-
30
-
31
- class MajorityVoteFilter(Filter):
32
- def __init__(self) -> None:
33
- """
34
- Can define custom behavior here, if an individual instantiation of a Filter class should have state.
35
- """
36
-
37
- def apply(self, resps, docs):
38
- """
39
- Each entry of `resps` is a list of model responses.
40
- We select the response that occurs most frequently in each entry of `resps`.
41
- """
42
-
43
- def select_majority(resp):
44
- counts = Counter(resp)
45
- vote = counts.most_common(1)[0][0]
46
- return vote
47
-
48
- return map(lambda r: [select_majority(r)], resps)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/filters/transformation.py DELETED
@@ -1,48 +0,0 @@
1
- from lmms_eval.api.filter import Filter
2
-
3
-
4
- class LowercaseFilter(Filter):
5
- def __init__(self) -> None:
6
- pass
7
-
8
- def apply(self, resps, docs):
9
- def filter_set(inst):
10
- return [resp.lower() for resp in inst]
11
-
12
- return [filter_set(resp) for resp in resps]
13
-
14
-
15
- class UppercaseFilter(Filter):
16
- def __init__(self) -> None:
17
- pass
18
-
19
- def apply(self, resps, docs):
20
- def filter_set(inst):
21
- return [resp.upper() for resp in inst]
22
-
23
- return [filter_set(resp) for resp in resps]
24
-
25
-
26
- class MapFilter(Filter):
27
- def __init__(self, mapping_dict: dict = {}, default_value=None) -> None:
28
- """
29
- Initializes the MapFilter with a given mapping dictionary and default value.
30
-
31
- Args:
32
- - mapping_dict (dict): A dictionary containing the key-value mappings.
33
- Default is an empty dictionary.
34
- - default_value (Any): The value to be returned when a key is not found in the mapping_dict.
35
- Default is None.
36
-
37
- Example:
38
- mapper = MapFilter({'A': 1, 'B': 2}, default_value=0)
39
- """
40
- assert isinstance(mapping_dict, dict), "Provided mapping_dict is not a dictionary"
41
- self.mapping_dict = mapping_dict
42
- self.default_value = default_value
43
-
44
- def apply(self, resps, docs):
45
- def filter_set(inst):
46
- return [self.mapping_dict.get(resp, self.default_value) for resp in inst]
47
-
48
- return [filter_set(resp) for resp in resps]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/logging_utils.py DELETED
@@ -1,367 +0,0 @@
1
- # Code mostly from: https://github.com/EleutherAI/lm-evaluation-harness/pull/1339, credit to: https://github.com/ayulockin
2
- import copy
3
-
4
- import re
5
- import os
6
- import json
7
- import glob
8
- import pandas as pd
9
- import numpy as np
10
- from datetime import datetime
11
- from typing import Any, Dict, List, Literal, Tuple, Union
12
- from packaging.version import Version
13
- from lmms_eval import utils
14
- import tenacity
15
- from loguru import logger
16
-
17
- try:
18
- import wandb
19
-
20
- assert Version(wandb.__version__) >= Version("0.13.6")
21
- if Version(wandb.__version__) < Version("0.13.6"):
22
- wandb.require("report-editing:v0")
23
- except Exception as e:
24
- logger.warning("To use the wandb reporting functionality please install wandb>=0.13.6.\n" "To install the latest version of wandb run `pip install wandb --upgrade`\n" f"{e}")
25
-
26
-
27
- def remove_none_pattern(input_string):
28
- # Define the pattern to match ',none' at the end of the string
29
- pattern = re.compile(r",none$")
30
-
31
- # Use sub() to replace ',none' with an empty string
32
- result = re.sub(pattern, "", input_string)
33
-
34
- # check if the input_string changed
35
- removed = result != input_string
36
-
37
- return result, removed
38
-
39
-
40
- def _handle_non_serializable(o: Any) -> Union[int, str, list]:
41
- """Handle non-serializable objects by converting them to serializable types.
42
-
43
- Args:
44
- o (Any): The object to be handled.
45
-
46
- Returns:
47
- Union[int, str, list]: The converted object. If the object is of type np.int64 or np.int32,
48
- it will be converted to int. If the object is of type set, it will be converted
49
- to a list. Otherwise, it will be converted to str.
50
- """
51
- if isinstance(o, np.int64) or isinstance(o, np.int32):
52
- return int(o)
53
- elif isinstance(o, set):
54
- return list(o)
55
- else:
56
- return str(o)
57
-
58
-
59
- def get_wandb_printer() -> Literal["Printer"]:
60
- """Returns a wandb printer instance for pretty stdout."""
61
- from wandb.sdk.lib.printer import get_printer
62
- from wandb.sdk.wandb_settings import Settings
63
-
64
- printer = get_printer(Settings()._jupyter)
65
- return printer
66
-
67
-
68
- # class WandbLogger:
69
- class WandbLogger:
70
- def __init__(self, args):
71
- self.wandb_args = utils.simple_parse_args_string(args.wandb_args)
72
- self.args = args
73
- self.all_args_dict = vars(args)
74
- self.printer = get_wandb_printer()
75
- try:
76
- self.init_run()
77
- except Exception as e:
78
- logger.warning(f"Failed to initialize W&B run: {e}")
79
- os.environ["WANDB_MODE"] = "offline"
80
- self.init_run()
81
-
82
- def finish(self):
83
- self.run.finish()
84
-
85
- @tenacity.retry(wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_attempt(5))
86
- def init_run(self):
87
- if "name" not in self.wandb_args:
88
- if "config" in self.all_args_dict and self.all_args_dict["config"] != "":
89
- self.wandb_args["name"] = self.all_args_dict["config"].split("/")[-1].replace(".yaml", "") + "/" + self.args.log_samples_suffix
90
- else:
91
- task_names = self.args.tasks.replace(",", "/")
92
- self.wandb_args["name"] = f"{self.args.model}/<{task_names}>/{self.args.log_samples_suffix}"
93
- if self.args.num_fewshot:
94
- self.wandb_args["name"] += f"_{self.args.num_fewshot}shot"
95
- if "project" not in self.wandb_args:
96
- self.wandb_args["project"] = "lmms-eval"
97
- # initialize a W&B run
98
- self.run = wandb.init(**self.wandb_args)
99
-
100
- def post_init(self, results: Dict[str, Any]) -> None:
101
- self.results: Dict[str, Any] = copy.deepcopy(results)
102
- self.task_names: List[str] = list(results.get("results", {}).keys())
103
- self.group_names: List[str] = list(results.get("groups", {}).keys())
104
-
105
- def _get_config(self) -> Dict[str, Any]:
106
- """Get configuration parameters."""
107
- self.task_configs = self.results.get("configs", {})
108
- cli_configs = self.results.get("config", {})
109
- configs = {
110
- "task_configs": self.task_configs,
111
- "cli_configs": cli_configs,
112
- }
113
-
114
- return configs
115
-
116
- def _sanitize_results_dict(self) -> Tuple[Dict[str, str], Dict[str, Any]]:
117
- """Sanitize the results dictionary."""
118
- _results = copy.deepcopy(self.results.get("results", dict()))
119
- _results["model_configs"] = self.results.get("model_configs", dict())
120
-
121
- # Remove None from the metric string name
122
- tmp_results = copy.deepcopy(_results)
123
- for task_name in self.task_names:
124
- task_result = tmp_results.get(task_name, dict())
125
- for metric_name, metric_value in task_result.items():
126
- _metric_name, removed = remove_none_pattern(metric_name)
127
- if removed:
128
- _results[task_name][_metric_name] = metric_value
129
- _results[task_name].pop(metric_name)
130
-
131
- # remove string valued keys from the results dict
132
- wandb_summary = {}
133
- for task in self.task_names:
134
- task_result = _results.get(task, dict())
135
- for metric_name, metric_value in task_result.items():
136
- if isinstance(metric_value, str):
137
- wandb_summary[f"{task}/{metric_name}"] = metric_value
138
-
139
- wandb_summary["model_configs"] = self.results.get("model_configs", dict())
140
- for summary_metric, summary_value in wandb_summary.items():
141
- if summary_metric != "model_configs":
142
- _task, _summary_metric = summary_metric.split("/")
143
- _results[_task].pop(_summary_metric)
144
-
145
- tmp_results = copy.deepcopy(_results)
146
- for task_name, task_results in tmp_results.items():
147
- if task_name != "model_configs":
148
- for metric_name, metric_value in task_results.items():
149
- _results[f"{task_name}/{metric_name}"] = metric_value
150
- _results[task_name].pop(metric_name)
151
- for task in self.task_names:
152
- _results.pop(task)
153
-
154
- return wandb_summary, _results
155
-
156
- def _log_results_as_table(self) -> None:
157
- """Generate and log evaluation results as a table to W&B."""
158
- columns = [
159
- "Model",
160
- "Args",
161
- "Tasks",
162
- "Version",
163
- "Filter",
164
- "num_fewshot",
165
- "Metric",
166
- "Value",
167
- "Stderr",
168
- ]
169
-
170
- def make_table(columns: List[str], key: str = "results"):
171
- table = wandb.Table(columns=columns)
172
- results = copy.deepcopy(self.results)
173
-
174
- model_name = results.get("model_configs").get("model")
175
- model_args = results.get("model_configs").get("model_args")
176
-
177
- for k, dic in results.get(key).items():
178
- if k in self.group_names and not key == "groups":
179
- continue
180
- version = results.get("versions").get(k)
181
- if version == "N/A":
182
- version = None
183
- n = results.get("n-shot").get(k)
184
-
185
- for (mf), v in dic.items():
186
- m, _, f = mf.partition(",")
187
- if m.endswith("_stderr"):
188
- continue
189
- if m == "alias":
190
- continue
191
-
192
- if m + "_stderr" + "," + f in dic:
193
- se = dic[m + "_stderr" + "," + f]
194
- if se != "N/A":
195
- se = "%.4f" % se
196
- data = [model_name, model_args, k, version, f, n, m, str(v), str(se)]
197
- if key == "groups":
198
- data = [self.group_names] + data
199
- table.add_data(*data)
200
- else:
201
- data = [model_name, model_args, k, version, f, n, m, str(v), ""]
202
- if key == "groups":
203
- data = [self.group_names] + data
204
- table.add_data(*data)
205
-
206
- return table
207
-
208
- # log the complete eval result to W&B Table
209
- table = make_table(columns, "results")
210
- self.run.log({"evaluation/eval_results": table})
211
-
212
- if "groups" in self.results.keys():
213
- table = make_table(["Groups"] + columns, "groups")
214
- self.run.log({"evaluation/group_eval_results": table})
215
-
216
- def _log_results_as_artifact(self) -> None:
217
- """Log results as JSON artifact to W&B."""
218
- dumped = json.dumps(self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False)
219
- artifact = wandb.Artifact("results", type="eval_results")
220
- with artifact.new_file("results.json", mode="w", encoding="utf-8") as f:
221
- f.write(dumped)
222
- self.run.log_artifact(artifact)
223
-
224
- def log_eval_result(self) -> None:
225
- """Log evaluation results to W&B."""
226
- # Log configs to wandb
227
- configs = self._get_config()
228
- self.run.config.update(configs, allow_val_change=True)
229
-
230
- wandb_summary, self.wandb_results = self._sanitize_results_dict()
231
- # update wandb.run.summary with items that were removed
232
- self.run.summary.update(wandb_summary)
233
- # Log the evaluation metrics to wandb
234
- self.run.log(self.wandb_results)
235
- # Log the evaluation metrics as W&B Table
236
- self._log_results_as_table()
237
- # Log the results dict as json to W&B Artifacts
238
- self._log_results_as_artifact()
239
-
240
- def _generate_dataset(self, data: List[Dict[str, Any]], config: Dict[str, Any]) -> pd.DataFrame:
241
- """Generate a dataset from evaluation data.
242
-
243
- Args:
244
- data (List[Dict[str, Any]]): The data to generate a dataset for.
245
- config (Dict[str, Any]): The configuration of the task.
246
-
247
- Returns:
248
- pd.DataFrame: A dataframe that is ready to be uploaded to W&B.
249
- """
250
- ids = [x["doc_id"] for x in data]
251
- labels = [x["target"] for x in data]
252
- instance = [""] * len(ids)
253
- resps = [""] * len(ids)
254
- filtered_resps = [""] * len(ids)
255
- model_outputs = {}
256
-
257
- metrics_list = config["metric_list"]
258
- metrics = {}
259
- for metric in metrics_list:
260
- metric = metric.get("metric")
261
- if metric in ["word_perplexity", "byte_perplexity", "bits_per_byte"]:
262
- metrics[f"{metric}_loglikelihood"] = [x[metric][0] for x in data]
263
- if metric in ["byte_perplexity", "bits_per_byte"]:
264
- metrics[f"{metric}_bytes"] = [x[metric][1] for x in data]
265
- else:
266
- metrics[f"{metric}_words"] = [x[metric][1] for x in data]
267
- else:
268
- metrics[metric] = [x[metric] for x in data]
269
-
270
- if config["output_type"] == "loglikelihood":
271
- instance = [x["arguments"][0][0] for x in data]
272
- labels = [x["arguments"][0][1] for x in data]
273
- resps = [f'log probability of continuation is {x["resps"][0][0][0]} ' + "\n\n" + "continuation will {} generated with greedy sampling".format("not be" if not x["resps"][0][0][1] else "be") for x in data]
274
- filtered_resps = [f'log probability of continuation is {x["filtered_resps"][0][0]} ' + "\n\n" + "continuation will {} generated with greedy sampling".format("not be" if not x["filtered_resps"][0][1] else "be") for x in data]
275
- elif config["output_type"] == "multiple_choice":
276
- instance = [x["arguments"][0][0] for x in data]
277
- choices = ["\n".join([f"{idx}. {y[1]}" for idx, y in enumerate(x["arguments"])]) for x in data]
278
- resps = [np.argmax([n[0][0] for n in x["resps"]]) for x in data]
279
- filtered_resps = [np.argmax([n[0] for n in x["filtered_resps"]]) for x in data]
280
- elif config["output_type"] == "generate_until":
281
- instance = [x["arguments"][0][0] for x in data]
282
- resps = [x["resps"][0][0] for x in data]
283
- filtered_resps = [x["filtered_resps"][0] for x in data]
284
-
285
- model_outputs["raw_predictions"] = resps
286
- model_outputs["filtered_predictions"] = filtered_resps
287
-
288
- df_data = {
289
- "id": ids,
290
- "data": instance,
291
- }
292
- if config["output_type"] == "multiple_choice":
293
- df_data["choices"] = choices
294
-
295
- tmp_data = {
296
- "input_len": [len(x) for x in instance],
297
- "labels": labels,
298
- "output_type": config["output_type"],
299
- }
300
- df_data.update(tmp_data)
301
- df_data.update(model_outputs)
302
- df_data.update(metrics)
303
-
304
- return pd.DataFrame(df_data)
305
-
306
- def _log_samples_as_artifact(self, data: List[Dict[str, Any]], task_name: str) -> None:
307
- # log the samples as an artifact
308
- dumped = json.dumps(
309
- data,
310
- indent=2,
311
- default=_handle_non_serializable,
312
- ensure_ascii=False,
313
- )
314
- artifact = wandb.Artifact(f"{task_name}", type="samples_by_task")
315
- with artifact.new_file(f"{task_name}_eval_samples.json", mode="w", encoding="utf-8") as f:
316
- f.write(dumped)
317
- self.run.log_artifact(artifact)
318
- # artifact.wait()
319
-
320
- def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None:
321
- """Log evaluation samples to W&B.
322
-
323
- Args:
324
- samples (Dict[str, List[Dict[str, Any]]]): Evaluation samples for each task.
325
- """
326
- task_names: List[str] = [x for x in self.task_names if x not in self.group_names]
327
-
328
- ungrouped_tasks = []
329
- tasks_by_groups = {}
330
-
331
- for task_name in task_names:
332
- group_names = self.task_configs[task_name].get("group", None)
333
- if group_names:
334
- if isinstance(group_names, str):
335
- group_names = [group_names]
336
-
337
- for group_name in group_names:
338
- if not tasks_by_groups.get(group_name):
339
- tasks_by_groups[group_name] = [task_name]
340
- else:
341
- tasks_by_groups[group_name].append(task_name)
342
- else:
343
- ungrouped_tasks.append(task_name)
344
-
345
- for task_name in ungrouped_tasks:
346
- eval_preds = samples[task_name]
347
-
348
- # log the samples as a W&B Table
349
- df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
350
- self.run.log({f"{task_name}_eval_results": df})
351
-
352
- # log the samples as a json file as W&B Artifact
353
- self._log_samples_as_artifact(eval_preds, task_name)
354
-
355
- for group, grouped_tasks in tasks_by_groups.items():
356
- grouped_df = pd.DataFrame()
357
- for task_name in grouped_tasks:
358
- eval_preds = samples[task_name]
359
- df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
360
- df["group"] = group
361
- df["task"] = task_name
362
- grouped_df = pd.concat([grouped_df, df], ignore_index=True)
363
-
364
- # log the samples as a json file as W&B Artifact
365
- self._log_samples_as_artifact(eval_preds, task_name)
366
-
367
- self.run.log({f"{group}_eval_results": grouped_df})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/__init__.py DELETED
@@ -1,40 +0,0 @@
1
- from loguru import logger
2
- import sys
3
-
4
- logger.remove()
5
- logger.add(sys.stdout, level="WARNING")
6
-
7
- AVAILABLE_MODELS = {
8
- "llava": "Llava",
9
- "qwen_vl": "Qwen_VL",
10
- "fuyu": "Fuyu",
11
- "batch_gpt4": "BatchGPT4",
12
- "gpt4v": "GPT4V",
13
- "instructblip": "InstructBLIP",
14
- "minicpm_v": "MiniCPM_V",
15
- "llava_vid": "LlavaVid",
16
- "videoChatGPT": "VideoChatGPT",
17
- "llama_vid": "LLaMAVid",
18
- "video_llava": "VideoLLaVA",
19
- "xcomposer2_4KHD": "XComposer2_4KHD",
20
- "claude": "Claude",
21
- "qwen_vl_api": "Qwen_VL_API",
22
- "llava_sglang": "LlavaSglang",
23
- "idefics2": "Idefics2",
24
- "internvl": "InternVLChat",
25
- "gemini_api": "GeminiAPI",
26
- "reka": "Reka",
27
- "from_log": "FromLog",
28
- "mplug_owl_video": "mplug_Owl",
29
- "phi3v": "Phi3v",
30
- "tinyllava": "TinyLlava",
31
- "llava_hf": "LlavaHf",
32
- "longva": "LongVA",
33
- }
34
-
35
- for model_name, model_class in AVAILABLE_MODELS.items():
36
- try:
37
- exec(f"from .{model_name} import {model_class}")
38
- except ImportError as e:
39
- # logger.warning(f"Failed to import {model_class} from {model_name}: {e}")
40
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/batch_gpt4.py DELETED
@@ -1,204 +0,0 @@
1
- # Standard library imports
2
- from copy import deepcopy
3
- from io import BytesIO
4
- import base64
5
-
6
- import os
7
- import time
8
- import json
9
-
10
- # Related third-party imports
11
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
12
- from accelerate.state import AcceleratorState
13
- import numpy as np
14
- from PIL import Image
15
- import requests as url_requests
16
- from tqdm import tqdm
17
- from openai import OpenAI
18
-
19
- # Local application/library specific imports
20
- from lmms_eval.api.instance import Instance
21
- from lmms_eval.api.model import lmms
22
- from lmms_eval.api.registry import register_model
23
- from loguru import logger as eval_logger
24
-
25
- # Conditional imports
26
- try:
27
- from decord import VideoReader, cpu
28
- except ImportError:
29
- eval_logger.warning("Decord is not installed. Video input will not be supported.")
30
-
31
- # Constants and global configurations
32
- API_TYPE = os.getenv("API_TYPE", "openai")
33
- NUM_SECONDS_TO_SLEEP = 5
34
-
35
- if API_TYPE == "openai":
36
- API_URL = os.getenv("OPENAI_API_URL", "https://api.openai.com/v1/chat/completions")
37
- API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
38
- headers = {
39
- "Authorization": f"Bearer {API_KEY}",
40
- "Content-Type": "application/json",
41
- }
42
- elif API_TYPE == "azure":
43
- API_URL = os.getenv("AZURE_ENDPOINT", "https://api.cognitive.microsoft.com/sts/v1.0/issueToken")
44
- API_KEY = os.getenv("AZURE_API_KEY", "YOUR_API_KEY")
45
- headers = {
46
- "api-key": API_KEY,
47
- "Content-Type": "application/json",
48
- }
49
- else:
50
- API_URL = "YOUR_API_URL"
51
- API_KEY = "YOUR_API_KEY"
52
-
53
-
54
- @register_model("batch_gpt4")
55
- class BatchGPT4(lmms):
56
- def __init__(
57
- self,
58
- model_version: str = "gpt-4o",
59
- api_key: str = API_KEY,
60
- api_url: str = API_URL,
61
- modality: str = "image",
62
- max_frames_for_video: int = 10,
63
- timeout: int = 120,
64
- **kwargs,
65
- ) -> None:
66
- super().__init__()
67
- # Manually set a image token for GPT4V so that we can search for it
68
- # and split the text and image
69
- # Here we just use the same token as llava for convenient
70
- self.model_version = model_version
71
- self.modality = modality
72
- self.max_frames_for_video = max_frames_for_video
73
- self.image_token = "<image>"
74
- self.timeout = timeout
75
-
76
- self.api_key = api_key
77
- self.api_url = api_url
78
- self.client = OpenAI(api_key=api_key)
79
-
80
- accelerator = Accelerator()
81
- assert accelerator.state.local_process_index == 0, "BatchGPT4 does not support distributed inference."
82
- assert accelerator.state.num_processes == 1, "BatchGPT4 does not support distributed inference."
83
-
84
- # Function to encode the image
85
- def encode_image(self, image: Image):
86
- output_buffer = BytesIO()
87
- image.save(output_buffer, format="PNG")
88
- byte_data = output_buffer.getvalue()
89
- base64_str = base64.b64encode(byte_data).decode("utf-8")
90
- return base64_str
91
-
92
- # Function to encode the video
93
- def encode_video(self, video_path, for_get_frames_num):
94
- vr = VideoReader(video_path, ctx=cpu(0))
95
- total_frame_num = len(vr)
96
- uniform_sampled_frames = np.linspace(0, total_frame_num - 1, for_get_frames_num, dtype=int)
97
- frame_idx = uniform_sampled_frames.tolist()
98
- frames = vr.get_batch(frame_idx).asnumpy()
99
-
100
- base64_frames = []
101
- for frame in frames:
102
- img = Image.fromarray(frame)
103
- output_buffer = BytesIO()
104
- img.save(output_buffer, format="PNG")
105
- byte_data = output_buffer.getvalue()
106
- base64_str = base64.b64encode(byte_data).decode("utf-8")
107
- base64_frames.append(base64_str)
108
-
109
- return base64_frames
110
-
111
- def flatten(self, input):
112
- new_list = []
113
- for i in input:
114
- for j in i:
115
- new_list.append(j)
116
- return new_list
117
-
118
- def generate_until(self, requests):
119
- # Prepare the batch requests data
120
- requests_data = {}
121
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Batch Preparing")
122
- for idx, (contexts, gen_kwargs, doc_to_visual, doc_id, task, split) in enumerate([reg.args for reg in requests]):
123
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
124
- visuals = self.flatten(visuals)
125
- imgs = []
126
- for visual in visuals:
127
- if self.modality == "image":
128
- img = self.encode_image(visual)
129
- imgs.append(img)
130
- elif self.modality == "video":
131
- frames = self.encode_video(visual, self.max_frames_for_video)
132
- imgs.extend(frames)
133
-
134
- messages = []
135
- if self.image_token not in contexts:
136
- messages.append({"role": "user", "content": contexts})
137
- for img in imgs:
138
- messages.append({"role": "user", "content": f"data:image/jpeg;base64,{img}"})
139
- else:
140
- contexts_split = contexts.split(self.image_token)
141
- for idx, context in enumerate(contexts_split):
142
- if idx < len(imgs):
143
- messages.append({"role": "user", "content": context})
144
- messages.append({"role": "user", "content": f"data:image/jpeg;base64,{imgs[idx]}"})
145
- if len(contexts_split) > len(imgs):
146
- messages.append({"role": "user", "content": contexts_split[-1]})
147
-
148
- requests_data[f"request-{idx}"] = {"model": self.model_version, "messages": messages, "max_tokens": gen_kwargs.get("max_new_tokens", 1024)}
149
- pbar.update(1)
150
-
151
- file_path = os.getenv("HF_HOME", "~/.cache/huggingface") + f"/batchinput_{len(requests_data)}.jsonl"
152
- file_path = self.create_batch_input_file(requests_data, file_path)
153
- file_id = self.upload_input_file(file_path)
154
-
155
- batch_response = self.create_batch(file_id, metadata={"description": "Batch Processing for GPT-4"})
156
- batch_status = self.check_batch_status(batch_response.id)
157
- while True:
158
- batch_status = self.check_batch_status(batch_response.id)
159
- if batch_status.status == "completed":
160
- eval_logger.info("Batch processing completed.")
161
- batch_results = self.retrieve_batch_results(batch_status.output_file_id)
162
- res = [result["response"]["choices"][0]["message"]["content"] for result in json.loads(batch_results)]
163
- return res
164
- elif batch_status.status == "failed":
165
- eval_logger.info("Batch processing failed.")
166
- res = ["Batch failed"] * len(requests)
167
- return res
168
- else:
169
- eval_logger.info(f"Batch status: {batch_status.status}. Retrying in {NUM_SECONDS_TO_SLEEP} seconds.")
170
- time.sleep(NUM_SECONDS_TO_SLEEP)
171
-
172
- def loglikelihood(self, requests):
173
- # TODO
174
- assert False, "GPT4V not support"
175
-
176
- def create_batch_input_file(self, requests_data, file_path="batchinput.jsonl"):
177
- with open(file_path, "w") as file:
178
- for request_id, data in requests_data.items():
179
- json_record = json.dumps({"custom_id": request_id, "method": "POST", "url": "/v1/chat/completions", "body": data})
180
- file.write(json_record + "\n")
181
- return file_path
182
-
183
- def upload_input_file(self, file_path):
184
- with open(file_path, "rb") as file:
185
- response = self.client.files.create(file=file, purpose="batch")
186
- return response.id
187
-
188
- def create_batch(self, file_id, metadata=None):
189
- if metadata is None:
190
- metadata = {}
191
- response = self.client.batches.create(input_file_id=file_id, endpoint="/v1/chat/completions", completion_window="24h", metadata=metadata)
192
- return response
193
-
194
- def check_batch_status(self, batch_id):
195
- return self.client.batches.retrieve(batch_id)
196
-
197
- def retrieve_batch_results(self, file_id):
198
- return self.client.files.content(file_id)
199
-
200
- def cancel_batch(self, batch_id):
201
- return self.client.batches.cancel(batch_id)
202
-
203
- def list_batches(self, limit=10):
204
- return self.client.batches.list(limit=limit)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/claude.py DELETED
@@ -1,256 +0,0 @@
1
- from io import BytesIO
2
- from copy import deepcopy
3
- import os
4
- import base64
5
- import json
6
- from typing import List, Tuple, Union
7
- from tqdm import tqdm
8
- import time
9
-
10
- from lmms_eval.api.instance import Instance
11
- from lmms_eval.api.model import lmms
12
- from lmms_eval.api.registry import register_model
13
-
14
- from accelerate import Accelerator, DistributedType
15
-
16
- from PIL import Image
17
-
18
- NUM_SECONDS_TO_SLEEP = 5
19
-
20
- from loguru import logger
21
-
22
- eval_logger = logger
23
-
24
- try:
25
- import anthropic
26
- from decord import VideoReader, cpu
27
- import numpy as np
28
- except Exception as e:
29
- eval_logger.warning(f"Error importing claude: {e}")
30
-
31
- API_URL = os.getenv("ANTHROPIC_API_URL", "https://api.anthropic.com/v1/complete")
32
- API_KEY = os.getenv("ANTHROPIC_API_KEY", "YOUR_API_KEY")
33
-
34
-
35
- @register_model("claude")
36
- class Claude(lmms):
37
- def __init__(
38
- self,
39
- model_version: str = "claude-3-opus-20240229",
40
- image_token: str = "<image>", # Use to separate interleaved image and text
41
- system_prompt: str = "", # Whether you want some special system prompt here
42
- modality: str = "image",
43
- continual_mode: bool = False,
44
- response_persistent_folder: str = None,
45
- **kwargs,
46
- ) -> None:
47
- super().__init__()
48
- self.model_version = model_version
49
- self.image_token = image_token
50
- self.system_prompt = system_prompt
51
- self.modality = modality
52
-
53
- self.continual_mode = continual_mode
54
- if self.continual_mode and response_persistent_folder is None:
55
- raise ValueError("Continual mode requires a persistent path for the response. Please provide a valid path.")
56
- self.response_persistent_folder = response_persistent_folder
57
- self.response_persistent_file = os.path.join(self.response_persistent_folder, f"{self.model_version}_response.json")
58
-
59
- if os.path.exists(self.response_persistent_file):
60
- with open(self.response_persistent_file, "r") as f:
61
- self.response_cache = json.load(f)
62
- self.cache_mode = "resume"
63
- else:
64
- self.response_cache = {}
65
- self.cache_mode = "start"
66
-
67
- accelerator = Accelerator()
68
- if accelerator.num_processes > 1:
69
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
70
- self.accelerator = accelerator
71
- if self.accelerator.is_local_main_process:
72
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
73
- self._rank = self.accelerator.local_process_index
74
- self._world_size = self.accelerator.num_processes
75
- else:
76
- self.accelerator = accelerator
77
- self._rank = self.accelerator.local_process_index
78
- self._world_size = self.accelerator.num_processes
79
-
80
- self.device = self.accelerator.device
81
-
82
- def encode_image(self, image):
83
- output_buffer = BytesIO()
84
- image.save(output_buffer, format="PNG")
85
- byte_data = output_buffer.getvalue()
86
- base64_str = base64.b64encode(byte_data).decode("utf-8")
87
- return base64_str
88
-
89
- def flatten(self, input):
90
- new_list = []
91
- for i in input:
92
- for j in i:
93
- new_list.append(j)
94
- return new_list
95
-
96
- def get_image_size(self, image):
97
- # Create a BytesIO object to store the image bytes
98
- img_byte_array = BytesIO()
99
-
100
- # Save the image to the BytesIO object
101
- image.save(img_byte_array, format="PNG")
102
-
103
- # Get the size of the BytesIO object
104
- img_size = img_byte_array.tell()
105
-
106
- return img_size
107
-
108
- # The max file size is 5MB for claude
109
- def shrink_image_to_file_size(self, img: Image, max_file_size=4838990) -> Image:
110
- # Get the current size of the image
111
- original_size = self.get_image_size(img)
112
-
113
- # If the image size is already smaller than the desired size, return
114
- if original_size <= max_file_size:
115
- return img
116
-
117
- # Calculate the ratio to shrink the image
118
- # Somehow I found out sqrt ratio is not enough to shrink the image
119
- # below threshold, so I guess we do more
120
- shrink_ratio = min(0.9, max_file_size / original_size)
121
-
122
- # Resize the image with the calculated ratio
123
- new_width = int(img.width * shrink_ratio)
124
- new_height = int(img.height * shrink_ratio)
125
- img = img.resize((new_width, new_height), Image.LANCZOS)
126
-
127
- return self.shrink_image_to_file_size(img, max_file_size)
128
-
129
- def encode_video(self, video_path):
130
- vr = VideoReader(video_path, ctx=cpu(0))
131
- total_frame_num = len(vr)
132
- uniform_sampled_frames = np.linspace(0, total_frame_num - 1, self.max_frames_for_video, dtype=int)
133
- frame_idx = uniform_sampled_frames.tolist()
134
- frames = vr.get_batch(frame_idx).asnumpy()
135
-
136
- base64_frames = []
137
- for frame in frames:
138
- img = Image.fromarray(frame)
139
- output_buffer = BytesIO()
140
- img.save(output_buffer, format="PNG")
141
- byte_data = output_buffer.getvalue()
142
- base64_str = base64.b64encode(byte_data).decode("utf-8")
143
- base64_frames.append(f"data:image/jpeg;base64,{base64_str}")
144
-
145
- return base64_frames
146
-
147
- def generate_until(self, requests) -> List[str]:
148
- client = anthropic.Anthropic()
149
-
150
- res = []
151
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
152
-
153
- empty_image_block = {
154
- "type": "image",
155
- "source": {
156
- "type": "base64",
157
- "media_type": "image/png",
158
- },
159
- }
160
- empty_text_block = {"type": "text"}
161
- empty_messages = [
162
- {
163
- "role": "user",
164
- "content": [],
165
- }
166
- ]
167
-
168
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
169
- ###################### CONTINUAL MODE ######################
170
- if self.continual_mode is True and self.cache_mode == "resume":
171
- doc_uuid = f"{task}___{split}___{doc_id}"
172
- if doc_uuid in self.response_cache:
173
- response_text = self.response_cache[doc_uuid]
174
- if response_text:
175
- res.append(response_text)
176
- pbar.update(1)
177
- continue
178
-
179
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
180
- visuals = self.flatten(visuals)
181
- imgs = []
182
- for visual in visuals:
183
- if isinstance(visual, str) and os.path.exists(visual): # Assuming visual is a path to a video
184
- visual = self.encode_video(visual)
185
- for img in visual:
186
- imgs.append(img)
187
- else:
188
- visual = self.shrink_image_to_file_size(visual)
189
- img = self.encode_image(visual)
190
- imgs.append(img)
191
-
192
- messages = deepcopy(empty_messages)
193
-
194
- if self.image_token not in contexts:
195
- for img in imgs:
196
- image_block = deepcopy(empty_image_block)
197
- image_block["source"]["data"] = img
198
- messages[0]["content"].append(image_block)
199
- text_block = deepcopy(empty_text_block)
200
- text_block["text"] = contexts
201
- messages[0]["content"].append(text_block)
202
- else:
203
- contexts = contexts.split(self.image_token)
204
- for idx, img in enumerate(imgs):
205
- text_block = deepcopy(empty_text_block)
206
- image_block = deepcopy(empty_image_block)
207
- text_block["text"] = contexts
208
- messages[0]["content"].append(text_block)
209
- image_block["source"]["data"] = img
210
- messages[0]["content"].append(image_block)
211
-
212
- # If n image tokens are in the contexts
213
- # contexts will be splitted into n+1 chunks
214
- # Manually add it into the messages
215
- text_block = deepcopy(empty_text_block)
216
- text_block["text"] = contexts
217
- messages["content"].append(text_block)
218
-
219
- if "max_new_tokens" not in gen_kwargs:
220
- gen_kwargs["max_new_tokens"] = 1024
221
- if "temperature" not in gen_kwargs:
222
- gen_kwargs["temperature"] = 0
223
- if "top_p" not in gen_kwargs:
224
- gen_kwargs["top_p"] = None
225
- if "num_beams" not in gen_kwargs:
226
- gen_kwargs["num_beams"] = 1
227
-
228
- for attempt in range(5):
229
- try:
230
- message = client.messages.create(model=self.model_version, max_tokens=gen_kwargs["max_new_tokens"], system=self.system_prompt, temperature=gen_kwargs["temperature"], top_p=gen_kwargs["top_p"], messages=messages)
231
- except Exception as e:
232
- eval_logger.info(f"Attempt {attempt + 1} failed with error: {str(e)}")
233
- if attempt < 5 - 1: # If we have retries left, sleep and then continue to next attempt
234
- time.sleep(NUM_SECONDS_TO_SLEEP)
235
- else: # If this was the last attempt, log and return empty
236
- eval_logger.error(f"All 5 attempts failed. Last error message: {str(e)}")
237
- res.append("")
238
- pbar.update(1)
239
- continue
240
-
241
- res.append(message.content[0].text)
242
- pbar.update(1)
243
-
244
- ###################### CONTINUAL MODE ######################
245
- if self.continual_mode is True: # Cache the response
246
- doc_uuid = f"{task}___{split}___{doc_id}"
247
- self.response_cache[doc_uuid] = response_text
248
- with open(self.response_persistent_file, "w") as f:
249
- json.dump(self.response_cache, f)
250
-
251
- pbar.close()
252
-
253
- return res
254
-
255
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
256
- assert False, "Not supported for claude"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/from_log.py DELETED
@@ -1,116 +0,0 @@
1
- import json
2
- import os
3
- import re
4
-
5
- from datetime import datetime
6
- from typing import List, Tuple
7
- from tqdm import tqdm
8
- from lmms_eval.api.registry import register_model
9
- from lmms_eval.api.model import lmms
10
- from lmms_eval.api.instance import Instance
11
- from accelerate import Accelerator, DistributedType
12
-
13
- from loguru import logger as eval_logger
14
-
15
-
16
- @register_model("from_log")
17
- class FromLog(lmms):
18
- def __init__(
19
- self,
20
- logs: str = "logs",
21
- model_name: str = None,
22
- model_args: str = None,
23
- have_limits: bool = False,
24
- **kwargs,
25
- ) -> None:
26
- super().__init__()
27
-
28
- self.logs = {}
29
-
30
- log_folders = logs.split(",")
31
-
32
- def matched_model(_model_args):
33
- if model_name and model_name != _model_args["model"]:
34
- return False
35
-
36
- if model_args:
37
- _model_args_list = model_args.split(",")
38
-
39
- for _model_arg in _model_args_list:
40
- if _model_arg not in _model_args["model_args"]:
41
- return False
42
-
43
- if not have_limits and _model_args["limit"] is not None:
44
- return False
45
-
46
- return True
47
-
48
- for log_folder in log_folders:
49
- for root, dirs, files in os.walk(log_folder):
50
- for file in files:
51
- if file.endswith(".json"):
52
- try:
53
- log_file = os.path.join(root, file)
54
-
55
- with open(log_file, "r") as f:
56
- log_data = json.load(f)
57
-
58
- # check if model is matched
59
- _model_args = log_data["args"]
60
- if not matched_model(_model_args):
61
- raise Exception("Model not matched")
62
-
63
- # load logs
64
- logs = {}
65
- for data in log_data["logs"]:
66
- id = data["doc_id"]
67
- response = data["resps"][0]
68
- logs[id] = response
69
-
70
- task = log_data["model_configs"]["task"]
71
-
72
- pattern = re.compile(r"\d{4}_\d{4}")
73
-
74
- if "time" in log_data:
75
- log_time = log_data["time"]
76
- elif pattern.search(os.path.abspath(log_file)):
77
- log_time = pattern.findall(os.path.abspath(log_file))[-1]
78
- else:
79
- log_time = "unknown"
80
-
81
- if task not in self.logs or (self.logs[task]["time"] == "unknown" or datetime.strptime(log_time, "%m%d_%H%M") > datetime.strptime(self.logs[task]["time"], "%m%d_%H%M")):
82
- self.logs[task] = {"time": log_time, "logs": logs}
83
-
84
- except Exception as e:
85
- pass
86
-
87
- accelerator = Accelerator()
88
- if accelerator.num_processes > 1:
89
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
90
- self.accelerator = accelerator
91
- if self.accelerator.is_local_main_process:
92
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
93
- self._rank = self.accelerator.local_process_index
94
- self._world_size = self.accelerator.num_processes
95
- else:
96
- self.accelerator = accelerator
97
- self._rank = self.accelerator.local_process_index
98
- self._world_size = self.accelerator.num_processes
99
-
100
- self.device = self.accelerator.device
101
-
102
- def generate_until(self, requests) -> List[str]:
103
- res = []
104
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
105
-
106
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
107
- response = self.logs[task]["logs"][doc_id]
108
- res.append(response[0])
109
- pbar.update(1)
110
-
111
- pbar.close()
112
- return res
113
-
114
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
115
- # TODO
116
- assert False, "not support"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/fuyu.py DELETED
@@ -1,261 +0,0 @@
1
- import warnings
2
-
3
- warnings.simplefilter("ignore", category=DeprecationWarning)
4
- warnings.filterwarnings("ignore")
5
-
6
- from accelerate import Accelerator, DistributedType
7
- from transformers import FuyuForCausalLM, AutoTokenizer, FuyuImageProcessor, FuyuProcessor
8
- from lmms_eval.api.model import lmms
9
- from lmms_eval.api.registry import register_model
10
- import torch
11
- from PIL import Image
12
- from typing import List, Optional, Union, Tuple
13
- from lmms_eval import utils
14
- from lmms_eval.api.instance import Instance
15
- from tqdm import tqdm
16
- from accelerate import Accelerator, DistributedType
17
- from accelerate.state import AcceleratorState
18
-
19
- from loguru import logger as eval_logger
20
-
21
-
22
- @register_model("fuyu")
23
- class Fuyu(lmms):
24
- """
25
- Fuyu Model
26
- """
27
-
28
- def __init__(
29
- self,
30
- pretrained: str = "adept/fuyu-8b",
31
- device: Optional[str] = "cuda",
32
- max_new_tokens: int = 256,
33
- batch_size: Optional[Union[int, str]] = 1,
34
- **kwargs,
35
- ) -> None:
36
- super().__init__()
37
- # Do not use kwargs for now
38
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
39
-
40
- accelerator = Accelerator()
41
- if accelerator.num_processes > 1:
42
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
43
- else:
44
- self._device = device
45
-
46
- self._model = FuyuForCausalLM.from_pretrained(pretrained, torch_dtype=torch.bfloat16, device_map=self.device)
47
- self.model.eval()
48
- self.model.tie_weights()
49
- self._tokenizer = AutoTokenizer.from_pretrained(pretrained)
50
- self._config = self.model.config
51
-
52
- self.image_processor = FuyuImageProcessor()
53
- self.processor = FuyuProcessor(image_processor=self.image_processor, tokenizer=self.tokenizer)
54
- self.max_new_tokens = max_new_tokens
55
- self.batch_size_per_gpu = int(batch_size)
56
- accelerator = Accelerator()
57
- if accelerator.num_processes > 1:
58
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
59
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
60
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
61
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
62
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
63
- kwargs = {
64
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
65
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
66
- }
67
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
68
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
69
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
70
- self._model = accelerator.prepare(self.model)
71
- else:
72
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
73
- self.accelerator = accelerator
74
- if self.accelerator.is_local_main_process:
75
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
76
- self._rank = self.accelerator.local_process_index
77
- self._world_size = self.accelerator.num_processes
78
- else:
79
- self.model.to(self._device)
80
- self._rank = 0
81
- self._word_size = 1
82
-
83
- """if accelerator.num_processes > 1:
84
- assert accelerator.distributed_type in [
85
- DistributedType.FSDP,
86
- DistributedType.MULTI_GPU,
87
- ], "Unsupported distributed type provided. Only DDP and FSDP are supported."
88
- if accelerator.distributed_type == DistributedType.FSDP:
89
- self._model = accelerator.prepare(self.model)
90
- else:
91
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
92
- self.accelerator = accelerator
93
- if self.accelerator.is_local_main_process:
94
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
95
- self._rank = self.accelerator.local_process_index
96
- self._world_size = self.accelerator.num_processes"""
97
-
98
- @property
99
- def config(self):
100
- # return the associated transformers.AutoConfig for the given pretrained model.
101
- return self._config
102
-
103
- @property
104
- def tokenizer(self):
105
- return self._tokenizer
106
-
107
- @property
108
- def model(self):
109
- # returns the model, unwrapping it if using Accelerate
110
- if hasattr(self, "accelerator"):
111
- return self.accelerator.unwrap_model(self._model)
112
- else:
113
- return self._model
114
-
115
- @property
116
- def eot_token_id(self):
117
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
118
- return self.tokenizer.eos_token_id
119
-
120
- @property
121
- def max_length(self):
122
- # Assuming max_length is the sum of max context tokens and max new tokens
123
- return self.tokenizer.model_max_length
124
-
125
- @property
126
- def batch_size(self):
127
- return self.batch_size_per_gpu
128
-
129
- @property
130
- def device(self):
131
- return self._device
132
-
133
- @property
134
- def rank(self):
135
- return self._rank
136
-
137
- @property
138
- def world_size(self):
139
- return self._world_size
140
-
141
- def flatten(self, input, only_get_first=False):
142
- new_list = []
143
- for i in input:
144
- for j in i:
145
- new_list.append(j)
146
- if only_get_first:
147
- break
148
- return new_list
149
-
150
- def generate_until(self, requests: List[Instance]) -> List[str]:
151
- res = []
152
-
153
- def _collate(x):
154
- # the negative sign on len(toks) sorts descending - this has a few advantages:
155
- # - time estimates will always be over not underestimates, which is more useful for planning
156
- # - to know the size of a batch when going through the list, you know the first one is always the batch
157
- # padded context length. this is useful to simplify the batching logic and more importantly to make
158
- # automatic adaptive batches much much easier to implement
159
- # - any OOMs will happen right away rather than near the end
160
- toks = self.tok_encode(x[0])
161
- return -len(toks), x[0]
162
-
163
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
164
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
165
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
166
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
167
-
168
- for chunk in chunks:
169
- contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
170
- task = task[0]
171
- split = split[0]
172
- visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id]
173
- visuals = self.flatten(visuals, only_get_first=True)
174
- gen_kwargs = all_gen_kwargs[0]
175
-
176
- # if isinstance(visuals[0], list):
177
- # visuals = [visuals[idx][0] for idx in range(len(visuals))] # get the first image in multi-image scenarios.
178
-
179
- # assert len(contexts) == self.batch_size_per_gpu, f"Expected contexts batch size {self.batch_size_per_gpu}, got {len(contexts)}"
180
- # assert len(visuals) == self.batch_size_per_gpu, f"Expected visuals batch size {self.batch_size_per_gpu}, got {len(visuals)}"
181
- formatted_contexts = [f"{context}\n" for context in contexts]
182
- model_inputs = self.processor(text=formatted_contexts, images=visuals, device=self.device)
183
- for k, v in model_inputs.items():
184
- model_inputs[k] = v.to(self.device, non_blocking=True) if isinstance(v, torch.Tensor) else [vv.to(self.device, non_blocking=True) for vv in v]
185
-
186
- for index in range(len(model_inputs["image_patches"])):
187
- model_inputs["image_patches"][index] = model_inputs["image_patches"][index].to(dtype=next(self.model.parameters()).dtype)
188
-
189
- # preconfigure gen_kwargs with defaults
190
- gen_kwargs["image_sizes"] = [visuals[idx].size for idx in range(len(visuals))]
191
- if "max_new_tokens" not in gen_kwargs:
192
- gen_kwargs["max_new_tokens"] = 256
193
- if "temperature" not in gen_kwargs:
194
- gen_kwargs["temperature"] = 0
195
- if "top_p" not in gen_kwargs:
196
- gen_kwargs["top_p"] = None
197
- if "num_beams" not in gen_kwargs:
198
- gen_kwargs["num_beams"] = 1
199
- # generation_output = self.model.generate(
200
- # **model_inputs, temperature=gen_kwargs["temperature"], max_new_tokens=gen_kwargs["max_new_tokens"], top_p=gen_kwargs["top_p"], num_beams=gen_kwargs["num_beams"], pad_token_id=self.tokenizer.eos_token_id
201
- # )
202
- generation_output = self.model.generate(**model_inputs, max_new_tokens=gen_kwargs["max_new_tokens"], pad_token_id=self.tokenizer.eos_token_id)
203
- generation_texts = self.processor.batch_decode(generation_output, skip_special_tokens=True)
204
- response = [gen_text.split("\x04")[1].strip(" ").strip("\n") for gen_text in generation_texts]
205
- res.extend(response)
206
- pbar.update(1)
207
-
208
- pbar.close()
209
- return res
210
-
211
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
212
- # TODO
213
- res = []
214
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
215
-
216
- for contexts, doc_to_target, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
217
- # encode, pad, and truncate contexts for this batch
218
- if type(doc_to_target) == str:
219
- continuation = doc_to_target
220
- else:
221
- continuation = doc_to_target(self.task_dict[task][split][doc_id])
222
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
223
- visuals = self.flatten(visuals)
224
- formatted_contexts = [f"{contexts}\n"]
225
- formatted_continuation = [f"{contexts}\n{continuation}"]
226
- model_inputs = self.processor(text=formatted_continuation, images=visuals, device=self.device)
227
- for k, v in model_inputs.items():
228
- model_inputs[k] = v.to(self.device, non_blocking=True) if isinstance(v, torch.Tensor) else [vv.to(self.device, non_blocking=True) for vv in v]
229
-
230
- for index in range(len(model_inputs["image_patches"])):
231
- model_inputs["image_patches"][index] = model_inputs["image_patches"][index].to(dtype=next(self.model.parameters()).dtype)
232
-
233
- labels = model_inputs["input_ids"].clone()
234
- contxt_id = self.processor(text=formatted_contexts, return_tensors="pt")["input_ids"]
235
- labels[: len(contxt_id)] = -100
236
- with torch.inference_mode():
237
- outputs = self.model(**model_inputs, labels=labels)
238
- loss = outputs["loss"]
239
- # loss = torch.exp(loss)
240
- logits = outputs["logits"]
241
- greedy_tokens = logits.argmax(dim=-1)
242
- cont_toks = model_inputs["input_ids"][:, contxt_id.shape[1] :] # [1, seq]
243
- greedy_tokens = greedy_tokens[:, contxt_id.shape[1] : model_inputs["input_ids"].shape[1]] # [1, seq]
244
- max_equal = (greedy_tokens == cont_toks).all()
245
- res.append((float(loss.item()), bool(max_equal)))
246
- pbar.update(1)
247
-
248
- pbar.close()
249
- return res
250
-
251
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
252
- """ """
253
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
254
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
255
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
256
- if left_truncate_len:
257
- encoding = encoding[-left_truncate_len:]
258
- return encoding
259
-
260
- def tok_decode(self, tokens):
261
- return self.tokenizer.decode(tokens)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/gemini_api.py DELETED
@@ -1,185 +0,0 @@
1
- import io
2
- import os
3
- import time
4
-
5
- import json
6
-
7
- from PIL import Image
8
- from typing import List, Tuple
9
- from tqdm import tqdm
10
- from lmms_eval.api.registry import register_model
11
- from lmms_eval.api.model import lmms
12
- from lmms_eval.api.instance import Instance
13
- from accelerate import Accelerator, DistributedType
14
-
15
- from loguru import logger as eval_logger
16
-
17
- try:
18
- import google.generativeai as genai
19
- from google.generativeai.types import HarmCategory, HarmBlockThreshold
20
-
21
- NUM_SECONDS_TO_SLEEP = 30
22
- GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
23
- genai.configure(api_key=GOOGLE_API_KEY)
24
-
25
- except Exception as e:
26
- eval_logger.error(f"Error importing generativeai: {str(e)}")
27
- genai = None
28
-
29
-
30
- @register_model("gemini_api")
31
- class GeminiAPI(lmms):
32
- def __init__(
33
- self,
34
- model_version: str = "gemini-1.5-flash-latest",
35
- modality: str = "image",
36
- timeout: int = 120,
37
- continual_mode: bool = False,
38
- response_persistent_folder: str = None, # We will cache the Gemini API response in this path and use it for future requests
39
- **kwargs,
40
- ) -> None:
41
- super().__init__()
42
- self.model_version = model_version
43
- self.timeout = timeout
44
- self.model = genai.GenerativeModel(model_version)
45
- self.continual_mode = continual_mode
46
- if self.continual_mode and response_persistent_folder is None:
47
- raise ValueError("Continual mode requires a persistent path for the response. We will cache the Gemini API response in this path and use it for future requests. Please provide a valid path.")
48
- self.response_persistent_folder = response_persistent_folder
49
- self.response_persistent_file = os.path.join(self.response_persistent_folder, f"{self.model_version}_response.json")
50
-
51
- if os.path.exists(self.response_persistent_file):
52
- with open(self.response_persistent_file, "r") as f:
53
- self.response_cache = json.load(f)
54
- self.cache_mode = "resume"
55
- else:
56
- self.response_cache = {}
57
- self.cache_mode = "start"
58
-
59
- accelerator = Accelerator()
60
- if accelerator.num_processes > 1:
61
- assert self.continual_mode is False, "Continual mode is not supported with distributed inference."
62
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
63
- self.accelerator = accelerator
64
- if self.accelerator.is_local_main_process:
65
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
66
- self._rank = self.accelerator.local_process_index
67
- self._world_size = self.accelerator.num_processes
68
- else:
69
- self.accelerator = accelerator
70
- self._rank = self.accelerator.local_process_index
71
- self._world_size = self.accelerator.num_processes
72
-
73
- self.device = self.accelerator.device
74
-
75
- self.modality = modality
76
-
77
- def flatten(self, input):
78
- new_list = []
79
- for i in input:
80
- for j in i:
81
- new_list.append(j)
82
- return new_list
83
-
84
- def get_image_size(self, image):
85
- # Create a BytesIO object to store the image bytes
86
- img_byte_array = io.BytesIO()
87
-
88
- # Save the image to the BytesIO object
89
- image.save(img_byte_array, format="PNG")
90
-
91
- # Get the size of the BytesIO object
92
- img_size = img_byte_array.tell()
93
-
94
- return img_size
95
-
96
- def encode_video(self, video_path):
97
- uploaded_obj = genai.upload_file(path=video_path)
98
- time.sleep(5)
99
- return uploaded_obj
100
-
101
- def convert_video(self, images):
102
- for idx, img in enumerate(images):
103
- if self.modality == "video" and isinstance(img, str):
104
- try:
105
- images[idx] = self.encode_video(img)
106
- except Exception as e:
107
- eval_logger.error(f"Error converting video: {str(e)}")
108
- return images
109
-
110
- def generate_until(self, requests) -> List[str]:
111
- res = []
112
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
113
-
114
- def get_uuid(task, split, doc_id):
115
- return f"{task}___{split}___{doc_id}"
116
-
117
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
118
- if self.continual_mode is True and self.cache_mode == "resume":
119
- doc_uuid = get_uuid(task, split, doc_id)
120
- if doc_uuid in self.response_cache:
121
- content = self.response_cache[doc_uuid]
122
- if content:
123
- res.append(content)
124
- pbar.update(1)
125
- continue
126
-
127
- if "max_new_tokens" not in gen_kwargs:
128
- gen_kwargs["max_new_tokens"] = 1024
129
- if "temperature" not in gen_kwargs:
130
- gen_kwargs["temperature"] = 0
131
-
132
- config = genai.GenerationConfig(
133
- max_output_tokens=gen_kwargs["max_new_tokens"],
134
- temperature=gen_kwargs["temperature"],
135
- )
136
-
137
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
138
- visuals = self.flatten(visuals)
139
- visuals = self.convert_video(visuals)
140
-
141
- message = [contexts] + visuals
142
-
143
- for attempt in range(5):
144
- try:
145
- content = self.model.generate_content(
146
- message,
147
- generation_config=config,
148
- safety_settings={
149
- HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
150
- HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
151
- HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
152
- HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
153
- },
154
- )
155
- content = content.text
156
- break
157
- except Exception as e:
158
- eval_logger.info(f"Attempt {attempt + 1} failed with error: {str(e)}")
159
- if isinstance(e, ValueError):
160
- try:
161
- eval_logger.info(f"Prompt feed_back: {content.prompt_feedback}")
162
- content = ""
163
- break
164
- except Exception:
165
- pass
166
- if attempt < 5 - 1: # If we have retries left, sleep and then continue to next attempt
167
- time.sleep(NUM_SECONDS_TO_SLEEP)
168
- else: # If this was the last attempt, log and return empty
169
- eval_logger.error(f"All 5 attempts failed. Last error message: {str(e)}")
170
- content = ""
171
- res.append(content)
172
- pbar.update(1)
173
-
174
- if self.continual_mode is True: # Cache the response
175
- doc_uuid = get_uuid(task, split, doc_id)
176
- self.response_cache[doc_uuid] = content
177
- with open(self.response_persistent_file, "w") as f:
178
- json.dump(self.response_cache, f)
179
-
180
- pbar.close()
181
- return res
182
-
183
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
184
- # TODO
185
- assert False, "Gemini API not support"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/gpt4v.py DELETED
@@ -1,191 +0,0 @@
1
- from io import BytesIO
2
- from copy import deepcopy
3
- import numpy as np
4
- import os
5
- import base64
6
- from typing import List, Tuple
7
- from tqdm import tqdm
8
- import requests as url_requests
9
- import time
10
-
11
-
12
- from lmms_eval.api.instance import Instance
13
- from lmms_eval.api.model import lmms
14
- from lmms_eval.api.registry import register_model
15
- from lmms_eval import utils
16
-
17
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
18
- from accelerate.state import AcceleratorState
19
-
20
- try:
21
- from decord import VideoReader, cpu
22
- except ImportError:
23
- pass
24
-
25
- from PIL import Image
26
-
27
- API_TYPE = os.getenv("API_TYPE", "openai")
28
- NUM_SECONDS_TO_SLEEP = 30
29
- from loguru import logger as eval_logger
30
-
31
- if API_TYPE == "openai":
32
- API_URL = os.getenv("OPENAI_API_URL", "https://api.openai.com/v1/chat/completions")
33
- API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
34
- headers = {
35
- "Authorization": f"Bearer {API_KEY}",
36
- "Content-Type": "application/json",
37
- }
38
- elif API_TYPE == "azure":
39
- API_URL = os.getenv("AZURE_ENDPOINT", "https://api.cognitive.microsoft.com/sts/v1.0/issueToken")
40
- API_KEY = os.getenv("AZURE_API_KEY", "YOUR_API_KEY")
41
- headers = {
42
- "api-key": API_KEY,
43
- "Content-Type": "application/json",
44
- }
45
-
46
-
47
- @register_model("gpt4v")
48
- class GPT4V(lmms):
49
- def __init__(
50
- self,
51
- model_version: str = "gpt-4-vision-preview",
52
- modality: str = "video",
53
- max_frames_for_video: int = 10,
54
- timeout: int = 120,
55
- **kwargs,
56
- ) -> None:
57
- super().__init__()
58
- # Manually set a image token for GPT4V so that we can search for it
59
- # and split the text and image
60
- # Here we just use the same token as llava for convenient
61
- self.model_version = model_version
62
- self.modality = modality
63
- self.max_frames_for_video = max_frames_for_video
64
- self.image_token = "<image>"
65
- self.timeout = timeout
66
-
67
- accelerator = Accelerator()
68
- # assert self.batch_size_per_gpu == 1, "Llava currently does not support batched generation. See https://github.com/haotian-liu/LLaVA/issues/754. HF Llava also has this issue."
69
- if accelerator.num_processes > 1:
70
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
71
- self.accelerator = accelerator
72
- if self.accelerator.is_local_main_process:
73
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
74
- self._rank = self.accelerator.local_process_index
75
- self._world_size = self.accelerator.num_processes
76
- else:
77
- self.accelerator = accelerator
78
- self._rank = self.accelerator.local_process_index
79
- self._world_size = self.accelerator.num_processes
80
-
81
- self.device = self.accelerator.device
82
-
83
- # Function to encode the image
84
- def encode_image(self, image: Image):
85
- output_buffer = BytesIO()
86
- image.save(output_buffer, format="PNG")
87
- byte_data = output_buffer.getvalue()
88
- base64_str = base64.b64encode(byte_data).decode("utf-8")
89
- return base64_str
90
-
91
- # Function to encode the video
92
- def encode_video(self, video_path, for_get_frames_num):
93
- vr = VideoReader(video_path, ctx=cpu(0))
94
- total_frame_num = len(vr)
95
- uniform_sampled_frames = np.linspace(0, total_frame_num - 1, for_get_frames_num, dtype=int)
96
- frame_idx = uniform_sampled_frames.tolist()
97
- frames = vr.get_batch(frame_idx).asnumpy()
98
-
99
- base64_frames = []
100
- for frame in frames:
101
- img = Image.fromarray(frame)
102
- output_buffer = BytesIO()
103
- img.save(output_buffer, format="PNG")
104
- byte_data = output_buffer.getvalue()
105
- base64_str = base64.b64encode(byte_data).decode("utf-8")
106
- base64_frames.append(base64_str)
107
-
108
- return base64_frames
109
-
110
- def flatten(self, input):
111
- new_list = []
112
- for i in input:
113
- for j in i:
114
- new_list.append(j)
115
- return new_list
116
-
117
- def generate_until(self, requests) -> List[str]:
118
- res = []
119
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
120
-
121
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
122
- # encode, pad, and truncate contexts for this batch
123
- # visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
124
- visuals = [doc_to_visual(self.task_dict[task][split][0])]
125
- visuals = self.flatten(visuals)
126
- imgs = [] # multiple images or frames for video
127
- for visual in visuals:
128
- if self.modality == "image":
129
- img = self.encode_image(visual)
130
- imgs.append(img)
131
- elif self.modality == "video":
132
- frames = self.encode_video(visual, self.max_frames_for_video)
133
- imgs.extend(frames)
134
-
135
- payload = {"model": self.model_version, "messages": []}
136
- response_json = {"role": "user", "content": []}
137
- # When there is no image token in the context, append the image to the text
138
- if self.image_token not in contexts:
139
- payload["messages"].append(deepcopy(response_json))
140
- payload["messages"][0]["content"].append({"type": "text", "text": contexts})
141
- for img in imgs:
142
- payload["messages"][0]["content"].append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
143
- else:
144
- contexts = contexts.split(self.image_token)
145
- for idx, img in enumerate(imgs):
146
- payload["messages"].append(deepcopy(response_json))
147
- payload["messages"][idx]["content"].append({"type": "text", "text": contexts[idx]})
148
- payload["messages"][idx]["content"].append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img}"}})
149
-
150
- # If n image tokens are in the contexts
151
- # contexts will be splitted into n+1 chunks
152
- # Manually add it into the payload
153
- payload["messages"].append(deepcopy(response_json))
154
- payload["messages"][-1]["content"].append({"type": "text", "text": contexts[-1]})
155
-
156
- if "max_new_tokens" not in gen_kwargs:
157
- gen_kwargs["max_new_tokens"] = 1024
158
- if "temperature" not in gen_kwargs:
159
- gen_kwargs["temperature"] = 0
160
- if "top_p" not in gen_kwargs:
161
- gen_kwargs["top_p"] = None
162
- if "num_beams" not in gen_kwargs:
163
- gen_kwargs["num_beams"] = 1
164
-
165
- payload["max_tokens"] = gen_kwargs["max_new_tokens"]
166
- payload["temperature"] = gen_kwargs["temperature"]
167
-
168
- for attempt in range(5):
169
- try:
170
- response = url_requests.post(API_URL, headers=headers, json=payload, timeout=self.timeout)
171
- response_data = response.json()
172
-
173
- content = response_data["choices"][0]["message"]["content"].strip()
174
- break # If successful, break out of the loop
175
-
176
- except Exception as e:
177
- eval_logger.info(f"Attempt {attempt + 1} failed with error: {str(e)}")
178
- if attempt < 5 - 1: # If we have retries left, sleep and then continue to next attempt
179
- time.sleep(NUM_SECONDS_TO_SLEEP)
180
- else: # If this was the last attempt, log and return empty
181
- eval_logger.error(f"All 5 attempts failed. Last error message: {str(e)}")
182
- eval_logger.error(f"Response: {response}")
183
- content = ""
184
- res.append(content)
185
- pbar.update(1)
186
- pbar.close()
187
- return res
188
-
189
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
190
- # TODO
191
- assert False, "GPT4V not support"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/idefics2.py DELETED
@@ -1,231 +0,0 @@
1
- import torch
2
-
3
- from tqdm import tqdm
4
- from lmms_eval import utils
5
- from lmms_eval.api.instance import Instance
6
- from lmms_eval.api.model import lmms
7
- from lmms_eval.api.registry import register_model
8
- from accelerate import Accelerator, DistributedType
9
- from accelerate.state import AcceleratorState
10
- from typing import List, Optional, Union, Tuple
11
- from transformers import Idefics2ForConditionalGeneration, AutoProcessor
12
-
13
- import warnings
14
-
15
- warnings.filterwarnings("ignore")
16
-
17
- from loguru import logger as eval_logger
18
-
19
- DEFAULT_IMAGE_TOKEN = "<image>"
20
- try:
21
- import flash_attn
22
-
23
- best_fit_attn_implementation = "flash_attention_2"
24
- except ImportError:
25
- best_fit_attn_implementation = "eager"
26
-
27
-
28
- @register_model("idefics2")
29
- class Idefics2(lmms):
30
- """
31
- Idefics2 Model for Hugging Face Transformers: https://github.com/huggingface/transformers/blob/main/src/transformers/models/idefics2/modeling_idefics2.py
32
-
33
- Example usage:
34
-
35
- accelerate launch --num_processes=8 -m lmms_eval \
36
- --model idefics2 \
37
- --model_args pretrained=HuggingFaceM4/idefics2-8b \
38
- --tasks mme \
39
- --batch_size 1 \
40
- --output_path ./logs/ \
41
- --log_samples
42
- """
43
-
44
- def __init__(
45
- self,
46
- pretrained: str = "HuggingFaceM4/idefics2-8b",
47
- revision: str = "main",
48
- device: str = "cuda",
49
- dtype: Optional[Union[str, torch.dtype]] = "float16",
50
- batch_size: int = 1,
51
- trust_remote_code: Optional[bool] = False,
52
- attn_implementation: Optional[str] = best_fit_attn_implementation,
53
- device_map: str = "",
54
- use_cache: bool = True,
55
- do_image_splitting: bool = False,
56
- **kwargs,
57
- ) -> None:
58
- super().__init__()
59
- # Do not use kwargs for now
60
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
61
-
62
- accelerator = Accelerator()
63
- if accelerator.num_processes > 1 and device_map == "":
64
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
65
- self.device_map = f"cuda:{accelerator.local_process_index}"
66
- else:
67
- self._device = torch.device(device)
68
- self.device_map = device_map
69
- if isinstance(dtype, str) and dtype != "auto":
70
- dtype = getattr(torch, dtype)
71
- self._model = Idefics2ForConditionalGeneration.from_pretrained(pretrained, revision=revision, torch_dtype=dtype, device_map=self.device_map, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation)
72
- self._processor = AutoProcessor.from_pretrained(pretrained, do_image_splitting=do_image_splitting, revision=revision, trust_remote_code=trust_remote_code)
73
-
74
- self._tokenizer = self._processor.tokenizer
75
- self._config = self._model.config
76
- self.batch_size_per_gpu = int(batch_size)
77
- self.use_cache = use_cache
78
- if accelerator.num_processes > 1 and device_map == "":
79
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
80
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
81
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
82
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
83
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
84
- kwargs = {
85
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
86
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
87
- }
88
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
89
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
90
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
91
- self._model = accelerator.prepare(self.model)
92
- else:
93
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
94
- self.accelerator = accelerator
95
- if self.accelerator.is_local_main_process:
96
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
97
- self._rank = self.accelerator.local_process_index
98
- self._world_size = self.accelerator.num_processes
99
- elif accelerator.num_processes == 1 and device_map == "auto":
100
- eval_logger.info(f"Using {accelerator.num_processes} devices with pipeline parallelism")
101
- self._rank = 0
102
- self._word_size = 1
103
- else:
104
- eval_logger.info(f"Using single device: {self._device}")
105
- self.model.to(self._device)
106
- self._rank = 0
107
- self._word_size = 1
108
-
109
- @property
110
- def config(self):
111
- # return the associated transformers.AutoConfig for the given pretrained model.
112
- return self._config
113
-
114
- @property
115
- def tokenizer(self):
116
- return self._tokenizer
117
-
118
- @property
119
- def model(self):
120
- # returns the model, unwrapping it if using Accelerate
121
- if hasattr(self, "accelerator"):
122
- return self.accelerator.unwrap_model(self._model)
123
- else:
124
- return self._model
125
-
126
- @property
127
- def eot_token_id(self):
128
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
129
- return self.tokenizer.eos_token_id
130
-
131
- @property
132
- def max_length(self):
133
- return self._max_length
134
-
135
- @property
136
- def batch_size(self):
137
- return self.batch_size_per_gpu
138
-
139
- @property
140
- def device(self):
141
- return self._device
142
-
143
- @property
144
- def rank(self):
145
- return self._rank
146
-
147
- @property
148
- def world_size(self):
149
- return self._world_size
150
-
151
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
152
- """ """
153
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
154
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
155
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
156
- if left_truncate_len:
157
- encoding = encoding[-left_truncate_len:]
158
- return encoding
159
-
160
- def tok_decode(self, tokens):
161
- return self.tokenizer.decode(tokens)
162
-
163
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
164
- raise NotImplementedError("Loglikelihood is not implemented for Idefics2 model")
165
-
166
- def flatten(self, input):
167
- new_list = []
168
- for i in input:
169
- for j in i:
170
- new_list.append(j)
171
- return new_list
172
-
173
- def generate_until(self, requests: List[Instance]) -> List[str]:
174
- res = []
175
-
176
- def _collate(x):
177
- # the negative sign on len(toks) sorts descending - this has a few advantages:
178
- # - time estimates will always be over not underestimates, which is more useful for planning
179
- # - to know the size of a batch when going through the list, you know the first one is always the batch
180
- # padded context length. this is useful to simplify the batching logic and more importantly to make
181
- # automatic adaptive batches much much easier to implement
182
- # - any OOMs will happen right away rather than near the end
183
- toks = self.tok_encode(x[0])
184
- return -len(toks), x[0]
185
-
186
- # we group requests by their generation_kwargs,
187
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
188
- # in the same batch.
189
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
190
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
191
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
192
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
193
- for chunk in chunks:
194
- contexts, all_gen_kwargs, doc_to_visuals, doc_id, tasks, splits = zip(*chunk)
195
- visuals = [doc_to_visual(self.task_dict[task][split][ids]) for ids, task, split, doc_to_visual in zip(doc_id, tasks, splits, doc_to_visuals)]
196
- # we assume all gen kwargs in the batch are the same
197
- # this is safe to assume because the `grouper` object ensures it.
198
- gen_kwargs = all_gen_kwargs[0]
199
- #
200
- until = gen_kwargs.pop("until", None)
201
- image_aspect_ratio = gen_kwargs.pop("image_aspect_ratio", None)
202
- if "max_new_tokens" not in gen_kwargs:
203
- gen_kwargs["max_new_tokens"] = 1024
204
- if "temperature" not in gen_kwargs:
205
- gen_kwargs["temperature"] = 0
206
-
207
- prompts = []
208
- for context, visual in zip(contexts, visuals):
209
- content = []
210
- if DEFAULT_IMAGE_TOKEN not in context:
211
- for image in visual:
212
- content.append({"type": "image"})
213
- content.append({"type": "text", "text": context})
214
- message = [{"role": "user", "content": content}]
215
- prompt = self._processor.apply_chat_template(message, add_generation_prompt=True)
216
- prompts.append(prompt)
217
- inputs = self._processor(text=prompts, images=visuals, padding=True, return_tensors="pt")
218
- inputs = {k: v.to(self.device) for k, v in inputs.items()}
219
- output_ids = self.model.generate(**inputs, **gen_kwargs)
220
- # only retain the generated text
221
- for output_id, input_id in zip(output_ids, inputs["input_ids"]):
222
- generated_id = output_id[len(input_id) :]
223
- generated_text = self.tokenizer.decode(generated_id, skip_special_tokens=True)
224
-
225
- res.append(generated_text)
226
- pbar.update(1)
227
- # reorder this group of results back to original unsorted form
228
- res = re_ords.get_original(res)
229
-
230
- pbar.close()
231
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/instructblip.py DELETED
@@ -1,229 +0,0 @@
1
- import torch
2
-
3
- import copy
4
- from tqdm import tqdm
5
- from lmms_eval import utils
6
- from lmms_eval.api.instance import Instance
7
- from lmms_eval.api.model import lmms
8
- from lmms_eval.api.registry import register_model
9
- from lmms_eval.tasks.mmmu.utils_group_img import process_images
10
- from accelerate import Accelerator, DistributedType
11
- from accelerate.state import AcceleratorState
12
- from typing import List, Optional, Union, Tuple
13
- import transformers
14
- from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
15
-
16
- from lmms_eval.utils import stop_sequences_criteria
17
-
18
-
19
- import warnings
20
-
21
- warnings.filterwarnings("ignore")
22
-
23
- from loguru import logger as eval_logger
24
-
25
-
26
- @register_model("instructblip")
27
- class InstructBLIP(lmms):
28
- """
29
- InstructBLIP Model
30
- """
31
-
32
- def __init__(
33
- self,
34
- pretrained: str = "Salesforce/instructblip-vicuna-7b",
35
- device: Optional[str] = "cuda",
36
- dtype: Optional[Union[str, torch.dtype]] = "auto",
37
- batch_size: Optional[Union[int, str]] = 1,
38
- **kwargs,
39
- ) -> None:
40
- super().__init__()
41
- # Do not use kwargs for now
42
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
43
-
44
- accelerator = Accelerator()
45
- if accelerator.num_processes > 1:
46
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
47
- else:
48
- self._device = device
49
- self._model = InstructBlipForConditionalGeneration.from_pretrained(pretrained, device_map=self._device)
50
- self._image_processor = InstructBlipProcessor.from_pretrained(pretrained)
51
- self._tokenizer = self._image_processor.tokenizer
52
- self._config = self._model.config
53
- self.model.eval()
54
- self.model.tie_weights()
55
- self.batch_size_per_gpu = int(batch_size)
56
- if accelerator.num_processes > 1:
57
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
58
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
59
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
60
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
61
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
62
- kwargs = {
63
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
64
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
65
- }
66
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
67
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
68
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
69
- self._model = accelerator.prepare(self.model)
70
- else:
71
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
72
- self.accelerator = accelerator
73
- if self.accelerator.is_local_main_process:
74
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
75
- self._rank = self.accelerator.local_process_index
76
- self._world_size = self.accelerator.num_processes
77
- else:
78
- self.model.to(self._device)
79
- self._rank = 0
80
- self._word_size = 1
81
-
82
- @property
83
- def config(self):
84
- # return the associated transformers.AutoConfig for the given pretrained model.
85
- return self._config
86
-
87
- @property
88
- def tokenizer(self):
89
- return self._tokenizer
90
-
91
- @property
92
- def model(self):
93
- # returns the model, unwrapping it if using Accelerate
94
- if hasattr(self, "accelerator"):
95
- return self.accelerator.unwrap_model(self._model)
96
- else:
97
- return self._model
98
-
99
- @property
100
- def eot_token_id(self):
101
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
102
- return self.tokenizer.eos_token_id
103
-
104
- @property
105
- def max_length(self):
106
- return self._max_length
107
-
108
- @property
109
- def batch_size(self):
110
- return self.batch_size_per_gpu
111
-
112
- @property
113
- def device(self):
114
- return self._device
115
-
116
- @property
117
- def rank(self):
118
- return self._rank
119
-
120
- @property
121
- def world_size(self):
122
- return self._world_size
123
-
124
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
125
- """ """
126
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
127
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
128
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
129
- if left_truncate_len:
130
- encoding = encoding[-left_truncate_len:]
131
- return encoding
132
-
133
- def tok_decode(self, tokens):
134
- return self.tokenizer.decode(tokens)
135
-
136
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
137
- # TODO
138
- assert False, "We have not implemented this function for InstructBLIP yet"
139
-
140
- def flatten(self, input):
141
- new_list = []
142
- for i in input:
143
- for j in i:
144
- new_list.append(j)
145
- return new_list
146
-
147
- def generate_until(self, requests: List[Instance]) -> List[str]:
148
- res = []
149
-
150
- def _collate(x):
151
- # the negative sign on len(toks) sorts descending - this has a few advantages:
152
- # - time estimates will always be over not underestimates, which is more useful for planning
153
- # - to know the size of a batch when going through the list, you know the first one is always the batch
154
- # padded context length. this is useful to simplify the batching logic and more importantly to make
155
- # automatic adaptive batches much much easier to implement
156
- # - any OOMs will happen right away rather than near the end
157
- toks = self.tok_encode(x[0])
158
- return -len(toks), x[0]
159
-
160
- # we group requests by their generation_kwargs,
161
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
162
- # in the same batch.
163
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
164
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
165
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
166
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
167
- for chunk in chunks:
168
- contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
169
- task = task[0]
170
- split = split[0]
171
- visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id]
172
- visuals = self.flatten(visuals)
173
- # we assume all gen kwargs in the batch are the same
174
- # this is safe to assume because the `grouper` object ensures it.
175
- gen_kwargs = all_gen_kwargs[0]
176
-
177
- # Set default values for until and max_new_tokens
178
- until = [self.tok_decode(self.eot_token_id)]
179
-
180
- # Update values from gen_kwargs if present
181
- if "until" in gen_kwargs:
182
- until = gen_kwargs.pop("until")
183
- if isinstance(until, str):
184
- until = [until]
185
- elif not isinstance(until, list):
186
- raise ValueError(f"Expected `gen_kwargs['until']` to be of type Union[str,list] but got {type(until)}")
187
- assert self.batch_size_per_gpu == 1, "Do not support batch_size_per_gpu > 1 for now"
188
- context = contexts[0]
189
- if "<image>" in context:
190
- # instruct blip does not expect the <image> tag
191
- context = context.replace("<image>", "")
192
- # Set trunction equals true here, the max length for qformer tokenizer is 512
193
- # if not truncate, some questions will cause size mismatch
194
- # The transformer implementation can't handle multi images for blip
195
- # Concat it into one image
196
- if len(visuals) > 1:
197
- visuals = [process_images(visuals)]
198
- inputs = self._image_processor(images=visuals, text=context, return_tensors="pt", truncation=True).to(self.device)
199
-
200
- gen_kwargs["image_sizes"] = [visuals[idx].size for idx in range(len(visuals))]
201
- if "max_new_tokens" not in gen_kwargs:
202
- gen_kwargs["max_new_tokens"] = 1024
203
- if "temperature" not in gen_kwargs:
204
- gen_kwargs["temperature"] = 0
205
- if "top_p" not in gen_kwargs:
206
- gen_kwargs["top_p"] = None
207
- if "num_beams" not in gen_kwargs:
208
- gen_kwargs["num_beams"] = 1
209
- try:
210
- cont = self.model.generate(
211
- **inputs,
212
- do_sample=True if gen_kwargs["temperature"] > 0 else False,
213
- temperature=gen_kwargs["temperature"],
214
- top_p=gen_kwargs["top_p"],
215
- num_beams=gen_kwargs["num_beams"],
216
- max_new_tokens=gen_kwargs["max_new_tokens"],
217
- )
218
- except Exception as e:
219
- eval_logger.error(f"Error {e} in generating")
220
- cont = ""
221
- text_outputs = self.tokenizer.batch_decode(cont, skip_special_tokens=True)[0].strip()
222
- res.append(text_outputs)
223
- self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)
224
- pbar.update(1)
225
- # reorder this group of results back to original unsorted form
226
- res = re_ords.get_original(res)
227
-
228
- pbar.close()
229
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/internvl.py DELETED
@@ -1,484 +0,0 @@
1
- import os
2
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
3
- from accelerate.state import AcceleratorState
4
- from typing import List, Optional, Union, Tuple
5
- import torch
6
- from tqdm import tqdm
7
- import numpy as np
8
- import math
9
- from datetime import timedelta
10
- from transformers import AutoConfig
11
- from huggingface_hub import snapshot_download
12
- import requests
13
-
14
- from lmms_eval import utils
15
- from lmms_eval.api.instance import Instance
16
- from lmms_eval.api.model import lmms
17
- from lmms_eval.api.registry import register_model
18
- from lmms_eval.utils import stop_sequences_criteria
19
- from PIL import Image
20
-
21
- import subprocess
22
- from pathlib import Path
23
-
24
- wd = Path(__file__).parent.parent.parent.resolve()
25
- import sys
26
-
27
- sys.path.append(os.path.join(str(wd), "InternVL", "internvl_chat"))
28
- from loguru import logger as eval_logger
29
-
30
- if not hasattr(eval_logger, "internvl_warning_logged"):
31
- eval_logger.internvl_warning_logged = False
32
-
33
- try:
34
- from internvl.model.internlm2.modeling_internlm2 import InternLM2ForCausalLM
35
- from internvl.model.internvl_chat.configuration_internvl_chat import InternVLChatConfig
36
- from internvl.model.internvl_chat.modeling_intern_vit import InternVisionModel
37
- from internvl.model.internvl_chat import InternVLChatModel
38
- from internvl.train.dataset import build_transform, dynamic_preprocess
39
- except ImportError:
40
- eval_logger.debug("InternVL is not installed. Please install InternVL to use this model.")
41
- if not eval_logger.internvl_warning_logged:
42
- eval_logger.debug("InternVL is not installed. Please install InternVL to use this model.")
43
- eval_logger.internvl_warning_logged = True
44
-
45
- import warnings
46
- from typing import Any, List, Optional, Tuple, Union
47
-
48
- import torch.utils.checkpoint
49
-
50
- from peft import LoraConfig, get_peft_model
51
- from torch import nn
52
- from torch.nn import CrossEntropyLoss
53
- from transformers import AutoModel, GenerationConfig, LlamaForCausalLM, LlamaTokenizer
54
- from transformers.modeling_outputs import CausalLMOutputWithPast
55
- from transformers.modeling_utils import PreTrainedModel
56
- from transformers import AutoTokenizer
57
- import re
58
- from huggingface_hub import snapshot_download
59
-
60
-
61
- @register_model("internvl")
62
- class InternVLChat(lmms):
63
- # config_class = InternVLChatConfig
64
- main_input_name = "pixel_values"
65
- _no_split_modules = ["InternVisionEncoderLayer", "LlamaDecoderLayer"]
66
-
67
- """
68
- 0. Install lmms-eval
69
- cd lmms-eval
70
- pip install -e .
71
-
72
- How to Install InternVL:
73
- 1. Clone the InternVL repository:
74
- git clone https://github.com/OpenGVLab/InternVL.git
75
-
76
- 2. Install the requirements:
77
- pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118
78
-
79
- 3. Install flash-attn==2.3.6:
80
- pip install flash-attn==2.3.6 --no-build-isolation
81
- """
82
-
83
- """
84
- How to download the pretrained model:
85
- 1. Download the pretrained model from hugginface:
86
- cd pretrained/
87
- # pip install -U huggingface_hub
88
- huggingface-cli download --resume-download --local-dir-use-symlinks False OpenGVLab/InternVL-Chat-V1-5 --local-dir InternVL-Chat-V1-5
89
-
90
- 2. the pretrained model should be in the following directory:
91
- pretrained
92
- └── InternVL-Chat-V1-5
93
- """
94
-
95
- #
96
- # The above steps can be optional, I add snapshot download, so now can just use hf repo_id
97
- # model_args pretrained=OpenGVLab/InternVL-Chat-V1-5
98
- #
99
-
100
- """
101
- InternVL-Chat-V1-5 Model for OpenGVLab https://github.com/OpenGVLab/InternVL/blob/main/internvl_chat/internvl/model/internvl_chat/modeling_internvl_chat.py
102
- Example usage:
103
-
104
- accelerate launch --num_processes=8 --main_process_port 12345 -m lmms_eval \
105
- --model internvl \
106
- --model_args pretrained=OpenGVLab/InternVL-Chat-V1-5 \
107
- --tasks llava_wilder_small \
108
- --batch_size 1 \
109
- --output_path ./logs/ \
110
- --log_samples
111
- """
112
-
113
- def __init__(
114
- self,
115
- config=None,
116
- pretrained: str = "OpenGVLab/InternVL-Chat-V1-5",
117
- truncation: Optional[bool] = True,
118
- device: Optional[str] = "cuda:0",
119
- dtype: Optional[Union[str, torch.dtype]] = "auto",
120
- batch_size: Optional[Union[int, str]] = 1,
121
- trust_remote_code: Optional[bool] = False,
122
- revision=None,
123
- device_map="cuda:0",
124
- conv_template="vicuna_v1",
125
- use_cache=True,
126
- truncate_context=False, # whether to truncate the context in generation, set it False for LLaVA-1.6
127
- customized_config=None, # ends in json
128
- dynamic=True,
129
- load_in_8bit=False,
130
- vision_model=None,
131
- language_model=None,
132
- max_num=12,
133
- **kwargs,
134
- ) -> None:
135
- super().__init__()
136
-
137
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
138
-
139
- accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
140
- accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
141
- if accelerator.num_processes > 1:
142
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
143
- self.device_map = f"cuda:{accelerator.local_process_index}"
144
- elif accelerator.num_processes == 1 and device_map == "auto":
145
- self._device = torch.device(device)
146
- self.device_map = device_map
147
- else:
148
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
149
- self.device_map = f"cuda:{accelerator.local_process_index}"
150
-
151
- self.dynamic = dynamic # dynamic image_size
152
- self.max_num = max_num
153
- if accelerator.is_main_process:
154
- cache_dir = snapshot_download(repo_id=pretrained, cache_dir="cache_dir", local_dir="cache_dir", local_dir_use_symlinks=False)
155
- accelerator.wait_for_everyone()
156
- # So what I did is that I let main process to download the repo, and then
157
- # other process can just simply read from this repo
158
- cache_dir = snapshot_download(repo_id=pretrained, cache_dir="cache_dir", local_dir="cache_dir", local_dir_use_symlinks=False)
159
- config = InternVLChatConfig.from_pretrained(cache_dir)
160
- tokenizer = AutoTokenizer.from_pretrained(cache_dir, trust_remote_code=True, use_fast=False)
161
- model = InternVLChatModel.from_pretrained(cache_dir, low_cpu_mem_usage=True, config=config, torch_dtype=torch.bfloat16, load_in_8bit=load_in_8bit).eval()
162
- if not load_in_8bit:
163
- model = model.cuda()
164
- # self.model=model
165
- # self.device=self._device
166
- self._tokenizer = tokenizer
167
- # self.tokenizer=tokenizer
168
- self._model = model
169
- self._config = self._model.config
170
- self.use_thumbnail = self.model.config.use_thumbnail
171
- self.model.eval()
172
- self.model.tie_weights()
173
- self.truncation = truncation
174
- self.batch_size_per_gpu = int(batch_size)
175
- self.conv_template = conv_template
176
- self.use_cache = use_cache
177
- self.truncate_context = truncate_context
178
- if accelerator.num_processes > 1:
179
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
180
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
181
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
182
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
183
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
184
- kwargs = {
185
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
186
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
187
- }
188
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
189
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
190
-
191
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
192
- self._model = accelerator.prepare(self.model)
193
- else:
194
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
195
- self.accelerator = accelerator
196
- if self.accelerator.is_local_main_process:
197
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
198
- self._rank = self.accelerator.local_process_index
199
- self._world_size = self.accelerator.num_processes
200
- elif accelerator.num_processes == 1 and device_map == "auto":
201
- eval_logger.info(f"Using {accelerator.num_processes} devices with tensor parallelism")
202
- self._rank = 0
203
- self._word_size = 1
204
- else:
205
- eval_logger.info(f"Using single device: {self._device}")
206
- self.model.to(self._device)
207
- self._rank = 0
208
- self._world_size = 1
209
-
210
- # from internvl model
211
-
212
- self.image_size = config.force_image_size or config.vision_config.image_size
213
-
214
- def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
215
- lora_config = LoraConfig(
216
- r=r,
217
- target_modules=["attn.qkv", "attn.proj", "mlp.fc1", "mlp.fc2"],
218
- lora_alpha=lora_alpha,
219
- lora_dropout=lora_dropout,
220
- )
221
- self.vision_model = get_peft_model(self.vision_model, lora_config)
222
- self.vision_model.print_trainable_parameters()
223
-
224
- def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
225
- lora_config = LoraConfig(
226
- r=r, target_modules=["self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj", "mlp.gate_proj", "mlp.down_proj", "mlp.up_proj"], lora_alpha=lora_alpha, lora_dropout=lora_dropout, task_type="CAUSAL_LM"
227
- )
228
- self.language_model = get_peft_model(self.language_model, lora_config)
229
- self.language_model.enable_input_require_grads()
230
- self.language_model.print_trainable_parameters()
231
-
232
- def pixel_shuffle(self, x, scale_factor=0.5):
233
- n, w, h, c = x.size()
234
- # N, W, H, C --> N, W, H * scale, C // scale
235
- x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
236
- # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
237
- x = x.permute(0, 2, 1, 3).contiguous()
238
- # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
239
- x = x.view(n, int(h * scale_factor), int(w * scale_factor), int(c / (scale_factor * scale_factor)))
240
- if self.ps_version == "v1":
241
- warnings.warn("In ps_version 'v1', the height and width have not been swapped back, " "which results in a transposed image.")
242
- else:
243
- x = x.permute(0, 2, 1, 3).contiguous()
244
- return x
245
-
246
- def noised_embed(self, vit_embeds, noise_alpha=5):
247
- dims = torch.tensor(vit_embeds.size(1) * vit_embeds.size(2))
248
- mag_norm = noise_alpha / torch.sqrt(dims)
249
- noise = torch.zeros_like(vit_embeds).uniform_(-mag_norm, mag_norm)
250
- return vit_embeds + noise
251
-
252
- def extract_feature(self, pixel_values):
253
- if self.select_layer == -1:
254
- vit_embeds = self.vision_model(pixel_values=pixel_values, output_hidden_states=False, return_dict=True).last_hidden_state
255
- else:
256
- vit_embeds = self.vision_model(pixel_values=pixel_values, output_hidden_states=True, return_dict=True).hidden_states[self.select_layer]
257
- vit_embeds = vit_embeds[:, 1:, :]
258
-
259
- if self.training and self.neftune_alpha is not None:
260
- vit_embeds = self.noised_embed(vit_embeds, self.neftune_alpha)
261
-
262
- h = w = int(vit_embeds.shape[1] ** 0.5)
263
- vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
264
- vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
265
- vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
266
- vit_embeds = self.mlp1(vit_embeds) # .to(pixel_values.device)
267
- return vit_embeds
268
-
269
- def multi_image_chat(self, tokenizer, pixel_values, image_counts, question, generation_config, history=None, return_history=False, IMG_START_TOKEN="<img>", IMG_END_TOKEN="</img>", IMG_CONTEXT_TOKEN="<IMG_CONTEXT>"):
270
- img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
271
- self.img_context_token_id = img_context_token_id
272
- if tokenizer.convert_tokens_to_ids("<|im_end|>") != 0:
273
- eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") # 92542, InternLM2
274
- else:
275
- eos_token_id = tokenizer.eos_token_id
276
-
277
- from internvl.conversation import get_conv_template
278
-
279
- template = get_conv_template(self.template)
280
-
281
- if history is None:
282
- history = []
283
- image_tokens = ""
284
- image_bs = pixel_values.shape[0]
285
- # print(f"dynamic ViT batch size: {image_bs}, image_counts: {image_counts}")
286
- for idx, image_count in enumerate(image_counts):
287
- image_tokens += f"<image {idx+1}> (图{idx+1}):" + IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * image_count + IMG_END_TOKEN
288
- question = image_tokens + "\n" + question
289
- else:
290
- for old_question, old_answer in history:
291
- template.append_message(template.roles[0], old_question)
292
- template.append_message(template.roles[1], old_answer)
293
- template.append_message(template.roles[0], question)
294
- template.append_message(template.roles[1], None)
295
- query = template.get_prompt()
296
- model_inputs = tokenizer(query, return_tensors="pt")
297
- input_ids = model_inputs["input_ids"].cuda()
298
- attention_mask = model_inputs["attention_mask"].cuda()
299
- generation_config["eos_token_id"] = eos_token_id
300
-
301
- generation_output = self.generate(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, **generation_config)
302
- response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
303
- response = response.split("<|im_end|>")[0].strip() # for InternLM2
304
- history.append((question, response))
305
- if return_history:
306
- return response, history
307
- else:
308
- query_to_print = query.replace(image_tokens, "<image>")
309
- # print(query_to_print, response)
310
- return response
311
- return response
312
-
313
- @property
314
- def tokenizer(self):
315
- return self._tokenizer
316
-
317
- @property
318
- def model(self):
319
- # returns the model, unwrapping it if using Accelerate
320
- if hasattr(self, "accelerator"):
321
- return self.accelerator.unwrap_model(self._model)
322
- else:
323
- return self._model
324
-
325
- @property
326
- def batch_size(self):
327
- return self.batch_size_per_gpu
328
-
329
- @property
330
- def device(self):
331
- return self._device
332
-
333
- @property
334
- def rank(self):
335
- return self._rank
336
-
337
- @property
338
- def world_size(self):
339
- return self._world_size
340
-
341
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
342
- """ """
343
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
344
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
345
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
346
- if left_truncate_len:
347
- encoding = encoding[-left_truncate_len:]
348
- return encoding
349
-
350
- def tok_decode(self, tokens):
351
- try:
352
- return self.tokenizer.decode(tokens)
353
- except:
354
- return self.tokenizer.decode([tokens])
355
-
356
- def post_processing(self, response):
357
- response = response.replace("\n", "").replace("不是", "No").replace("是", "Yes").replace("否", "No")
358
- response = response.lower().replace("true", "yes").replace("false", "no")
359
- pattern = re.compile(r"[\u4e00-\u9fa5]")
360
- response = re.sub(pattern, "", response)
361
- return response
362
-
363
- @torch.no_grad()
364
- def generate(
365
- self,
366
- pixel_values: Optional[torch.FloatTensor] = None,
367
- input_ids: Optional[torch.FloatTensor] = None,
368
- attention_mask: Optional[torch.LongTensor] = None,
369
- visual_features: Optional[torch.FloatTensor] = None,
370
- generation_config: Optional[GenerationConfig] = None,
371
- output_hidden_states: Optional[bool] = None,
372
- return_dict: Optional[bool] = None,
373
- **generate_kwargs,
374
- ) -> torch.LongTensor:
375
- assert self.img_context_token_id is not None
376
- if pixel_values is not None:
377
- if visual_features is not None:
378
- vit_embeds = visual_features
379
- else:
380
- vit_embeds = self.extract_feature(pixel_values)
381
-
382
- input_embeds = self.language_model.get_input_embeddings()(input_ids)
383
- B, N, C = input_embeds.shape
384
- input_embeds = input_embeds.reshape(B * N, C)
385
-
386
- input_ids = input_ids.reshape(B * N)
387
- selected = input_ids == self.img_context_token_id
388
- assert selected.sum() != 0
389
- input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
390
-
391
- input_embeds = input_embeds.reshape(B, N, C)
392
- else:
393
- input_embeds = self.language_model.get_input_embeddings()(input_ids)
394
-
395
- outputs = self.language_model.generate(
396
- inputs_embeds=input_embeds,
397
- attention_mask=attention_mask,
398
- generation_config=generation_config,
399
- output_hidden_states=output_hidden_states,
400
- return_dict=return_dict,
401
- use_cache=True,
402
- **generate_kwargs,
403
- )
404
-
405
- return outputs
406
-
407
- def flatten(self, input):
408
- new_list = []
409
- for i in input:
410
- for j in i:
411
- new_list.append(j)
412
- return new_list
413
-
414
- def load_image(self, flattened_visuals, input_size=224):
415
- assert flattened_visuals[0].mode == "RGB"
416
- image = flattened_visuals[0].convert("RGB")
417
- transform = build_transform(is_train=False, input_size=input_size)
418
- if self.dynamic:
419
- images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=self.use_thumbnail, max_num=self.max_num)
420
- else:
421
- images = [image]
422
- pixel_values = [transform(image) for image in images]
423
- pixel_values = torch.stack(pixel_values)
424
- return pixel_values
425
-
426
- def generate_until(self, requests: List[Instance]) -> List[str]:
427
- res = []
428
-
429
- def _collate(x):
430
- # the negative sign on len(toks) sorts descending - this has a few advantages:
431
- # - time estimates will always be over not underestimates, which is more useful for planning
432
- # - to know the size of a batch when going through the list, you know the first one is always the batch
433
- # padded context length. this is useful to simplify the batching logic and more importantly to make
434
- # automatic adaptive batches much much easier to implement
435
- # - any OOMs will happen right away rather than near the end
436
- toks = self.tok_encode(x[0])
437
- return -len(toks), x[0]
438
-
439
- # we group requests by their generation_kwargs,
440
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
441
- # in the same batch.
442
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
443
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
444
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
445
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
446
- for chunk in chunks:
447
- contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
448
- task = task[0]
449
- split = split[0]
450
- batched_visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id] # [B, N]
451
- flattened_visuals = self.flatten(batched_visuals)
452
- pixel_values = self.load_image(flattened_visuals, self.image_size).cuda().to(torch.bfloat16)
453
- gen_kwargs = all_gen_kwargs[0]
454
-
455
- if "max_new_tokens" not in gen_kwargs:
456
- gen_kwargs["max_new_tokens"] = 1024
457
- if "temperature" not in gen_kwargs:
458
- gen_kwargs["temperature"] = 0
459
- if "top_p" not in gen_kwargs:
460
- gen_kwargs["top_p"] = None
461
- if "num_beams" not in gen_kwargs:
462
- gen_kwargs["num_beams"] = 1
463
-
464
- generation_config = dict(
465
- do_sample=False,
466
- top_k=50,
467
- top_p=gen_kwargs["top_p"],
468
- num_beams=gen_kwargs["num_beams"],
469
- max_new_tokens=gen_kwargs["max_new_tokens"],
470
- eos_token_id=self.tokenizer.eos_token_id,
471
- )
472
- question = contexts[0]
473
- response = self.model.chat(tokenizer=self.tokenizer, pixel_values=pixel_values, question=question, generation_config=generation_config)
474
- # TODO(choiszt) try batch_chat for multiple inputs
475
- response = self.post_processing(response)
476
- res.append(response)
477
- self.cache_hook.add_partial("generate_until", (question, gen_kwargs), response)
478
- pbar.update(1)
479
- res = re_ords.get_original(res)
480
- return res
481
- # print(chunk)
482
-
483
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
484
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/llama_vid.py DELETED
@@ -1,271 +0,0 @@
1
- import os
2
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
3
- from accelerate.state import AcceleratorState
4
- from typing import List, Optional, Union, Tuple
5
- import torch
6
- from tqdm import tqdm
7
- from decord import VideoReader, cpu
8
- import numpy as np
9
- import math
10
- from datetime import timedelta
11
- from transformers import AutoConfig
12
- from huggingface_hub import snapshot_download
13
- import requests
14
-
15
- from lmms_eval import utils
16
- from lmms_eval.api.instance import Instance
17
- from lmms_eval.api.model import lmms
18
- from lmms_eval.api.registry import register_model
19
- from lmms_eval.utils import stop_sequences_criteria
20
- from lmms_eval.models.model_utils.load_video import read_video_pyav
21
-
22
- import subprocess
23
-
24
- from loguru import logger as eval_logger
25
-
26
- try:
27
- from llamavid.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
28
- from llamavid.conversation import conv_templates, SeparatorStyle
29
- from llamavid.model.builder import load_pretrained_model
30
- from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
31
- except ImportError:
32
- eval_logger.debug("LLaMA-Video is not installed. Please install LLaMA-Video to use this model.")
33
-
34
-
35
- @register_model("llama_vid")
36
- class LLaMAVid(lmms):
37
- def __init__(
38
- self,
39
- pretrained: str = "YanweiLi/llama-vid-7b-full-224-video-fps-1",
40
- truncation: Optional[bool] = True,
41
- device: Optional[str] = "cuda:0",
42
- dtype: Optional[Union[str, torch.dtype]] = "auto",
43
- batch_size: Optional[Union[int, str]] = 1,
44
- trust_remote_code: Optional[bool] = False,
45
- revision=None,
46
- attn_implementation=(
47
- "sdpa" if torch.__version__ > "2.1.2" else "eager"
48
- ), # inference implementation for attention, can be "sdpa", "eager", "flash_attention_2". Seems FA2 is not effective during inference: https://discuss.huggingface.co/t/flash-attention-has-no-effect-on-inference/73453/5
49
- device_map="cuda:0",
50
- conv_template="vicuna_v1",
51
- use_cache=True,
52
- truncate_context=False,
53
- num_frames: int = 100,
54
- **kwargs,
55
- ) -> None:
56
- super().__init__()
57
-
58
- accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
59
- accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
60
- if accelerator.num_processes > 1:
61
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
62
- self.device_map = f"cuda:{accelerator.local_process_index}"
63
- elif accelerator.num_processes == 1 and device_map == "auto":
64
- self._device = torch.device(device)
65
- self.device_map = device_map
66
- else:
67
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
68
- self.device_map = f"cuda:{accelerator.local_process_index}"
69
-
70
- self.pretrained = pretrained
71
- self.model_path = snapshot_download(self.pretrained)
72
- self.model_name = get_model_name_from_path(pretrained)
73
- self.num_frames = num_frames
74
- if not os.path.exists("./model_zoo/LAVIS/eva_vit_g.pth") and accelerator.is_main_process:
75
- eval_logger.info("\n\n Eva Encoder is not found for LLaMA-VID. Download automatically to the folder ./model_zoo/LAVIS")
76
- cache_path = "model_zoo/LAVIS"
77
- os.makedirs(cache_path, exist_ok=True)
78
- subprocess.run(["wget https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth -O ./model_zoo/LAVIS/eva_vit_g.pth"], shell=True)
79
-
80
- accelerator.wait_for_everyone()
81
- self._tokenizer, self._model, self.image_processor, self._max_length = load_pretrained_model(
82
- self.model_path,
83
- None,
84
- self.model_name,
85
- device_map=self.device_map,
86
- )
87
-
88
- self._config = self._model.config
89
- self.model.eval()
90
- self.model.tie_weights()
91
- self.truncation = truncation
92
- self.batch_size_per_gpu = int(batch_size)
93
- self.conv_template = conv_template
94
- self.use_cache = use_cache
95
- self.truncate_context = truncate_context
96
- # assert self.batch_size_per_gpu == 1, "Llava currently does not support batched generation. See https://github.com/haotian-liu/LLaVA/issues/754. HF Llava also has this issue."
97
- if accelerator.num_processes > 1:
98
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
99
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
100
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
101
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
102
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
103
- kwargs = {
104
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
105
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
106
- }
107
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
108
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
109
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
110
- self._model = accelerator.prepare(self.model)
111
- else:
112
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
113
- self.accelerator = accelerator
114
- if self.accelerator.is_local_main_process:
115
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
116
- self._rank = self.accelerator.local_process_index
117
- self._world_size = self.accelerator.num_processes
118
- elif accelerator.num_processes == 1 and device_map == "auto":
119
- eval_logger.info(f"Using {accelerator.num_processes} devices with tensor parallelism")
120
- self._rank = 0
121
- self._word_size = 1
122
- else:
123
- eval_logger.info(f"Using single device: {self._device}")
124
- self.model.to(self._device)
125
- self._rank = 0
126
- self._world_size = 1
127
-
128
- def download_file(self, url, folder_path):
129
- # Create the folder if it doesn't exist
130
- if not os.path.exists(folder_path):
131
- os.makedirs(folder_path)
132
-
133
- # Extract filename from URL
134
- filename = url.split("/")[-1]
135
-
136
- # Define path to save the file
137
- file_path = os.path.join(folder_path, filename)
138
-
139
- # Send a GET request to the URL
140
- response = requests.get(url)
141
-
142
- # Check if request was successful (status code 200)
143
- if response.status_code == 200:
144
- # Save the file to the specified folder
145
- with open(file_path, "wb") as f:
146
- f.write(response.content)
147
- print(f"File downloaded successfully to {file_path}")
148
- else:
149
- print(f"Failed to download file. Status code: {response.status_code}")
150
-
151
- @property
152
- def config(self):
153
- # return the associated transformers.AutoConfig for the given pretrained model.
154
- return self._config
155
-
156
- @property
157
- def tokenizer(self):
158
- return self._tokenizer
159
-
160
- @property
161
- def model(self):
162
- # returns the model, unwrapping it if using Accelerate
163
- if hasattr(self, "accelerator"):
164
- return self.accelerator.unwrap_model(self._model)
165
- else:
166
- return self._model
167
-
168
- @property
169
- def eot_token_id(self):
170
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
171
- return self.tokenizer.eos_token_id
172
-
173
- @property
174
- def max_length(self):
175
- return self._max_length
176
-
177
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
178
- """ """
179
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
180
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
181
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
182
- if left_truncate_len:
183
- encoding = encoding[-left_truncate_len:]
184
- return encoding
185
-
186
- def tok_decode(self, tokens):
187
- return self.tokenizer.decode(tokens)
188
-
189
- def load_video(self, video_path):
190
- vr = VideoReader(video_path, ctx=cpu(0))
191
- total_frame_num = len(vr)
192
- fps = round(vr.get_avg_fps())
193
- frame_idx = [i for i in range(0, len(vr), fps)]
194
- spare_frames = vr.get_batch(frame_idx).asnumpy()
195
- return spare_frames
196
-
197
- def flatten(self, input):
198
- new_list = []
199
- for i in input:
200
- for j in i:
201
- new_list.append(j)
202
- return new_list
203
-
204
- def generate_until(self, requests) -> List[str]:
205
- res = []
206
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
207
-
208
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
209
- # encode, pad, and truncate contexts for this batch
210
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
211
- visuals = self.flatten(visuals)
212
- videos = []
213
- for visual in visuals:
214
- video = read_video_pyav(visual, num_frm=self.num_frames)
215
- video = self.image_processor.preprocess(video, return_tensors="pt")["pixel_values"].half().cuda()
216
- video = [video]
217
- videos += video
218
- qs = contexts
219
- if self.model.config.mm_use_im_start_end:
220
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + qs
221
- else:
222
- qs = DEFAULT_IMAGE_TOKEN + "\n" + qs
223
-
224
- conv = conv_templates[self.conv_template].copy()
225
- conv.append_message(conv.roles[0], qs)
226
- conv.append_message(conv.roles[1], None)
227
- prompt = conv.get_prompt()
228
-
229
- input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
230
-
231
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
232
- keywords = [stop_str]
233
- stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)
234
-
235
- cur_prompt = contexts
236
- with torch.inference_mode():
237
- self.model.update_prompt([[cur_prompt]])
238
- output_ids = self.model.generate(input_ids, images=video, do_sample=True, temperature=0.2, max_new_tokens=1024, use_cache=True, stopping_criteria=[stopping_criteria])
239
-
240
- input_token_len = input_ids.shape[1]
241
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
242
- if n_diff_input_output > 0:
243
- print(f"[Warning] {n_diff_input_output} output_ids are not the same as the input_ids")
244
- outputs = self.tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
245
- outputs = outputs.strip()
246
- if outputs.endswith(stop_str):
247
- outputs = outputs[: -len(stop_str)]
248
- outputs = outputs.strip()
249
- pbar.update(1)
250
- res.append(outputs)
251
-
252
- return res
253
-
254
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
255
- return super().loglikelihood(requests)
256
-
257
- @property
258
- def batch_size(self):
259
- return self.batch_size_per_gpu
260
-
261
- @property
262
- def device(self):
263
- return self._device
264
-
265
- @property
266
- def rank(self):
267
- return self._rank
268
-
269
- @property
270
- def world_size(self):
271
- return self._world_size
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/llava.py DELETED
@@ -1,419 +0,0 @@
1
- import torch
2
-
3
- torch.backends.cuda.matmul.allow_tf32 = True
4
-
5
-
6
- import copy
7
- from tqdm import tqdm
8
- from datetime import timedelta
9
-
10
- from lmms_eval import utils
11
- from lmms_eval.api.instance import Instance
12
- from lmms_eval.api.model import lmms
13
- from lmms_eval.api.registry import register_model
14
- from lmms_eval.utils import stop_sequences_criteria
15
-
16
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
17
- from accelerate.state import AcceleratorState
18
- from typing import List, Optional, Union, Tuple
19
- from packaging import version
20
- import warnings
21
-
22
- warnings.filterwarnings("ignore")
23
-
24
- from loguru import logger as eval_logger
25
-
26
- try:
27
- from llava.model.builder import load_pretrained_model
28
- from llava.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token
29
- from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
30
- from llava.conversation import conv_templates
31
- except Exception as e:
32
- eval_logger.debug("LLaVA is not installed. Please install LLaVA to use this model.\nError: %s" % e)
33
-
34
- # inference implementation for attention, can be "sdpa", "eager", "flash_attention_2". Seems FA2 is not effective during inference: https://discuss.huggingface.co/t/flash-attention-has-no-effect-on-inference/73453/5
35
- # if is_flash_attn_2_available:
36
- # best_fit_attn_implementation = "flash_attention_2" # flash_attn has a bug that says: ERROR Error query and key must have the same dtype in generating
37
-
38
- if version.parse(torch.__version__) >= version.parse("2.1.2"):
39
- best_fit_attn_implementation = "sdpa"
40
- else:
41
- best_fit_attn_implementation = "eager"
42
-
43
-
44
@register_model("llava")
class Llava(lmms):
    """
    LLaVA model wrapper for lmms-eval.

    Wraps the original LLaVA implementation behind the lmms-eval `lmms`
    interface, supporting single-device and multi-process (Accelerate)
    evaluation for both loglikelihood scoring and free-form generation.
    """

    def __init__(
        self,
        pretrained: str = "liuhaotian/llava-v1.5-7b",
        truncation: Optional[bool] = True,
        device: Optional[str] = "cuda:0",
        batch_size: Optional[Union[int, str]] = 1,
        model_name=None,
        attn_implementation=best_fit_attn_implementation,
        device_map="cuda:0",
        conv_template="vicuna_v1",
        use_cache=True,
        truncate_context=False,  # whether to truncate the context in generation, set it False for LLaVA-1.6
        customized_config=None,  # ends in json
        **kwargs,
    ) -> None:
        super().__init__()
        # Do not use kwargs for now
        assert kwargs == {}, f"Unexpected kwargs: {kwargs}"

        # Very long NCCL timeout so slow ranks don't kill long evaluations.
        accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
        accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
        if accelerator.num_processes > 1:
            self._device = torch.device(f"cuda:{accelerator.local_process_index}")
            self.device_map = f"cuda:{accelerator.local_process_index}"
        elif accelerator.num_processes == 1 and device_map == "auto":
            self._device = torch.device(device)
            self.device_map = device_map
        else:
            self._device = torch.device(f"cuda:{accelerator.local_process_index}")
            self.device_map = f"cuda:{accelerator.local_process_index}"

        llava_model_args = {
            "multimodal": True,
        }
        if customized_config is not None:
            llava_model_args["customized_config"] = customized_config
        if attn_implementation is not None:
            llava_model_args["attn_implementation"] = attn_implementation
        # NOTE(review): dead branch — kwargs is asserted empty above, so this
        # never fires; kept for parity with upstream until the assert is lifted.
        if "use_flash_attention_2" in kwargs:
            llava_model_args["use_flash_attention_2"] = kwargs["use_flash_attention_2"]
        model_name = model_name if model_name is not None else get_model_name_from_path(pretrained)
        try:
            # Try to load the model with the multimodal argument
            self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
        except TypeError:
            # for older versions of LLaVA that don't have multimodal argument
            llava_model_args.pop("multimodal", None)
            self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
        self._config = self._model.config
        self.model.eval()
        self.model.tie_weights()
        self.truncation = truncation
        self.batch_size_per_gpu = int(batch_size)
        self.conv_template = conv_template
        self.use_cache = use_cache
        self.truncate_context = truncate_context
        if accelerator.num_processes > 1:
            assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
            # DEEPSPEED must be configured at zero stage 0 (equivalent to DDP)
            # for `prepare` to work; run `accelerate config` beforehand.
            if accelerator.distributed_type == DistributedType.DEEPSPEED:
                kwargs = {
                    "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
                    "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
                }
                AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
                eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")

            if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
                self._model = accelerator.prepare(self.model)
            else:
                self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
            self.accelerator = accelerator
            if self.accelerator.is_local_main_process:
                eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
            self._rank = self.accelerator.local_process_index
            self._world_size = self.accelerator.num_processes
        elif accelerator.num_processes == 1 and device_map == "auto":
            eval_logger.info(f"Using {accelerator.num_processes} devices with tensor parallelism")
            self._rank = 0
            # FIX: was `self._word_size = 1` (typo), which left `_world_size`
            # unset and broke the `world_size` property in this branch.
            self._world_size = 1
        else:
            eval_logger.info(f"Using single device: {self._device}")
            self.model.to(self._device)
            self._rank = 0
            self._world_size = 1

    @property
    def config(self):
        """The transformers config object of the loaded pretrained model."""
        return self._config

    @property
    def tokenizer(self):
        """Tokenizer returned by `load_pretrained_model`."""
        return self._tokenizer

    @property
    def model(self):
        """The underlying model, unwrapped from Accelerate when in use."""
        if hasattr(self, "accelerator"):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        """Maximum context length reported by `load_pretrained_model`."""
        return self._max_length

    def pad_sequence(self, input_ids, batch_first, padding_value):
        """Pad a list of 1-D id tensors to equal length.

        Honors left-padding tokenizers by flipping each sequence, padding on
        the right, then flipping the batch back.
        """
        if self.tokenizer.padding_side == "left":
            input_ids = [torch.flip(_input_ids, [0]) for _input_ids in input_ids]
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=batch_first, padding_value=padding_value)
        if self.tokenizer.padding_side == "left":
            input_ids = torch.flip(input_ids, [1])
        return input_ids

    @property
    def batch_size(self):
        """Per-GPU batch size configured at construction time."""
        return self.batch_size_per_gpu

    @property
    def device(self):
        """The torch.device inputs are moved to."""
        return self._device

    @property
    def rank(self):
        """Index of this process in the evaluation run."""
        return self._rank

    @property
    def world_size(self):
        """Total number of evaluation processes."""
        return self._world_size

    def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
        """Encode `string` to token ids; optionally keep only the last
        `left_truncate_len` ids (left truncation)."""
        add_special_tokens = False if add_special_tokens is None else add_special_tokens
        encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
        if left_truncate_len:
            encoding = encoding[-left_truncate_len:]
        return encoding

    def tok_decode(self, tokens):
        """Decode ids to text; accepts either a sequence of ids or a single id."""
        try:
            return self.tokenizer.decode(tokens)
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; narrow to Exception (single-int-token fallback).
        except Exception:
            return self.tokenizer.decode([tokens])

    def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
        """Score each (context, continuation) pair.

        Returns, per request, the model's mean CE loss over the continuation
        tokens (lower is better — a proxy, not a summed log-probability) and
        whether greedy decoding would reproduce the continuation exactly.
        """
        res = []
        pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")

        for contexts, doc_to_target, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
            # Resolve the gold continuation: literal string or callable over the doc.
            if type(doc_to_target) == str:
                continuation = doc_to_target
            else:
                continuation = doc_to_target(self.task_dict[task][split][doc_id])
            visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
            visuals = self.flatten(visuals)
            image_sizes = [[visual.size[0], visual.size[1]] for visual in visuals]
            if visuals:
                image = process_images(visuals, self._image_processor, self._config)
                if type(image) is list:
                    image = [_image.to(dtype=torch.float16, device=self.device) for _image in image]
                else:
                    image = image.to(dtype=torch.float16, device=self.device)
            else:
                image = None

            prompts_input = contexts[0] if isinstance(contexts, list) else contexts

            if image is not None and len(image) != 0 and DEFAULT_IMAGE_TOKEN not in prompts_input:
                """
                Three senarios:
                1. No image, and there for, no image token should be added.
                2. image token is already specified in the context, so we don't need to add it.
                3. image token is not specified in the context and there is image inputs, so we need to add it. In this case, we add the image token at the beginning of the context and add a new line.
                """
                image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
                image_tokens = " ".join(image_tokens)
                prompts_input = image_tokens + "\n" + (contexts[0] if isinstance(contexts, list) else contexts)

            # This is much safer for llama3, as we now have some object type in it
            if "llama_3" in self.conv_template:
                conv = copy.deepcopy(conv_templates[self.conv_template])
            else:
                conv = conv_templates[self.conv_template].copy()
            conv.append_message(conv.roles[0], prompts_input)
            conv.append_message(conv.roles[1], None)
            prompt = conv.get_prompt()
            contxt_id = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
            # Add the answer of the second role and re-render to get
            # context + continuation ids. (FIX: removed unused `pad_token_id` local.)
            conv.messages[1][1] = continuation

            prompt = conv.get_prompt()
            input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
            labels = input_ids.clone()
            # Context part no need to calculate for loss
            labels[0, : contxt_id.shape[1]] = -100
            with torch.inference_mode():
                outputs = self.model(input_ids=input_ids, labels=labels, images=image, use_cache=True, image_sizes=image_sizes)
            loss = outputs["loss"]
            logits = outputs["logits"]
            greedy_tokens = logits.argmax(dim=-1)
            cont_toks = input_ids[:, contxt_id.shape[1] :]  # [1, seq]
            greedy_tokens = greedy_tokens[:, contxt_id.shape[1] : input_ids.shape[1]]  # [1, seq]
            max_equal = (greedy_tokens == cont_toks).all()
            res.append((float(loss.item()), bool(max_equal)))
            pbar.update(1)
        pbar.close()
        return res

    def flatten(self, input):
        """Flatten one nesting level: [[a, b], [c]] -> [a, b, c]."""
        new_list = []
        for i in input:
            for j in i:
                new_list.append(j)
        return new_list

    def generate_until(self, requests: List[Instance]) -> List[str]:
        """Generate free-form responses for each request, batched by identical
        generation kwargs."""
        res = []

        def _collate(x):
            # Sort by descending token length: time estimates become
            # overestimates, batch padding is predictable, and OOMs surface
            # immediately instead of near the end.
            toks = self.tok_encode(x[0])
            return -len(toks), x[0]

        # Group requests by their generation_kwargs so that e.g. greedy and
        # temp=0.8 sampling are never executed in the same batch.
        re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
        chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
        num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
        pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
        for chunk in chunks:
            contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
            task = task[0]
            split = split[0]
            batched_visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id]  # [B, N]
            flattened_visuals = self.flatten(batched_visuals)  # [B*N]
            # we assume all gen kwargs in the batch are the same
            # this is safe to assume because the `grouper` object ensures it.
            gen_kwargs = all_gen_kwargs[0]

            # Default stop sequence: the EOT token.
            until = [self.tok_decode(self.eot_token_id)]
            if "until" in gen_kwargs:
                until = gen_kwargs.pop("until")
                if isinstance(until, str):
                    until = [until]
                elif not isinstance(until, list):
                    raise ValueError(f"Expected `gen_kwargs['until']` to be of type Union[str,list] but got {type(until)}")

            if "image_aspect_ratio" in gen_kwargs.keys() and "image_aspect_ratio" not in self._config.__dict__:
                # pop it so it doesn't get passed to the model on the next generation step
                self._config.image_aspect_ratio = gen_kwargs.pop("image_aspect_ratio")
                eval_logger.info(f"Setting image aspect ratio: {self._config.image_aspect_ratio}")
            # encode, pad, and truncate contexts for this batch
            if flattened_visuals:
                image_tensor = process_images(flattened_visuals, self._image_processor, self._config)
                if type(image_tensor) is list:
                    image_tensor = [_image.to(dtype=torch.float16, device=self.device) for _image in image_tensor]
                else:
                    image_tensor = image_tensor.to(dtype=torch.float16, device=self.device)
            else:
                image_tensor = None

            question_input = []

            for visual, context in zip(batched_visuals, contexts):
                if image_tensor is not None and len(image_tensor) != 0 and DEFAULT_IMAGE_TOKEN not in context:
                    # Prepend one image token per visual when the context has
                    # no explicit image placeholder.
                    image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visual) if isinstance(visual, list) else [DEFAULT_IMAGE_TOKEN]
                    image_tokens = " ".join(image_tokens)
                    question = image_tokens + "\n" + context
                else:
                    question = context
                # This is much safer for llama3, as we now have some object type in it
                if "llama_3" in self.conv_template:
                    conv = copy.deepcopy(conv_templates[self.conv_template])
                else:
                    conv = conv_templates[self.conv_template].copy()
                conv.append_message(conv.roles[0], question)
                conv.append_message(conv.roles[1], None)
                prompt_question = conv.get_prompt()
                question_input.append(prompt_question)

            # preconfigure gen_kwargs with defaults
            gen_kwargs["image_sizes"] = [flattened_visuals[idx].size for idx in range(len(flattened_visuals))]
            if "max_new_tokens" not in gen_kwargs:
                gen_kwargs["max_new_tokens"] = 1024
            if "temperature" not in gen_kwargs:
                gen_kwargs["temperature"] = 0
            if "top_p" not in gen_kwargs:
                gen_kwargs["top_p"] = None
            if "num_beams" not in gen_kwargs:
                gen_kwargs["num_beams"] = 1

            input_ids_list = [tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt") for prompt in question_input]
            pad_token_ids = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
            input_ids = self.pad_sequence(input_ids_list, batch_first=True, padding_value=pad_token_ids).to(self.device)
            attention_masks = input_ids.ne(pad_token_ids).to(self.device)
            try:
                cont = self.model.generate(
                    input_ids,
                    attention_mask=attention_masks,
                    pad_token_id=pad_token_ids,
                    images=image_tensor,
                    image_sizes=gen_kwargs["image_sizes"],
                    do_sample=True if gen_kwargs["temperature"] > 0 else False,
                    temperature=gen_kwargs["temperature"],
                    top_p=gen_kwargs["top_p"],
                    num_beams=gen_kwargs["num_beams"],
                    max_new_tokens=gen_kwargs["max_new_tokens"],
                    use_cache=self.use_cache,
                )
                text_outputs = self.tokenizer.batch_decode(cont, skip_special_tokens=True)
            except Exception as e:
                # FIX: log before re-raising. The original did `raise e` first,
                # leaving the log statement and fallback assignments unreachable.
                eval_logger.error(f"Error {e} in generating")
                raise

            res.extend(text_outputs)
            # NOTE(review): `context` is the zip-loop leftover (last item of the
            # batch) — preserved as-is; verify cache keying if batch_size > 1.
            self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)
            pbar.update(1)
        # reorder this group of results back to original unsorted form
        res = re_ords.get_original(res)

        pbar.close()
        return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/llava_hf.py DELETED
@@ -1,341 +0,0 @@
1
- import torch
2
-
3
- from tqdm import tqdm
4
- from lmms_eval import utils
5
- from lmms_eval.api.instance import Instance
6
- from lmms_eval.api.model import lmms
7
- from lmms_eval.api.registry import register_model
8
- from accelerate import Accelerator, DistributedType
9
- from accelerate.state import AcceleratorState
10
- from typing import List, Optional, Union, Tuple
11
- from transformers import LlavaForConditionalGeneration, LlavaNextForConditionalGeneration, AutoProcessor
12
-
13
- import warnings
14
-
15
- warnings.filterwarnings("ignore")
16
-
17
- from loguru import logger as eval_logger
18
-
19
- DEFAULT_IMAGE_TOKEN = "<image>"
20
-
21
- # Default chat for llava-hf/llava-1.5 models: https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0
22
- VICUNA_CHAT_TEMPLATE = "{% for message in messages %}{% if loop.index0 == 0 %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {{ message['content'] }} {% elif message['role'] == 'user' %}USER: {{ message['content'] }} {% else %} ASSISTANT: {{ message['content'] }}{{ eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}"
23
-
24
-
25
- @register_model("llava_hf")
26
- class LlavaHf(lmms):
27
- """
28
- Llava Model for Hugging Face Transformers: https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/llava
29
-
30
- Adapted from the InstructBLIP model in lmms_eval/models/instructblip.py
31
-
32
- Example usage:
33
-
34
- accelerate launch --num_processes=8 --main_process_port 12345 -m lmms_eval \
35
- --model llava_hf \
36
- --model_args pretrained=llava-hf/llava-1.5-7b-hf \
37
- --tasks seedbench \
38
- --batch_size 1 \
39
- --output_path ./logs/ \
40
- --log_samples
41
- """
42
-
43
- def __init__(
44
- self,
45
- pretrained: str = "llava-hf/llava-1.5-7b-hf",
46
- revision: str = "main",
47
- device: str = "cuda",
48
- dtype: Optional[Union[str, torch.dtype]] = "auto",
49
- batch_size: int = 1,
50
- trust_remote_code: Optional[bool] = False,
51
- attn_implementation: Optional[str] = None,
52
- device_map: str = "",
53
- chat_template: Optional[str] = None,
54
- use_cache: bool = True,
55
- **kwargs,
56
- ) -> None:
57
- super().__init__()
58
- # Do not use kwargs for now
59
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
60
-
61
- accelerator = Accelerator()
62
- if accelerator.num_processes > 1 and device_map == "":
63
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
64
- self.device_map = f"cuda:{accelerator.local_process_index}"
65
- else:
66
- self._device = torch.device(device)
67
- self.device_map = device_map
68
- if isinstance(dtype, str) and dtype != "auto":
69
- dtype = getattr(torch, dtype)
70
-
71
- if "1.5" in pretrained:
72
- self._model = LlavaForConditionalGeneration.from_pretrained(pretrained, revision=revision, torch_dtype=dtype, device_map=self.device_map, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation)
73
- elif "1.6" in pretrained:
74
- self._model = LlavaNextForConditionalGeneration.from_pretrained(pretrained, revision=revision, torch_dtype=dtype, device_map=self.device_map, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation)
75
- else:
76
- eval_logger.info("Not sure whether you use 1.5 or 1.6. Use 1.5 by default. This might cause bugs if you are actually using 1.6")
77
- self._model = LlavaForConditionalGeneration.from_pretrained(pretrained, revision=revision, torch_dtype=dtype, device_map=self.device_map, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation)
78
-
79
- self.pretrained = pretrained
80
- self._image_processor = AutoProcessor.from_pretrained(pretrained, revision=revision, trust_remote_code=trust_remote_code)
81
- # Pad from left for batched generation: https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/llava#usage-tips
82
- self._image_processor.tokenizer.padding_side = "left"
83
- self._tokenizer = self._image_processor.tokenizer
84
- self._config = self._model.config
85
- self.batch_size_per_gpu = int(batch_size)
86
- self.chat_template = chat_template
87
- self.use_cache = use_cache
88
- if accelerator.num_processes > 1 and device_map == "":
89
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
90
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
91
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
92
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
93
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
94
- kwargs = {
95
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
96
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
97
- }
98
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
99
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
100
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
101
- self._model = accelerator.prepare(self.model)
102
- else:
103
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
104
- self.accelerator = accelerator
105
- if self.accelerator.is_local_main_process:
106
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
107
- self._rank = self.accelerator.local_process_index
108
- self._world_size = self.accelerator.num_processes
109
- elif accelerator.num_processes == 1 and device_map == "auto":
110
- eval_logger.info(f"Using {accelerator.num_processes} devices with pipeline parallelism")
111
- self._rank = 0
112
- self._word_size = 1
113
- else:
114
- eval_logger.info(f"Using single device: {self._device}")
115
- self.model.to(self._device)
116
- self._rank = 0
117
- self._word_size = 1
118
- self.accelerator = accelerator
119
-
120
- @property
121
- def config(self):
122
- # return the associated transformers.AutoConfig for the given pretrained model.
123
- return self._config
124
-
125
- @property
126
- def tokenizer(self):
127
- return self._tokenizer
128
-
129
- @property
130
- def model(self):
131
- # returns the model, unwrapping it if using Accelerate
132
- if hasattr(self, "accelerator"):
133
- return self.accelerator.unwrap_model(self._model)
134
- else:
135
- return self._model
136
-
137
- @property
138
- def eot_token_id(self):
139
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
140
- return self.tokenizer.eos_token_id
141
-
142
- @property
143
- def max_length(self):
144
- return self._max_length
145
-
146
- @property
147
- def batch_size(self):
148
- return self.batch_size_per_gpu
149
-
150
- @property
151
- def device(self):
152
- return self._device
153
-
154
- @property
155
- def rank(self):
156
- return self._rank
157
-
158
- @property
159
- def world_size(self):
160
- return self._world_size
161
-
162
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
163
- """ """
164
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
165
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
166
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
167
- if left_truncate_len:
168
- encoding = encoding[-left_truncate_len:]
169
- return encoding
170
-
171
- def tok_decode(self, tokens):
172
- return self.tokenizer.decode(tokens)
173
-
174
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
175
- res = []
176
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
177
-
178
- for context, doc_to_target, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
179
- # encode, pad, and truncate contexts for this batch
180
- if type(doc_to_target) == str:
181
- continuation = doc_to_target
182
- else:
183
- continuation = doc_to_target(self.task_dict[task][split][doc_id])
184
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
185
- visuals = self.flatten(visuals)
186
-
187
- image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
188
- image_tokens = " ".join(image_tokens)
189
- context = f"{image_tokens}\n{context}"
190
- # Apply chat template
191
- messages = [{"role": "user", "content": context}, {"role": "assistant", "content": continuation}]
192
- if self.chat_template is not None:
193
- self.tokenizer.chat_template = self.chat_template
194
- prompt = self.tokenizer.apply_chat_template(messages[:-1], tokenize=False, add_generation_prompt=True)
195
- prompt_and_continuation = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
196
- elif self.tokenizer.chat_template is not None:
197
- prompt = self.tokenizer.apply_chat_template(messages[:-1], tokenize=False, add_generation_prompt=True)
198
- prompt_and_continuation = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
199
- else:
200
- self.tokenizer.chat_template = VICUNA_CHAT_TEMPLATE
201
- prompt = self.tokenizer.apply_chat_template(messages[:-1], tokenize=False, add_generation_prompt=True)
202
- prompt_and_continuation = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
203
-
204
- formatted_contexts = [prompt]
205
- formatted_continuation = [prompt_and_continuation]
206
- model_inputs = self._image_processor(text=formatted_continuation, images=visuals).to(self._device, self.model.dtype)
207
- labels = model_inputs["input_ids"].clone()
208
- contxt_id = self._image_processor(text=formatted_contexts, return_tensors="pt")["input_ids"]
209
- labels[: len(contxt_id)] = -100
210
-
211
- if self.accelerator.is_main_process and doc_id % 100 == 0:
212
- eval_logger.debug(f"Prompt for doc ID {doc_id}:\n\n{formatted_contexts[0]}\n")
213
- eval_logger.debug(f"Prompt and continuation for doc ID {doc_id}:\n\n{formatted_continuation[0]}\n")
214
-
215
- with torch.inference_mode():
216
- outputs = self.model(**model_inputs, labels=labels)
217
- loss = outputs["loss"]
218
- logits = outputs["logits"]
219
- greedy_tokens = logits.argmax(dim=-1)
220
- cont_toks = model_inputs["input_ids"][:, contxt_id.shape[1] :] # [1, seq]
221
- greedy_tokens = greedy_tokens[:, contxt_id.shape[1] : model_inputs["input_ids"].shape[1]] # [1, seq]
222
- max_equal = (greedy_tokens == cont_toks).all()
223
- res.append((float(loss.item()), bool(max_equal)))
224
- pbar.update(1)
225
-
226
- pbar.close()
227
- return res
228
-
229
- def flatten(self, input):
230
- new_list = []
231
- for i in input:
232
- for j in i:
233
- new_list.append(j)
234
- return new_list
235
-
236
- def generate_until(self, requests: List[Instance]) -> List[str]:
237
- res = []
238
-
239
- def _collate(x):
240
- # the negative sign on len(toks) sorts descending - this has a few advantages:
241
- # - time estimates will always be over not underestimates, which is more useful for planning
242
- # - to know the size of a batch when going through the list, you know the first one is always the batch
243
- # padded context length. this is useful to simplify the batching logic and more importantly to make
244
- # automatic adaptive batches much much easier to implement
245
- # - any OOMs will happen right away rather than near the end
246
- toks = self.tok_encode(x[0])
247
- return -len(toks), x[0]
248
-
249
- # we group requests by their generation_kwargs,
250
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
251
- # in the same batch.
252
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
253
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
254
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
255
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
256
- for chunk in chunks:
257
- contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
258
- task = task[0]
259
- split = split[0]
260
- visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id]
261
- visuals = self.flatten(visuals)
262
- # we assume all gen kwargs in the batch are the same
263
- # this is safe to assume because the `grouper` object ensures it.
264
- gen_kwargs = all_gen_kwargs[0]
265
-
266
- # Set default values for until and max_new_tokens
267
- until = [self.tok_decode(self.eot_token_id)]
268
-
269
- # Update values from gen_kwargs if present
270
- if "until" in gen_kwargs:
271
- until = gen_kwargs.pop("until")
272
- if isinstance(until, str):
273
- until = [until]
274
- elif not isinstance(until, list):
275
- raise ValueError(f"Expected `gen_kwargs['until']` to be of type Union[str,list] but got {type(until)}")
276
- assert self.batch_size_per_gpu == 1, "Do not support batch_size_per_gpu > 1 for now"
277
- context = contexts[0]
278
-
279
- # Some benchmarks like MME do not contain image tokens, so we prepend them to the prompt.
280
- if DEFAULT_IMAGE_TOKEN not in context:
281
- image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
282
- image_tokens = " ".join(image_tokens)
283
- context = f"{image_tokens}\n{context}"
284
- # Apply chat template
285
- messages = [{"role": "user", "content": context}]
286
- if self.chat_template is not None:
287
- self.tokenizer.chat_template = self.chat_template
288
- text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
289
- elif self.tokenizer.chat_template is not None:
290
- text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
291
- else:
292
- self.tokenizer.chat_template = VICUNA_CHAT_TEMPLATE
293
- text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
294
-
295
- if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
296
- eval_logger.debug(f"Prompt for doc ID {doc_id[0]}:\n\n{text}\n")
297
-
298
- inputs = self._image_processor(images=visuals, text=text, return_tensors="pt").to(self._device, self.model.dtype)
299
-
300
- gen_kwargs["image_sizes"] = [visuals[idx].size for idx in range(len(visuals))]
301
- if "max_new_tokens" not in gen_kwargs:
302
- gen_kwargs["max_new_tokens"] = 1024
303
- if "temperature" not in gen_kwargs:
304
- gen_kwargs["temperature"] = 0
305
- if "top_p" not in gen_kwargs:
306
- gen_kwargs["top_p"] = None
307
- if "num_beams" not in gen_kwargs:
308
- gen_kwargs["num_beams"] = 1
309
- try:
310
- cont = self.model.generate(
311
- **inputs,
312
- do_sample=True if gen_kwargs["temperature"] > 0 else False,
313
- temperature=gen_kwargs["temperature"],
314
- top_p=gen_kwargs["top_p"],
315
- num_beams=gen_kwargs["num_beams"],
316
- max_new_tokens=gen_kwargs["max_new_tokens"],
317
- use_cache=self.use_cache,
318
- pad_token_id=self.tokenizer.eos_token_id,
319
- )
320
- except Exception as e:
321
- eval_logger.error(f"Error {e} in generating")
322
- cont = ""
323
- text_outputs = self.tokenizer.batch_decode(cont, skip_special_tokens=True)[0]
324
- if "1.5" in self.pretrained:
325
- text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()
326
- elif "mistral" in self.pretrained:
327
- text_outputs = text_outputs.split("[/INST]")[-1].strip()
328
- else:
329
- text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()
330
-
331
- if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
332
- eval_logger.debug(f"Generated text for doc ID {doc_id[0]}:\n\n{text_outputs}\n")
333
-
334
- res.append(text_outputs)
335
- self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)
336
- pbar.update(1)
337
- # reorder this group of results back to original unsorted form
338
- res = re_ords.get_original(res)
339
-
340
- pbar.close()
341
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/llava_sglang.py DELETED
@@ -1,161 +0,0 @@
1
- import torch
2
- import random
3
-
4
- torch.backends.cuda.matmul.allow_tf32 = True
5
-
6
-
7
- from tqdm import tqdm
8
- from datetime import timedelta
9
-
10
- from lmms_eval import utils
11
- from lmms_eval.api.instance import Instance
12
- from lmms_eval.api.model import lmms
13
- from lmms_eval.api.registry import register_model
14
-
15
- from typing import List, Optional, Union, Tuple
16
- import warnings
17
-
18
- warnings.filterwarnings("ignore")
19
- from concurrent.futures import ThreadPoolExecutor, as_completed
20
- import tempfile
21
-
22
- from loguru import logger as eval_logger
23
-
24
- try:
25
- import sglang as sgl
26
- from sglang.lang.chat_template import get_chat_template
27
- except ImportError:
28
- eval_logger.debug("SGLang is not installed. If you want to use llava_sglang, please install it using pip install 'sglang[all]' ")
29
-
30
- if torch.__version__ > "2.1.2":
31
- best_fit_attn_implementation = "sdpa"
32
- else:
33
- best_fit_attn_implementation = "eager"
34
-
35
-
36
- @register_model("llava_sglang")
37
- class LlavaSglang(lmms):
38
- """
39
- Llava Sglang Model
40
- """
41
-
42
- def __init__(
43
- self,
44
- pretrained: str = "liuhaotian/llava-v1.5-7b",
45
- tokenizer: str = "llava-hf/llava-1.5-7b-hf",
46
- tp_size: int = 1,
47
- parallel: Optional[Union[int, str]] = 64,
48
- conv_template="vicuna_v1.1",
49
- **kwargs,
50
- ) -> None:
51
- super().__init__()
52
- self.pretrained = pretrained
53
- self.tokenizer = tokenizer
54
- self.tp_size = tp_size
55
- self.conv_template = conv_template
56
- # torch.multiprocessing.set_start_method("spawn")
57
-
58
- # accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
59
- # accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
60
- # assert accelerator.num_processes == 1, "Llava-sglang does not support multi-processes yet (it does support tensor parallelism)."
61
- self._rank = 0
62
- self._world_size = 1
63
- self.parallel = parallel
64
-
65
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
66
- raise NotImplementedError("Llava-sglang does not support loglikelihood evaluation yet")
67
-
68
- def generate_until(self, requests: List[Instance]) -> List[str]:
69
- torch.multiprocessing.set_start_method("spawn", force=True)
70
- runtime = sgl.Runtime(model_path=self.pretrained, tokenizer_path=self.tokenizer, tp_size=self.tp_size, port=random.randint(10000, 50000))
71
- runtime.endpoint.chat_template = get_chat_template(self.conv_template)
72
- sgl.set_default_backend(runtime)
73
-
74
- @sgl.function
75
- def image_qa(s, image_file, question):
76
- s += sgl.user(sgl.image(image_file) + question)
77
- s += sgl.assistant(sgl.gen("answer"))
78
-
79
- res = []
80
-
81
- def _collate(x):
82
- # the negative sign on len(toks) sorts descending - this has a few advantages:
83
- # - time estimates will always be over not underestimates, which is more useful for planning
84
- # - to know the size of a batch when going through the list, you know the first one is always the batch
85
- # padded context length. this is useful to simplify the batching logic and more importantly to make
86
- # automatic adaptive batches much much easier to implement
87
- # - any OOMs will happen right away rather than near the end
88
- toks = x[0].split(" ")
89
- return -len(toks), x[0]
90
-
91
- # we group requests by their generation_kwargs,
92
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
93
- # in the same batch.
94
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
95
- chunks = re_ords.get_batched(n=self.parallel, batch_fn=None)
96
- num_iters = len(requests) // self.parallel if len(requests) % self.parallel == 0 else len(requests) // self.parallel + 1
97
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
98
- for chunk in chunks:
99
- contexts, all_gen_kwargs, doc_to_visuals, doc_id, tasks, splits = zip(*chunk)
100
- batched_visuals = [doc_to_visual(self.task_dict[task][split][ids]) for ids, task, split, doc_to_visual in zip(doc_id, tasks, splits, doc_to_visuals)] # [B, N]
101
- # we assume all gen kwargs in the batch are the same
102
- # this is safe to assume because the `grouper` object ensures it.
103
- gen_kwargs = all_gen_kwargs[0]
104
- if "max_new_tokens" not in gen_kwargs:
105
- gen_kwargs["max_new_tokens"] = 1024
106
- if "temperature" not in gen_kwargs:
107
- gen_kwargs["temperature"] = 0
108
- if "top_p" not in gen_kwargs:
109
- gen_kwargs["top_p"] = 1.0
110
- if "num_beams" not in gen_kwargs:
111
- gen_kwargs["num_beams"] = 1
112
- assert gen_kwargs["num_beams"] == 1
113
-
114
- def save_image_to_temp_file(image):
115
- temp_file = tempfile.NamedTemporaryFile(suffix=".jpeg", delete=True)
116
- image.save(temp_file.name)
117
- return temp_file
118
-
119
- def prepare_arguments_parallel(contexts, batched_visuals, max_workers=64):
120
- arguments = [None] * len(contexts) # Initialize with placeholders
121
- tmp_files = [None] * len(contexts) # Initialize with placeholders
122
-
123
- with ThreadPoolExecutor(max_workers=max_workers) as executor:
124
- # Associate each future with its index and content
125
- future_to_info = {executor.submit(save_image_to_temp_file, pil_list[0]): (index, context, pil_list) for index, (context, pil_list) in enumerate(zip(contexts, batched_visuals))}
126
-
127
- for future in as_completed(future_to_info):
128
- index, context, pil_list = future_to_info[future]
129
- if len(pil_list) > 1:
130
- eval_logger.warning("Llava-sglang only supports one visual input per question. Using the first visual input.")
131
- try:
132
- temp_file = future.result()
133
- arguments[index] = {
134
- "image_file": temp_file.name,
135
- "question": context,
136
- }
137
- tmp_files[index] = temp_file
138
- except Exception as exc:
139
- print(f"Generated an exception: {exc}")
140
-
141
- # Filter out any None values in case of exceptions
142
- arguments = [arg for arg in arguments if arg is not None]
143
- tmp_files = [tmp_file for tmp_file in tmp_files if tmp_file is not None]
144
-
145
- return arguments, tmp_files
146
-
147
- arguments, tmp_files = prepare_arguments_parallel(contexts, batched_visuals, self.parallel)
148
- states = image_qa.run_batch(arguments, temperature=gen_kwargs["temperature"], max_new_tokens=gen_kwargs["max_new_tokens"], top_p=gen_kwargs["top_p"], num_threads=self.parallel, progress_bar=False)
149
-
150
- text_outputs = [state["answer"].strip() for state in states]
151
- # clean up the temporary files
152
- for tmp_file in tmp_files:
153
- tmp_file.close()
154
- res.extend(text_outputs)
155
- pbar.update(1)
156
- # reorder this group of results back to original unsorted form
157
- res = re_ords.get_original(res)
158
-
159
- pbar.close()
160
- runtime.shutdown()
161
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/llava_vid.py DELETED
@@ -1,404 +0,0 @@
1
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
2
- from accelerate.state import AcceleratorState
3
- from typing import List, Optional, Union, Tuple
4
- import torch
5
- from tqdm import tqdm
6
- from decord import VideoReader, cpu
7
- import numpy as np
8
- import math
9
- from datetime import timedelta
10
- from transformers import AutoConfig
11
- import copy
12
-
13
- from lmms_eval.api.instance import Instance
14
- from lmms_eval.api.model import lmms
15
- from lmms_eval.api.registry import register_model
16
- from lmms_eval.models.model_utils.load_video import read_video_pyav
17
-
18
- from loguru import logger as eval_logger
19
-
20
- try:
21
- from llavavid.model.builder import load_pretrained_model
22
- from llavavid.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
23
- from llavavid.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IGNORE_INDEX
24
- from llavavid.conversation import conv_templates, SeparatorStyle
25
- from llavavid.mm_utils import tokenizer_image_token_qwen_merge, preprocess_qwen, preprocess_llama3
26
- except ImportError:
27
- eval_logger.debug("LLaVA-Video is not installed. Please install LLaVA-Video to use this model.")
28
-
29
- from llavavid.model.language_model.llava_qwen import LlavaQwenConfig
30
- from llavavid.model.language_model.llava_llama import LlavaConfig
31
-
32
- AutoConfig.register("llava_qwen", LlavaQwenConfig)
33
- AutoConfig.register("llava_llama", LlavaConfig)
34
-
35
-
36
- @register_model("llavavid")
37
- class LlavaVid(lmms):
38
- """
39
- LlavaVid Model
40
- """
41
-
42
- def __init__(
43
- self,
44
- pretrained: str = "liuhaotian/llava-v1.5-7b",
45
- truncation: Optional[bool] = True,
46
- device: Optional[str] = "cuda:0",
47
- batch_size: Optional[Union[int, str]] = 1,
48
- attn_implementation=(
49
- "sdpa" if torch.__version__ >= "2.1.2" else "eager"
50
- ), # inference implementation for attention, can be "sdpa", "eager", "flash_attention_2". Seems FA2 is not effective during inference: https://discuss.huggingface.co/t/flash-attention-has-no-effect-on-inference/73453/5
51
- device_map="cuda:0",
52
- conv_template="vicuna_v1",
53
- use_cache=True,
54
- truncate_context=False, # whether to truncate the context in generation, set it False for LLaVA-1.6
55
- max_frames_num: int = 3,
56
- mm_resampler_type: str = "spatial_pool",
57
- mm_spatial_pool_stride: int = 2,
58
- mm_spatial_pool_out_channels: int = 1024,
59
- mm_spatial_pool_mode: str = "average",
60
- overwrite: bool = True,
61
- video_decode_backend: str = "pyav",
62
- **kwargs,
63
- ) -> None:
64
- super().__init__()
65
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
66
-
67
- accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
68
- accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
69
- if accelerator.num_processes > 1:
70
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
71
- self.device_map = f"cuda:{accelerator.local_process_index}"
72
- elif accelerator.num_processes == 1 and device_map == "auto":
73
- self._device = torch.device(device)
74
- self.device_map = device_map
75
- else:
76
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
77
- self.device_map = f"cuda:{accelerator.local_process_index}"
78
-
79
- self.pretrained = pretrained
80
- self.model_name = get_model_name_from_path(pretrained)
81
- self.video_decode_backend = video_decode_backend
82
- # self._config = AutoConfig.from_pretrained(self.pretrained)
83
- self.overwrite = overwrite
84
- self.mm_resampler_type = mm_resampler_type
85
- self.mm_spatial_pool_stride = int(mm_spatial_pool_stride)
86
- self.mm_spatial_pool_out_channels = int(mm_spatial_pool_out_channels)
87
- self.mm_spatial_pool_mode = mm_spatial_pool_mode
88
- self.max_frames_num = int(max_frames_num)
89
- if self.overwrite == True:
90
- overwrite_config = {}
91
- overwrite_config["mm_resampler_type"] = self.mm_resampler_type
92
- overwrite_config["mm_spatial_pool_stride"] = self.mm_spatial_pool_stride
93
- overwrite_config["mm_spatial_pool_out_channels"] = self.mm_spatial_pool_out_channels
94
- overwrite_config["mm_spatial_pool_mode"] = self.mm_spatial_pool_mode
95
- overwrite_config["mm_resampler_location"] = "before"
96
- overwrite_config["patchify_video_feature"] = False
97
- overwrite_config["attn_implementation"] = attn_implementation
98
-
99
- cfg_pretrained = AutoConfig.from_pretrained(self.pretrained)
100
-
101
- if cfg_pretrained.architectures[0] == "LlavaLlamaForCausalLM": # Ugly code, only used in vicuna that needs ROPE
102
- if "224" in cfg_pretrained.mm_vision_tower:
103
- least_token_number = self.max_frames_num * (16 // self.mm_spatial_pool_stride) ** 2 + 1000
104
- else:
105
- least_token_number = self.max_frames_num * (24 // self.mm_spatial_pool_stride) ** 2 + 1000
106
-
107
- scaling_factor = math.ceil(least_token_number / 4096)
108
- if scaling_factor >= 2:
109
- overwrite_config["rope_scaling"] = {"factor": float(scaling_factor), "type": "linear"}
110
- overwrite_config["max_sequence_length"] = 4096 * scaling_factor
111
- overwrite_config["tokenizer_model_max_length"] = 4096 * scaling_factor
112
-
113
- if "v1.5" in pretrained: # A hardcode solution here to load v1.5 model, otherwise it will use LlavaConfig from hf transformers
114
- from transformers import AutoTokenizer
115
- from llavavid.model.language_model.llava_llama import LlavaConfig, LlavaLlamaForCausalLM
116
-
117
- self._tokenizer = AutoTokenizer.from_pretrained(pretrained, use_fast=False)
118
- cfg_pretrained = LlavaConfig.from_pretrained(pretrained)
119
- if overwrite_config is not None:
120
- print(f"Overwriting config with {overwrite_config}")
121
- for k, v in overwrite_config.items():
122
- setattr(cfg_pretrained, k, v)
123
- kwargs["torch_dtype"] = torch.float16
124
- self._model = LlavaLlamaForCausalLM.from_pretrained(pretrained, low_cpu_mem_usage=True, config=cfg_pretrained, device_map=self.device_map, **kwargs)
125
- vision_tower = self._model.get_vision_tower()
126
- if not vision_tower.is_loaded:
127
- vision_tower.load_model(device_map=self.device_map)
128
- if self.device_map != "auto":
129
- vision_tower.to(device="cuda", dtype=torch.float16)
130
- self._image_processor = vision_tower.image_processor
131
-
132
- if hasattr(self._model.config, "max_sequence_length"):
133
- self._max_length = self._model.config.max_sequence_length
134
- else:
135
- self._max_length = 2048
136
- else:
137
- self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, self.model_name, device_map=self.device_map, overwrite_config=overwrite_config)
138
- else:
139
- self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(
140
- pretrained,
141
- None,
142
- self.model_name,
143
- device_map=self.device_map,
144
- )
145
-
146
- self._config = self._model.config
147
- self.model.eval()
148
- self.model.tie_weights()
149
- self.truncation = truncation
150
- self.batch_size_per_gpu = int(batch_size)
151
- self.conv_template = conv_template
152
- self.use_cache = use_cache
153
- self.truncate_context = truncate_context
154
- # assert self.batch_size_per_gpu == 1, "Llava currently does not support batched generation. See https://github.com/haotian-liu/LLaVA/issues/754. HF Llava also has this issue."
155
- if accelerator.num_processes > 1:
156
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
157
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
158
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
159
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
160
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
161
- kwargs = {
162
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
163
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
164
- }
165
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
166
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
167
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
168
- self._model = accelerator.prepare(self.model)
169
- else:
170
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
171
- self.accelerator = accelerator
172
- if self.accelerator.is_local_main_process:
173
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
174
- self._rank = self.accelerator.local_process_index
175
- self._world_size = self.accelerator.num_processes
176
- elif accelerator.num_processes == 1 and device_map == "auto":
177
- eval_logger.info(f"Using {accelerator.num_processes} devices with tensor parallelism")
178
- self._rank = 0
179
- self._word_size = 1
180
- else:
181
- eval_logger.info(f"Using single device: {self._device}")
182
- self.model.to(self._device)
183
- self._rank = 0
184
- self._world_size = 1
185
-
186
- @property
187
- def config(self):
188
- # return the associated transformers.AutoConfig for the given pretrained model.
189
- return self._config
190
-
191
- @property
192
- def tokenizer(self):
193
- return self._tokenizer
194
-
195
- @property
196
- def model(self):
197
- # returns the model, unwrapping it if using Accelerate
198
- if hasattr(self, "accelerator"):
199
- return self.accelerator.unwrap_model(self._model)
200
- else:
201
- return self._model
202
-
203
- @property
204
- def eot_token_id(self):
205
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
206
- return self.tokenizer.eos_token_id
207
-
208
- @property
209
- def max_length(self):
210
- return self._max_length
211
-
212
- def pad_sequence(self, input_ids, batch_first, padding_value):
213
- if self.tokenizer.padding_side == "left":
214
- input_ids = [torch.flip(_input_ids, [0]) for _input_ids in input_ids]
215
- input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=batch_first, padding_value=padding_value)
216
- if self.tokenizer.padding_side == "left":
217
- input_ids = torch.flip(input_ids, [1])
218
- return input_ids
219
-
220
- @property
221
- def batch_size(self):
222
- return self.batch_size_per_gpu
223
-
224
- @property
225
- def device(self):
226
- return self._device
227
-
228
- @property
229
- def rank(self):
230
- return self._rank
231
-
232
- @property
233
- def world_size(self):
234
- return self._world_size
235
-
236
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
237
- """ """
238
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
239
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
240
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
241
- if left_truncate_len:
242
- encoding = encoding[-left_truncate_len:]
243
- return encoding
244
-
245
- def load_video(self, video_path, max_frames_num):
246
- vr = VideoReader(video_path, ctx=cpu(0))
247
- total_frame_num = len(vr)
248
- # fps = round(vr.get_avg_fps())
249
- # frame_idx = [i for i in range(0, len(vr), fps)]
250
- uniform_sampled_frames = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int)
251
- frame_idx = uniform_sampled_frames.tolist()
252
- spare_frames = vr.get_batch(frame_idx).asnumpy()
253
- return spare_frames # (frames, height, width, channels)
254
-
255
- def tok_decode(self, tokens):
256
- return self.tokenizer.decode(tokens)
257
-
258
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
259
- res = []
260
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
261
-
262
- for contexts, doc_to_target, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
263
- # encode, pad, and truncate contexts for this batch
264
- if type(doc_to_target) == str:
265
- continuation = doc_to_target
266
- else:
267
- continuation = doc_to_target(self.task_dict[task][split][doc_id])
268
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
269
- visuals = self.flatten(visuals)
270
- videos = []
271
- for visual in visuals:
272
- video = self.load_video(visual, self.max_frames_num)
273
- video = self._image_processor.preprocess(video, return_tensors="pt")["pixel_values"].half().cuda()
274
- videos.append(video)
275
-
276
- qs = contexts
277
- if self.model.config.mm_use_im_start_end:
278
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + qs
279
- else:
280
- qs = DEFAULT_IMAGE_TOKEN + "\n" + qs
281
-
282
- conv = conv_templates[self.conv_template].copy()
283
- conv.append_message(conv.roles[0], qs)
284
- conv.append_message(conv.roles[1], None)
285
- prompt = conv.get_prompt()
286
-
287
- contxt_id = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
288
-
289
- conv = conv_templates[self.conv_template].copy()
290
- conv.append_message(conv.roles[0], qs)
291
- conv.append_message(conv.roles[1], continuation)
292
- prompt = conv.get_prompt()
293
-
294
- input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
295
- attention_masks = input_ids.ne(self.tokenizer.pad_token_id).long().cuda()
296
-
297
- labels = input_ids.clone()
298
- # Context part no need to calculate for loss
299
- labels[0, : contxt_id.shape[1]] = -100
300
-
301
- with torch.inference_mode():
302
- outputs = self.model(input_ids=input_ids, labels=labels, images=videos, modalities="video")
303
-
304
- loss = outputs["loss"]
305
- # loss = torch.exp(loss)
306
- logits = outputs["logits"]
307
- greedy_tokens = logits.argmax(dim=-1)
308
- cont_toks = input_ids[:, contxt_id.shape[1] :] # [1, seq]
309
- greedy_tokens = greedy_tokens[:, contxt_id.shape[1] : input_ids.shape[1]] # [1, seq]
310
- max_equal = (greedy_tokens == cont_toks).all()
311
- res.append((float(loss.item()), bool(max_equal)))
312
- pbar.update(1)
313
- pbar.close()
314
- return res
315
-
316
- def flatten(self, input):
317
- new_list = []
318
- for i in input:
319
- for j in i:
320
- new_list.append(j)
321
- return new_list
322
-
323
- def generate_until(self, requests) -> List[str]:
324
- res = []
325
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
326
-
327
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
328
- # encode, pad, and truncate contexts for this batch
329
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
330
- visuals = self.flatten(visuals)
331
- videos = []
332
- try:
333
- for visual in visuals:
334
- if self.video_decode_backend == "decord":
335
- video = self.load_video(visual, self.max_frames_num)
336
- elif self.video_decode_backend == "pyav":
337
- video = read_video_pyav(visual, num_frm=self.max_frames_num)
338
- # video = self.load_video(visual, self.max_frames_num)
339
- video = self._image_processor.preprocess(video, return_tensors="pt")["pixel_values"].half().cuda()
340
- videos.append(video)
341
- except Exception as e:
342
- eval_logger.info(f"{e}")
343
- eval_logger.info(f"Video {visuals} can not load, check the source")
344
- video_path = "\n".join(visuals)
345
- res.append(f"Video {video_path} can not load, check the source")
346
- pbar.update(1)
347
- continue
348
-
349
- qs = contexts
350
- if self.model.config.mm_use_im_start_end:
351
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + qs
352
- else:
353
- qs = DEFAULT_IMAGE_TOKEN * len(videos) + "\n" + qs
354
-
355
- # This is much safer for llama3, as we now have some object type in it
356
- if "llama_3" in self.conv_template:
357
- conv = copy.deepcopy(conv_templates[self.conv_template])
358
- else:
359
- conv = conv_templates[self.conv_template].copy()
360
-
361
- conv.append_message(conv.roles[0], qs)
362
- conv.append_message(conv.roles[1], None)
363
- prompt = conv.get_prompt()
364
-
365
- input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
366
- pad_token_ids = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
367
- if "llama_3" in self.conv_template:
368
- pad_token_ids = 0 # lmms-lab/llama3-llava-8b is trained on this pad token id. You may need to customize this for other models.
369
- attention_masks = input_ids.ne(pad_token_ids).long().cuda()
370
-
371
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
372
- keywords = [stop_str]
373
- stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)
374
-
375
- cur_prompt = contexts
376
-
377
- if "max_new_tokens" not in gen_kwargs:
378
- gen_kwargs["max_new_tokens"] = 1024
379
- if "temperature" not in gen_kwargs:
380
- gen_kwargs["temperature"] = 0
381
- if "top_p" not in gen_kwargs:
382
- gen_kwargs["top_p"] = None
383
- if "num_beams" not in gen_kwargs:
384
- gen_kwargs["num_beams"] = 1
385
- with torch.inference_mode():
386
- output_ids = self.model.generate(
387
- inputs=input_ids,
388
- images=videos,
389
- attention_mask=attention_masks,
390
- modalities="video",
391
- use_cache=self.use_cache,
392
- stopping_criteria=[stopping_criteria],
393
- do_sample=True if gen_kwargs["temperature"] > 0 else False,
394
- temperature=gen_kwargs["temperature"],
395
- top_p=gen_kwargs["top_p"],
396
- num_beams=gen_kwargs["num_beams"],
397
- max_new_tokens=gen_kwargs["max_new_tokens"],
398
- )
399
- # output_ids = model.generate(inputs=input_ids, images=video, attention_mask=attention_masks, modalities="video", do_sample=True, temperature=0.2, use_cache=True, stopping_criteria=[stopping_criteria])
400
-
401
- outputs = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
402
- res.append(outputs)
403
- pbar.update(1)
404
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/longva.py DELETED
@@ -1,462 +0,0 @@
1
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
2
- from accelerate.state import AcceleratorState
3
- from transformers import AutoConfig
4
-
5
- import math
6
- import torch
7
-
8
- torch.backends.cuda.matmul.allow_tf32 = True
9
-
10
- from tqdm import tqdm
11
- from datetime import timedelta
12
- from decord import VideoReader, cpu
13
- import numpy as np
14
-
15
- import copy
16
- import PIL
17
- from typing import List, Optional, Union, Tuple
18
- from packaging import version
19
- import warnings
20
- import logging
21
-
22
- warnings.filterwarnings("ignore")
23
-
24
- eval_logger = logging.getLogger("lmms-eval")
25
-
26
- from lmms_eval import utils
27
- from lmms_eval.api.instance import Instance
28
- from lmms_eval.api.model import lmms
29
- from lmms_eval.api.registry import register_model
30
- from lmms_eval.models.model_utils.load_video import read_video_pyav
31
-
32
- try:
33
- from longva.model.builder import load_pretrained_model
34
- from longva.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token, KeywordsStoppingCriteria
35
- from longva.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IGNORE_INDEX
36
- from longva.conversation import conv_templates, SeparatorStyle
37
-
38
- except Exception as e:
39
- eval_logger.debug("longva is not installed. Please install longva to use this model.\nError: %s" % e)
40
-
41
- # inference implementation for attention, can be "sdpa", "eager", "flash_attention_2". Seems FA2 is not effective during inference: https://discuss.huggingface.co/t/flash-attention-has-no-effect-on-inference/73453/5
42
- # if is_flash_attn_2_available:
43
- # best_fit_attn_implementation = "flash_attention_2" # flash_attn has a bug that says: ERROR Error query and key must have the same dtype in generating
44
-
45
- if version.parse(torch.__version__) >= version.parse("2.1.2"):
46
- best_fit_attn_implementation = "sdpa"
47
- else:
48
- best_fit_attn_implementation = "eager"
49
-
50
-
51
- @register_model("longva")
52
- class LongVA(lmms):
53
-
54
- def __init__(
55
- self,
56
- pretrained: str = "lmms-lab/LongVA-7B",
57
- truncation: Optional[bool] = True,
58
- device: Optional[str] = "cuda:0",
59
- batch_size: Optional[Union[int, str]] = 1,
60
- model_name: Optional[str] = None,
61
- attn_implementation: Optional[str] = best_fit_attn_implementation,
62
- device_map: Optional[str] = "cuda:0",
63
- conv_template: Optional[str] = "vicuna_v1",
64
- use_cache: Optional[bool] = True,
65
- truncate_context: Optional[bool] = False, # whether to truncate the context in generation, set it False for LLaVA-1.6
66
- customized_config: Optional[str] = None, # ends in json
67
- max_frames_num: Optional[int] = 32,
68
- mm_spatial_pool_stride: Optional[int] = 2,
69
- mm_spatial_pool_mode: Optional[str] = "average",
70
- token_strategy: Optional[str] = "single", # could be "single" or "multiple", "multiple" denotes adding multiple <image> tokens for each frame
71
- video_decode_backend: str = "pyav",
72
- **kwargs,
73
- ) -> None:
74
- super().__init__()
75
- # Do not use kwargs for now
76
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
77
-
78
- accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
79
- accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
80
- if accelerator.num_processes > 1:
81
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
82
- self.device_map = f"cuda:{accelerator.local_process_index}"
83
- elif accelerator.num_processes == 1 and device_map == "auto":
84
- self._device = torch.device(device)
85
- self.device_map = device_map
86
- else:
87
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
88
- self.device_map = f"cuda:{accelerator.local_process_index}"
89
-
90
- llava_model_args = {
91
- "multimodal": True,
92
- }
93
- if customized_config is not None:
94
- llava_model_args["customized_config"] = customized_config
95
- if attn_implementation is not None:
96
- llava_model_args["attn_implementation"] = attn_implementation
97
- if "use_flash_attention_2" in kwargs:
98
- llava_model_args["use_flash_attention_2"] = kwargs["use_flash_attention_2"]
99
- model_name = model_name if model_name is not None else get_model_name_from_path(pretrained)
100
-
101
- self.pretrained = pretrained
102
- self.token_strategy = token_strategy
103
- self.max_frames_num = max_frames_num
104
- self.mm_spatial_pool_stride = mm_spatial_pool_stride
105
- self.mm_spatial_pool_mode = mm_spatial_pool_mode
106
- self.video_decode_backend = video_decode_backend
107
-
108
- overwrite_config = {}
109
- overwrite_config["mm_spatial_pool_stride"] = self.mm_spatial_pool_stride
110
- overwrite_config["mm_spatial_pool_mode"] = self.mm_spatial_pool_mode
111
- cfg_pretrained = AutoConfig.from_pretrained(self.pretrained)
112
-
113
- llava_model_args["overwrite_config"] = overwrite_config
114
- try:
115
- # Try to load the model with the multimodal argument
116
- self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
117
- except TypeError:
118
- # for older versions of LLaVA that don't have multimodal argument
119
- llava_model_args.pop("multimodal", None)
120
- self._tokenizer, self._model, self._image_processor, self._max_length = load_pretrained_model(pretrained, None, model_name, device_map=self.device_map, **llava_model_args)
121
-
122
- self._config = self._model.config
123
- self.model.eval()
124
- self.model.tie_weights()
125
- self.truncation = truncation
126
- self.batch_size_per_gpu = int(batch_size)
127
- self.conv_template = conv_template
128
- self.use_cache = use_cache
129
- self.truncate_context = truncate_context
130
- assert self.batch_size_per_gpu == 1, "Llava currently does not support batched generation. See https://github.com/haotian-liu/LLaVA/issues/754. HF Llava also has this issue."
131
-
132
- if accelerator.num_processes > 1:
133
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
134
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
135
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
136
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
137
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
138
- kwargs = {
139
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
140
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
141
- }
142
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
143
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
144
-
145
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
146
- self._model = accelerator.prepare(self.model)
147
- else:
148
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
149
- self.accelerator = accelerator
150
- if self.accelerator.is_local_main_process:
151
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
152
- self._rank = self.accelerator.local_process_index
153
- self._world_size = self.accelerator.num_processes
154
-
155
- elif accelerator.num_processes == 1 and device_map == "auto":
156
- eval_logger.info(f"Using {accelerator.num_processes} devices with tensor parallelism")
157
- self._rank = 0
158
- self._word_size = 1
159
-
160
- else:
161
- eval_logger.info(f"Using single device: {self._device}")
162
- self.model.to(self._device)
163
- self._rank = 0
164
- self._world_size = 1
165
-
166
- @property
167
- def config(self):
168
- # return the associated transformers.AutoConfig for the given pretrained model.
169
- return self._config
170
-
171
- @property
172
- def tokenizer(self):
173
- return self._tokenizer
174
-
175
- @property
176
- def model(self):
177
- # returns the model, unwrapping it if using Accelerate
178
- if hasattr(self, "accelerator"):
179
- return self.accelerator.unwrap_model(self._model)
180
- else:
181
- return self._model
182
-
183
- @property
184
- def eot_token_id(self):
185
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
186
- return self.tokenizer.eos_token_id
187
-
188
- @property
189
- def max_length(self):
190
- return self._max_length
191
-
192
- def pad_sequence(self, input_ids, batch_first, padding_value):
193
- if self.tokenizer.padding_side == "left":
194
- input_ids = [torch.flip(_input_ids, [0]) for _input_ids in input_ids]
195
- input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=batch_first, padding_value=padding_value)
196
- if self.tokenizer.padding_side == "left":
197
- input_ids = torch.flip(input_ids, [1])
198
- return input_ids
199
-
200
- @property
201
- def batch_size(self):
202
- return self.batch_size_per_gpu
203
-
204
- @property
205
- def device(self):
206
- return self._device
207
-
208
- @property
209
- def rank(self):
210
- return self._rank
211
-
212
- @property
213
- def world_size(self):
214
- return self._world_size
215
-
216
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
217
- """ """
218
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
219
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
220
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
221
- if left_truncate_len:
222
- encoding = encoding[-left_truncate_len:]
223
- return encoding
224
-
225
- def tok_decode(self, tokens):
226
- try:
227
- return self.tokenizer.decode(tokens)
228
- except:
229
- return self.tokenizer.decode([tokens])
230
-
231
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
232
- # TODO
233
- res = []
234
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
235
-
236
- for contexts, doc_to_target, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
237
- # encode, pad, and truncate contexts for this batch
238
- if type(doc_to_target) == str:
239
- continuation = doc_to_target
240
- else:
241
- continuation = doc_to_target(self.task_dict[task][split][doc_id])
242
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
243
- visuals = self.flatten(visuals)
244
- image_sizes = [[visual.size[0], visual.size[1]] for visual in visuals]
245
- if visuals:
246
- image = process_images(visuals, self._image_processor, self._config)
247
- if type(image) is list:
248
- image = [_image.to(dtype=torch.float16, device=self.device) for _image in image]
249
- else:
250
- image = image.to(dtype=torch.float16, device=self.device)
251
- else:
252
- image = None
253
-
254
- prompts_input = contexts[0] if isinstance(contexts, list) else contexts
255
-
256
- if image is not None and len(image) != 0 and DEFAULT_IMAGE_TOKEN not in prompts_input:
257
- """
258
- Three senarios:
259
- 1. No image, and there for, no image token should be added.
260
- 2. image token is already specified in the context, so we don't need to add it.
261
- 3. image token is not specified in the context and there is image inputs, so we need to add it. In this case, we add the image token at the beginning of the context and add a new line.
262
- """
263
- image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visuals)
264
- image_tokens = " ".join(image_tokens)
265
- prompts_input = image_tokens + "\n" + (contexts[0] if isinstance(contexts, list) else contexts)
266
-
267
- # This is much safer for llama3, as we now have some object type in it
268
- if "llama_3" in self.conv_template:
269
- conv = copy.deepcopy(conv_templates[self.conv_template])
270
- else:
271
- conv = conv_templates[self.conv_template].copy()
272
-
273
- conv.append_message(conv.roles[0], prompts_input)
274
- conv.append_message(conv.roles[1], None)
275
- prompt = conv.get_prompt()
276
- pad_token_id = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
277
- contxt_id = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
278
- # Add the answer of the second role
279
- conv.messages[1][1] = continuation
280
-
281
- prompt = conv.get_prompt()
282
- input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(self.device)
283
- labels = input_ids.clone()
284
- # Context part no need to calculate for loss
285
- labels[0, : contxt_id.shape[1]] = -100
286
- with torch.inference_mode():
287
- outputs = self.model(input_ids=input_ids, labels=labels, images=image, use_cache=True, image_sizes=image_sizes)
288
- loss = outputs["loss"]
289
- # loss = torch.exp(loss)
290
- logits = outputs["logits"]
291
- greedy_tokens = logits.argmax(dim=-1)
292
- cont_toks = input_ids[:, contxt_id.shape[1] :] # [1, seq]
293
- greedy_tokens = greedy_tokens[:, contxt_id.shape[1] : input_ids.shape[1]] # [1, seq]
294
- max_equal = (greedy_tokens == cont_toks).all()
295
- res.append((float(loss.item()), bool(max_equal)))
296
- pbar.update(1)
297
-
298
- pbar.close()
299
- return res
300
-
301
- def flatten(self, input):
302
- new_list = []
303
- for i in input:
304
- for j in i:
305
- new_list.append(j)
306
- return new_list
307
-
308
- def load_video(self, video_path, max_frames_num):
309
- if type(video_path) == str:
310
- vr = VideoReader(video_path, ctx=cpu(0))
311
- else:
312
- vr = VideoReader(video_path[0], ctx=cpu(0))
313
- total_frame_num = len(vr)
314
- uniform_sampled_frames = np.linspace(0, total_frame_num - 1, max_frames_num, dtype=int)
315
- frame_idx = uniform_sampled_frames.tolist()
316
- spare_frames = vr.get_batch(frame_idx).asnumpy()
317
- return spare_frames # (frames, height, width, channels)
318
-
319
- def generate_until(self, requests: List[Instance]) -> List[str]:
320
- res = []
321
-
322
- def _collate(x):
323
- # the negative sign on len(toks) sorts descending - this has a few advantages:
324
- # - time estimates will always be over not underestimates, which is more useful for planning
325
- # - to know the size of a batch when going through the list, you know the first one is always the batch
326
- # padded context length. this is useful to simplify the batching logic and more importantly to make
327
- # automatic adaptive batches much much easier to implement
328
- # - any OOMs will happen right away rather than near the end
329
- toks = self.tok_encode(x[0])
330
- return -len(toks), x[0]
331
-
332
- # we group requests by their generation_kwargs,
333
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
334
- # in the same batch.
335
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
336
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
337
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
338
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
339
- for chunk in chunks:
340
- batched_contexts, all_gen_kwargs, batched_doc_to_visual, batched_doc_id, batched_task, batched_split = zip(*chunk)
341
- task = batched_task[0]
342
- split = batched_split[0]
343
- batched_visuals = [batched_doc_to_visual[0](self.task_dict[task][split][ids]) for ids in batched_doc_id] # [B, N]
344
- flattened_visuals = self.flatten(batched_visuals) # [B*N]
345
- assert len(batched_visuals) == 1
346
-
347
- # we assume all gen kwargs in the batch are the same
348
- # this is safe to assume because the `grouper` object ensures it.
349
- gen_kwargs = all_gen_kwargs[0]
350
- if "until" in gen_kwargs:
351
- gen_kwargs.pop("until")
352
-
353
- question_input = []
354
-
355
- for visual, context in zip(batched_visuals, batched_contexts):
356
- if "image_aspect_ratio" in gen_kwargs.keys() and "image_aspect_ratio" not in self._config.__dict__:
357
- # here we should pop it out of gen_kwargs so that it doesn't get passed to the model for next step of generation
358
- self._config.image_aspect_ratio = gen_kwargs.pop("image_aspect_ratio")
359
- eval_logger.info(f"Setting image aspect ratio: {self._config.image_aspect_ratio}")
360
-
361
- # encode, pad, and truncate contexts for this batch
362
- if type(visual[0]) == PIL.Image.Image: # For image task
363
- image_tensor = process_images(visual, self._image_processor, self._config)
364
- if type(image_tensor) is list:
365
- image_tensor = [_image.to(dtype=torch.float16, device=self.device) for _image in image_tensor]
366
- else:
367
- image_tensor = image_tensor.to(dtype=torch.float16, device=self.device)
368
-
369
- task_type = "image"
370
-
371
- elif type(visual[0]) == str: # For video task
372
- image_tensor = []
373
- try:
374
- if self.video_decode_backend == "decord":
375
- frames = self.load_video(visual, self.max_frames_num)
376
- elif self.video_decode_backend == "pyav":
377
- frames = read_video_pyav(visual[0], num_frm=self.max_frames_num)
378
- frames = self._image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].half().cuda()
379
- image_tensor.append(frames)
380
- except Exception as e:
381
- eval_logger.error(f"Error {e} in loading video")
382
- image_tensor = None
383
-
384
- task_type = "video"
385
-
386
- if image_tensor is not None and len(image_tensor) != 0 and DEFAULT_IMAGE_TOKEN not in context:
387
- """
388
- Three senarios:
389
- 1. No image, and there for, no image token should be added.
390
- 2. image token is already specified in the context, so we don't need to add it.
391
- 3. image token is not specified in the context and there is image inputs, so we need to add it. In this case, we add the image token at the beginning of the context and add a new line.
392
- 4. For video tasks, we could add a <image> token or multiple <image> tokens for each frame in the context. This depends on the training strategy and should balance in test to decide which is better
393
- """
394
- if task_type == "image":
395
- image_tokens = [DEFAULT_IMAGE_TOKEN] * len(visual) if isinstance(visual, list) else [DEFAULT_IMAGE_TOKEN]
396
- elif task_type == "video":
397
- image_tokens = [DEFAULT_IMAGE_TOKEN] * len(frames) if self.token_strategy == "multiple" else [DEFAULT_IMAGE_TOKEN]
398
-
399
- image_tokens = " ".join(image_tokens)
400
- question = image_tokens + "\n" + context
401
- else:
402
- question = context
403
-
404
- # This is much safer for llama3, as we now have some object type in it
405
- if "llama_3" in self.conv_template:
406
- conv = copy.deepcopy(conv_templates[self.conv_template])
407
- else:
408
- conv = conv_templates[self.conv_template].copy()
409
- conv.append_message(conv.roles[0], question)
410
- conv.append_message(conv.roles[1], None)
411
- prompt_question = conv.get_prompt()
412
- question_input.append(prompt_question)
413
-
414
- # preconfigure gen_kwargs with defaults
415
- if "max_new_tokens" not in gen_kwargs:
416
- gen_kwargs["max_new_tokens"] = 1024
417
- if "temperature" not in gen_kwargs:
418
- gen_kwargs["temperature"] = 0
419
- if "do_sample" not in gen_kwargs:
420
- gen_kwargs["do_sample"] = False
421
- if "top_p" not in gen_kwargs:
422
- gen_kwargs["top_p"] = None
423
- if "num_beams" not in gen_kwargs:
424
- gen_kwargs["num_beams"] = 1
425
-
426
- input_ids_list = [tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt") for prompt in question_input]
427
- pad_token_ids = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
428
- input_ids = self.pad_sequence(input_ids_list, batch_first=True, padding_value=pad_token_ids).to(self.device)
429
- attention_masks = input_ids.ne(pad_token_ids).to(self.device)
430
-
431
- if task_type == "image":
432
- gen_kwargs["image_sizes"] = [flattened_visuals[idx].size for idx in range(len(flattened_visuals))]
433
- elif task_type == "video":
434
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
435
- keywords = [stop_str]
436
- stopping_criteria = KeywordsStoppingCriteria(keywords, self.tokenizer, input_ids)
437
- gen_kwargs["modalities"] = ["video"]
438
- gen_kwargs["stopping_criteria"] = [stopping_criteria]
439
- self._config.mm_spatial_pool_stride = self.mm_spatial_pool_stride
440
- self._config.mm_spatial_pool_mode = self.mm_spatial_pool_mode
441
-
442
- # These steps are not in LLaVA's original code, but are necessary for generation to work
443
- # TODO: attention to this major generation step...
444
- if "image_aspect_ratio" in gen_kwargs.keys():
445
- gen_kwargs.pop("image_aspect_ratio")
446
- try:
447
- with torch.inference_mode():
448
- cont = self.model.generate(input_ids, attention_mask=attention_masks, pad_token_id=pad_token_ids, images=image_tensor, use_cache=self.use_cache, **gen_kwargs)
449
-
450
- text_outputs = self.tokenizer.batch_decode(cont, skip_special_tokens=True)
451
- except Exception as e:
452
- raise e
453
-
454
- text_outputs = [response.strip() for response in text_outputs]
455
- res.extend(text_outputs)
456
- self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)
457
- pbar.update(1)
458
- # reorder this group of results back to original unsorted form
459
- res = re_ords.get_original(res)
460
-
461
- pbar.close()
462
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/minicpm_v.py DELETED
@@ -1,222 +0,0 @@
1
- import torch
2
-
3
- from tqdm import tqdm
4
- from lmms_eval import utils
5
- from lmms_eval.api.instance import Instance
6
- from lmms_eval.api.model import lmms
7
- from lmms_eval.api.registry import register_model
8
- from accelerate import Accelerator, DistributedType
9
- from accelerate.state import AcceleratorState
10
- from typing import List, Optional, Union, Tuple
11
- from transformers import AutoModel, AutoTokenizer
12
-
13
-
14
- import warnings
15
-
16
- warnings.filterwarnings("ignore")
17
-
18
- from loguru import logger as eval_logger
19
-
20
-
21
- @register_model("minicpm_v")
22
- class MiniCPM_V(lmms):
23
- """
24
- MiniCPM_V Model
25
- """
26
-
27
- def __init__(
28
- self,
29
- pretrained: str = "openbmb/MiniCPM-V",
30
- device: Optional[str] = "cuda",
31
- dtype: Optional[Union[str, torch.dtype]] = torch.bfloat16,
32
- batch_size: Optional[Union[int, str]] = 1,
33
- trust_remote_code: Optional[bool] = True,
34
- **kwargs,
35
- ) -> None:
36
- super().__init__()
37
- # Do not use kwargs for now
38
- assert kwargs == {}, f"Unexpected kwargs: {kwargs}"
39
-
40
- accelerator = Accelerator()
41
- if accelerator.num_processes > 1:
42
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
43
- else:
44
- self._device = device
45
- self._model = AutoModel.from_pretrained(pretrained, trust_remote_code=trust_remote_code, torch_dtype=dtype, device_map=self._device).to(dtype)
46
- self._tokenizer = AutoTokenizer.from_pretrained(pretrained, trust_remote_code=trust_remote_code)
47
- self._config = self._model.config
48
- self.model.eval()
49
- self.model.tie_weights()
50
- self.batch_size_per_gpu = int(batch_size)
51
- if accelerator.num_processes > 1:
52
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
53
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
54
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
55
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
56
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
57
- kwargs = {
58
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
59
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
60
- }
61
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
62
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
63
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
64
- self._model = accelerator.prepare(self.model)
65
- else:
66
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
67
- self.accelerator = accelerator
68
- if self.accelerator.is_local_main_process:
69
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
70
- self._rank = self.accelerator.local_process_index
71
- self._world_size = self.accelerator.num_processes
72
- else:
73
- self.model.to(self._device)
74
- self._rank = 0
75
- self._word_size = 1
76
-
77
- @property
78
- def config(self):
79
- # return the associated transformers.AutoConfig for the given pretrained model.
80
- return self._config
81
-
82
- @property
83
- def tokenizer(self):
84
- return self._tokenizer
85
-
86
- @property
87
- def model(self):
88
- # returns the model, unwrapping it if using Accelerate
89
- if hasattr(self, "accelerator"):
90
- return self.accelerator.unwrap_model(self._model)
91
- else:
92
- return self._model
93
-
94
- @property
95
- def eot_token_id(self):
96
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
97
- return self.tokenizer.eos_token_id
98
-
99
- @property
100
- def max_length(self):
101
- return self._max_length
102
-
103
- @property
104
- def batch_size(self):
105
- return self.batch_size_per_gpu
106
-
107
- @property
108
- def device(self):
109
- return self._device
110
-
111
- @property
112
- def rank(self):
113
- return self._rank
114
-
115
- @property
116
- def world_size(self):
117
- return self._world_size
118
-
119
- def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None) -> List[int]:
120
- """ """
121
- add_special_tokens = False if add_special_tokens is None else add_special_tokens
122
- encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
123
- # left-truncate the encoded context to be at most `left_truncate_len` tokens long
124
- if left_truncate_len:
125
- encoding = encoding[-left_truncate_len:]
126
- return encoding
127
-
128
- def tok_decode(self, tokens):
129
- return self.tokenizer.decode(tokens)
130
-
131
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
132
- # TODO
133
- assert False, "We have not implemented this function for MiniCPM_V yet"
134
-
135
- def flatten(self, input):
136
- new_list = []
137
- for i in input:
138
- for j in i:
139
- new_list.append(j)
140
- return new_list
141
-
142
- def generate_until(self, requests: List[Instance]) -> List[str]:
143
- res = []
144
-
145
- def _collate(x):
146
- # the negative sign on len(toks) sorts descending - this has a few advantages:
147
- # - time estimates will always be over not underestimates, which is more useful for planning
148
- # - to know the size of a batch when going through the list, you know the first one is always the batch
149
- # padded context length. this is useful to simplify the batching logic and more importantly to make
150
- # automatic adaptive batches much much easier to implement
151
- # - any OOMs will happen right away rather than near the end
152
- toks = self.tok_encode(x[0])
153
- return -len(toks), x[0]
154
-
155
- # we group requests by their generation_kwargs,
156
- # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
157
- # in the same batch.
158
- re_ords = utils.Collator([reg.args for reg in requests], _collate, grouping=True)
159
- chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
160
- num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
161
- pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")
162
- for chunk in chunks:
163
- contexts, all_gen_kwargs, doc_to_visual, doc_id, task, split = zip(*chunk)
164
- task = task[0]
165
- split = split[0]
166
- visuals = [doc_to_visual[0](self.task_dict[task][split][ids]) for ids in doc_id]
167
- visuals = self.flatten(visuals)
168
- # we assume all gen kwargs in the batch are the same
169
- # this is safe to assume because the `grouper` object ensures it.
170
- gen_kwargs = all_gen_kwargs[0]
171
-
172
- # Set default values for until and max_new_tokens
173
- until = [self.tok_decode(self.eot_token_id)]
174
-
175
- # Update values from gen_kwargs if present
176
- if "until" in gen_kwargs:
177
- until = gen_kwargs.pop("until")
178
- if isinstance(until, str):
179
- until = [until]
180
- elif not isinstance(until, list):
181
- raise ValueError(f"Expected `gen_kwargs['until']` to be of type Union[str,list] but got {type(until)}")
182
- assert self.batch_size_per_gpu == 1, "Do not support batch_size_per_gpu > 1 for now"
183
- assert len(visuals) == 1, "MiniCPM_V interface does not support bn_image > 1 for now"
184
- context = contexts[0]
185
- if "<image>" in context:
186
- # minicpm does not expect the <image> tag
187
- context = context.replace("<image>", "")
188
- msgs = [{"role": "user", "content": context}]
189
-
190
- gen_kwargs["image_sizes"] = [visuals[idx].size for idx in range(len(visuals))]
191
- if "max_new_tokens" not in gen_kwargs:
192
- gen_kwargs["max_new_tokens"] = 1024
193
- if "temperature" not in gen_kwargs:
194
- gen_kwargs["temperature"] = 0
195
- if "top_p" not in gen_kwargs:
196
- gen_kwargs["top_p"] = None
197
- if "num_beams" not in gen_kwargs:
198
- gen_kwargs["num_beams"] = 1
199
- try:
200
- # ominicpm does not give much information on how they do eval so I just use the chat format.
201
- response, context, _ = self.model.chat(
202
- image=visuals[0],
203
- msgs=msgs,
204
- context=None,
205
- tokenizer=self.tokenizer,
206
- sampling=True if gen_kwargs["temperature"] > 0 else False,
207
- temperature=gen_kwargs["temperature"],
208
- top_p=gen_kwargs["top_p"],
209
- num_beams=gen_kwargs["num_beams"],
210
- max_new_tokens=gen_kwargs["max_new_tokens"],
211
- )
212
- except Exception as e:
213
- eval_logger.error(f"Error {e} in generating")
214
- cont = ""
215
- res.append(response)
216
- self.cache_hook.add_partial("generate_until", (context, gen_kwargs), response)
217
- pbar.update(1)
218
- # reorder this group of results back to original unsorted form
219
- res = re_ords.get_original(res)
220
-
221
- pbar.close()
222
- return res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/__init__.py DELETED
File without changes
lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/load_video.py DELETED
@@ -1,55 +0,0 @@
1
- import av
2
- from av.codec.context import CodecContext
3
- import numpy as np
4
-
5
-
6
- # This one is faster
7
- def record_video_length_stream(container, indices):
8
- frames = []
9
- start_index = indices[0]
10
- end_index = indices[-1]
11
- for i, frame in enumerate(container.decode(video=0)):
12
- if i > end_index:
13
- break
14
- if i >= start_index and i in indices:
15
- frames.append(frame)
16
- return frames
17
-
18
-
19
- # This one works for all types of video
20
- def record_video_length_packet(container):
21
- frames = []
22
- # https://github.com/PyAV-Org/PyAV/issues/1269
23
- # https://www.cnblogs.com/beyond-tester/p/17641872.html
24
- # context = CodecContext.create("libvpx-vp9", "r")
25
- for packet in container.demux(video=0):
26
- for frame in packet.decode():
27
- frames.append(frame)
28
- return frames
29
-
30
-
31
- def read_video_pyav(video_path, num_frm=8):
32
-
33
- if "webm" not in video_path and "mkv" not in video_path:
34
- # For mp4, we try loading with stream first
35
- try:
36
- container = av.open(video_path)
37
- total_frames = container.streams.video[0].frames
38
- sampled_frm = min(total_frames, num_frm)
39
- indices = np.linspace(0, total_frames - 1, sampled_frm, dtype=int)
40
- frames = record_video_length_stream(container, indices)
41
- except:
42
- container = av.open(video_path)
43
- frames = record_video_length_packet(container)
44
- total_frames = len(frames)
45
- sampled_frm = min(total_frames, num_frm)
46
- indices = np.linspace(0, total_frames - 1, sampled_frm, dtype=int)
47
- frames = [frames[i] for i in indices]
48
- else:
49
- container = av.open(video_path)
50
- frames = record_video_length_packet(container)
51
- total_frames = len(frames)
52
- sampled_frm = min(total_frames, num_frm)
53
- indices = np.linspace(0, total_frames - 1, sampled_frm, dtype=int)
54
- frames = [frames[i] for i in indices]
55
- return np.stack([x.to_ndarray(format="rgb24") for x in frames])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/model_utils/qwen/qwen_generate_utils.py DELETED
@@ -1,370 +0,0 @@
1
- # Copyright (c) Alibaba Cloud.
2
- #
3
- # This source code is licensed under the license found in the
4
- # LICENSE file in the root directory of this source tree.
5
-
6
- """Generation support."""
7
- import warnings
8
-
9
- warnings.simplefilter("ignore", category=DeprecationWarning)
10
- warnings.filterwarnings("ignore")
11
-
12
- from typing import Tuple, List, Union, Iterable
13
-
14
- import numpy as np
15
- import torch
16
- import torch.nn.functional as F
17
- from transformers import PreTrainedTokenizer
18
- from transformers.generation import LogitsProcessor
19
-
20
- from loguru import logger
21
-
22
- # Types.
23
- HistoryType = List[Tuple[str, str]]
24
- TokensType = List[int]
25
- BatchTokensType = List[List[int]]
26
-
27
-
28
- def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
29
- for tokens in batch:
30
- context_length = len(tokens)
31
- if context_length < seq_length:
32
- tokens.extend([pad_id] * (seq_length - context_length))
33
- return batch
34
-
35
-
36
- def get_ltor_masks_and_position_ids(
37
- data,
38
- eod_token,
39
- reset_position_ids,
40
- reset_attention_mask,
41
- eod_mask_loss,
42
- ):
43
- """Build masks and position id for left to right model."""
44
-
45
- # Extract batch size and sequence length.
46
- micro_batch_size, seq_length = data.size()
47
-
48
- # Attention mask (lower triangular).
49
- if reset_attention_mask:
50
- att_mask_batch = micro_batch_size
51
- else:
52
- att_mask_batch = 1
53
- attention_mask = torch.tril(torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)).view(att_mask_batch, 1, seq_length, seq_length)
54
-
55
- # Loss mask.
56
- loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
57
- if eod_mask_loss:
58
- loss_mask[data == eod_token] = 0.0
59
-
60
- # Position ids.
61
- position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
62
- position_ids = position_ids.unsqueeze(0).expand_as(data)
63
- # We need to clone as the ids will be modifed based on batch index.
64
- if reset_position_ids:
65
- position_ids = position_ids.clone()
66
-
67
- if reset_position_ids or reset_attention_mask:
68
- # Loop through the batches:
69
- for b in range(micro_batch_size):
70
- # Find indecies where EOD token is.
71
- eod_index = position_ids[b, data[b] == eod_token]
72
- # Detach indecies from positions if going to modify positions.
73
- if reset_position_ids:
74
- eod_index = eod_index.clone()
75
-
76
- # Loop through EOD indecies:
77
- prev_index = 0
78
- for j in range(eod_index.size()[0]):
79
- i = eod_index[j]
80
- # Mask attention loss.
81
- if reset_attention_mask:
82
- attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
83
- # Reset positions.
84
- if reset_position_ids:
85
- position_ids[b, (i + 1) :] -= i + 1 - prev_index
86
- prev_index = i + 1
87
-
88
- # Convert attention mask to binary:
89
- attention_mask = attention_mask < 0.5
90
-
91
- return attention_mask, loss_mask, position_ids
92
-
93
-
94
- def get_batch(context_tokens: torch.LongTensor, eod_id: int):
95
- """Generate batch from context tokens."""
96
- # Move to GPU.
97
- tokens = context_tokens.contiguous().to(context_tokens.device)
98
- # Get the attention mask and postition ids.
99
- attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
100
- tokens,
101
- eod_id,
102
- reset_position_ids=False,
103
- reset_attention_mask=False,
104
- eod_mask_loss=False,
105
- )
106
- return tokens, attention_mask, position_ids
107
-
108
-
109
- def get_stop_words_ids(chat_format, tokenizer):
110
- if chat_format == "raw":
111
- stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
112
- elif chat_format == "chatml":
113
- stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
114
- else:
115
- raise NotImplementedError(f"Unknown chat format {chat_format!r}")
116
- return stop_words_ids
117
-
118
-
119
- def make_context(
120
- tokenizer: PreTrainedTokenizer,
121
- query: str,
122
- history: List[Tuple[str, str]] = None,
123
- system: str = "",
124
- max_window_size: int = 6144,
125
- chat_format: str = "chatml",
126
- ):
127
- if history is None:
128
- history = []
129
-
130
- if chat_format == "chatml":
131
- im_start, im_end = "<|im_start|>", "<|im_end|>"
132
- im_start_tokens = [tokenizer.im_start_id]
133
- im_end_tokens = [tokenizer.im_end_id]
134
- nl_tokens = tokenizer.encode("\n")
135
-
136
- def _tokenize_str(role, content):
137
- return f"{role}\n{content}", tokenizer.encode(role, allowed_special=set(tokenizer.IMAGE_ST)) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))
138
-
139
- system_text, system_tokens_part = _tokenize_str("system", system)
140
- system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
141
-
142
- raw_text = ""
143
- context_tokens = []
144
-
145
- for turn_query, turn_response in reversed(history):
146
- query_text, query_tokens_part = _tokenize_str("user", turn_query)
147
- query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
148
- if turn_response is not None:
149
- response_text, response_tokens_part = _tokenize_str("assistant", turn_response)
150
- response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
151
-
152
- next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
153
- prev_chat = f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
154
- else:
155
- next_context_tokens = nl_tokens + query_tokens + nl_tokens
156
- prev_chat = f"\n{im_start}{query_text}{im_end}\n"
157
-
158
- current_context_size = len(system_tokens) + len(next_context_tokens) + len(context_tokens)
159
- if current_context_size < max_window_size:
160
- context_tokens = next_context_tokens + context_tokens
161
- raw_text = prev_chat + raw_text
162
- else:
163
- break
164
-
165
- context_tokens = system_tokens + context_tokens
166
- raw_text = f"{im_start}{system_text}{im_end}" + raw_text
167
- context_tokens += nl_tokens + im_start_tokens + _tokenize_str("user", query)[1] + im_end_tokens + nl_tokens + im_start_tokens + tokenizer.encode("assistant") + nl_tokens
168
- raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
169
-
170
- elif chat_format == "raw":
171
- raw_text = query
172
- context_tokens = tokenizer.encode(raw_text)
173
- else:
174
- raise NotImplementedError(f"Unknown chat format {chat_format!r}")
175
-
176
- return raw_text, context_tokens
177
-
178
-
179
- def _decode_default(
180
- tokens: List[int],
181
- *,
182
- stop_words: List[str],
183
- eod_words: List[str],
184
- tokenizer: PreTrainedTokenizer,
185
- raw_text_len: int,
186
- verbose: bool = False,
187
- return_end_reason: bool = False,
188
- errors: str = "replace",
189
- ):
190
- trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
191
- if verbose:
192
- print("\nRaw Generate: ", trim_decode_tokens)
193
-
194
- end_reason = f"Gen length {len(tokens)}"
195
- for stop_word in stop_words:
196
- trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
197
- for eod_word in eod_words:
198
- if eod_word in trim_decode_tokens:
199
- end_reason = f"Gen {eod_word!r}"
200
- trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
201
- trim_decode_tokens = trim_decode_tokens.strip()
202
- if verbose:
203
- print("\nEnd Reason:", end_reason)
204
- print("\nGenerate: ", trim_decode_tokens)
205
-
206
- if return_end_reason:
207
- return trim_decode_tokens, end_reason
208
- else:
209
- return trim_decode_tokens
210
-
211
-
212
- def _decode_chatml(
213
- tokens: List[int], *, stop_words: List[str], eod_token_ids: List[int], tokenizer: PreTrainedTokenizer, raw_text_len: int, context_length: int, verbose: bool = False, return_end_reason: bool = False, errors: str = "replace"
214
- ):
215
- end_reason = f"Gen length {len(tokens)}"
216
- eod_token_idx = context_length
217
- for eod_token_idx in range(context_length, len(tokens)):
218
- if tokens[eod_token_idx] in eod_token_ids:
219
- end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
220
- break
221
-
222
- trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
223
- if verbose:
224
- print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
225
- print("\nRaw Generate:", trim_decode_tokens)
226
- print("\nEnd Reason:", end_reason)
227
- for stop_word in stop_words:
228
- trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
229
- trim_decode_tokens = trim_decode_tokens.strip()
230
- if verbose:
231
- print("\nGenerate:", trim_decode_tokens)
232
-
233
- if return_end_reason:
234
- return trim_decode_tokens, end_reason
235
- else:
236
- return trim_decode_tokens
237
-
238
-
239
- def decode_tokens(
240
- tokens: Union[torch.LongTensor, TokensType],
241
- tokenizer: PreTrainedTokenizer,
242
- raw_text_len: int,
243
- context_length: int,
244
- chat_format: str,
245
- verbose: bool = False,
246
- return_end_reason: bool = False,
247
- errors: str = "replace",
248
- ) -> str:
249
- if torch.is_tensor(tokens):
250
- tokens = tokens.cpu().numpy().tolist()
251
-
252
- if chat_format == "chatml":
253
- return _decode_chatml(
254
- tokens,
255
- stop_words=[],
256
- eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
257
- tokenizer=tokenizer,
258
- raw_text_len=raw_text_len,
259
- context_length=context_length,
260
- verbose=verbose,
261
- return_end_reason=return_end_reason,
262
- errors=errors,
263
- )
264
- elif chat_format == "raw":
265
- return _decode_default(
266
- tokens,
267
- stop_words=["<|endoftext|>"],
268
- eod_words=["<|endoftext|>"],
269
- tokenizer=tokenizer,
270
- raw_text_len=raw_text_len,
271
- verbose=verbose,
272
- return_end_reason=return_end_reason,
273
- errors=errors,
274
- )
275
- else:
276
- raise NotImplementedError(f"Unknown chat format {chat_format!r}")
277
-
278
-
279
- class StopWordsLogitsProcessor(LogitsProcessor):
280
- """
281
- :class:`transformers.LogitsProcessor` that enforces that when specified sequences appear, stop geration.
282
- Args:
283
- stop_words_ids (:obj:`List[List[int]]`):
284
- List of list of token ids of stop ids. In order to get the tokens of the words
285
- that should not appear in the generated text, use :obj:`tokenizer(bad_word,
286
- add_prefix_space=True).input_ids`.
287
- eos_token_id (:obj:`int`):
288
- The id of the `end-of-sequence` token.
289
- """
290
-
291
- def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
292
- if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
293
- raise ValueError(f"`stop_words_ids` has to be a non-emtpy list, but is {stop_words_ids}.")
294
- if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
295
- raise ValueError(f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}.")
296
- if any(any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in stop_word_ids) for stop_word_ids in stop_words_ids):
297
- raise ValueError(f"Each list in `stop_words_ids` has to be a list of positive integers, but is {stop_words_ids}.")
298
-
299
- self.stop_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids))
300
- self.eos_token_id = eos_token_id
301
- for stop_token_seq in self.stop_words_ids:
302
- assert len(stop_token_seq) > 0, "Stop words token sequences {} cannot have an empty list".format(stop_words_ids)
303
-
304
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
305
- stopped_samples = self._calc_stopped_samples(input_ids)
306
- for i, should_stop in enumerate(stopped_samples):
307
- if should_stop:
308
- scores[i, self.eos_token_id] = float(2**15)
309
- return scores
310
-
311
- def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
312
- if len(tokens) == 0:
313
- # if bad word tokens is just one token always ban it
314
- return True
315
- elif len(tokens) > len(prev_tokens):
316
- # if bad word tokens are longer then prev input_ids they can't be equal
317
- return False
318
- elif prev_tokens[-len(tokens) :].tolist() == tokens:
319
- # if tokens match
320
- return True
321
- else:
322
- return False
323
-
324
- def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
325
- stopped_samples = []
326
- for prev_input_ids_slice in prev_input_ids:
327
- match = False
328
- for stop_token_seq in self.stop_words_ids:
329
- if self._tokens_match(prev_input_ids_slice, stop_token_seq):
330
- # if tokens do not match continue
331
- match = True
332
- break
333
- stopped_samples.append(match)
334
-
335
- return stopped_samples
336
-
337
-
338
- def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
339
- """This function has been mostly taken from huggingface conversational
340
- ai code at
341
- https://medium.com/huggingface/how-to-build-a-state-of-the-art-
342
- conversational-ai-with-transfer-learning-2d818ac26313"""
343
-
344
- if top_k > 0:
345
- # Remove all tokens with a probability less than the
346
- # last token of the top-k
347
- indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
348
- logits[indices_to_remove] = filter_value
349
-
350
- if top_p > 0.0:
351
- # Cconvert to 1D
352
- sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
353
- cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
354
-
355
- # Remove tokens with cumulative probability above the threshold
356
- sorted_indices_to_remove = cumulative_probs > top_p
357
- # Shift the indices to the right to keep also the first token
358
- # above the threshold
359
- sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
360
- sorted_indices_to_remove[..., 0] = 0
361
- for i in range(sorted_indices.size(0)):
362
- indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
363
- logits[i][indices_to_remove] = filter_value
364
-
365
- return logits
366
-
367
-
368
- def switch(val1, val2, boolean):
369
- boolean = boolean.type_as(val1)
370
- return (1 - boolean) * val1 + boolean * val2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
lmms-eval-0.2.0.post1/lmms_eval/models/mplug_owl_video.py DELETED
@@ -1,193 +0,0 @@
1
- from accelerate import Accelerator, DistributedType, InitProcessGroupKwargs
2
- from accelerate.state import AcceleratorState
3
- from typing import List, Optional, Union, Tuple
4
- import torch
5
- from transformers import AutoTokenizer
6
- from tqdm import tqdm
7
- from datetime import timedelta
8
-
9
- from lmms_eval.api.instance import Instance
10
- from lmms_eval.api.model import lmms
11
- from lmms_eval.api.registry import register_model
12
-
13
- from lmms_eval.models.mplug_owl_video.modeling_mplug_owl import MplugOwlForConditionalGeneration
14
- from lmms_eval.models.mplug_owl_video.processing_mplug_owl import MplugOwlImageProcessor, MplugOwlProcessor
15
-
16
-
17
- from loguru import logger
18
-
19
- eval_logger = logger
20
-
21
-
22
- @register_model("mplug_owl_video")
23
- class mplug_Owl(lmms):
24
- def __init__(
25
- self,
26
- pretrained: str = "MAGAer13/mplug-owl-llama-7b-video",
27
- device: Optional[str] = "cuda:0",
28
- dtype: Optional[Union[str, torch.dtype]] = "auto",
29
- batch_size: Optional[Union[int, str]] = 1,
30
- device_map="cuda:0",
31
- num_frames: Union[str, int] = 4,
32
- **kwargs,
33
- ) -> None:
34
- """
35
- Install instructions:
36
- 1. Install lmms-eval
37
- cd lmms-eval
38
- pip install -e .;
39
- 2. Install other packages with restricted versions
40
- pip install av sentencepiece protobuf==3.20 transformers==4.28.1 einops;
41
- """
42
- super().__init__()
43
-
44
- accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
45
- accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
46
- if accelerator.num_processes > 1:
47
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
48
- self.device_map = f"cuda:{accelerator.local_process_index}"
49
- elif accelerator.num_processes == 1 and device_map == "auto":
50
- self._device = torch.device(device)
51
- self.device_map = device_map
52
- else:
53
- self._device = torch.device(f"cuda:{accelerator.local_process_index}")
54
- self.device_map = f"cuda:{accelerator.local_process_index}"
55
-
56
- # import pdb; pdb.set_trace()
57
- # This is very slow. Their issue, not mine
58
- # Also, keep transformers in version 4.28.1
59
- # They put a Config object inside a config object, this is not acceptable
60
- # for transformers == 4.39.1, object type not serializable
61
- # Protobuf needs to be in 3.20.x otherwise error
62
- # ヽ(`Д´)ノ
63
- self._model = MplugOwlForConditionalGeneration.from_pretrained(
64
- pretrained,
65
- torch_dtype=torch.bfloat16,
66
- )
67
- self.image_processor = MplugOwlImageProcessor.from_pretrained(pretrained)
68
- self._tokenizer = AutoTokenizer.from_pretrained(pretrained)
69
- self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
70
- self.model.eval()
71
- self.batch_size_per_gpu = batch_size
72
- self.num_frames = num_frames
73
-
74
- self.model.to(self.device)
75
-
76
- if accelerator.num_processes > 1:
77
- assert accelerator.distributed_type in [DistributedType.FSDP, DistributedType.MULTI_GPU, DistributedType.DEEPSPEED], "Unsupported distributed type provided. Only DDP and FSDP are supported."
78
- # If you want to use DistributedType.DEEPSPEED, you have to run accelerate config before using the model
79
- # Also, you have to select zero stage 0 (equivalent to DDP) in order to make the prepare model works
80
- # I tried to set different parameters in the kwargs to let default zero 2 stage works, but it didn't work.
81
- if accelerator.distributed_type == DistributedType.DEEPSPEED:
82
- kwargs = {
83
- "train_micro_batch_size_per_gpu": self.batch_size_per_gpu,
84
- "train_batch_size": self.batch_size_per_gpu * accelerator.num_processes,
85
- }
86
- AcceleratorState().deepspeed_plugin.deepspeed_config_process(must_match=True, **kwargs)
87
- eval_logger.info("Detected that you are using DistributedType.DEEPSPEED. Make sure you run `accelerate config` and set zero stage to 0")
88
- if accelerator.distributed_type == DistributedType.FSDP or accelerator.distributed_type == DistributedType.DEEPSPEED:
89
- self._model = accelerator.prepare(self.model)
90
- else:
91
- self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
92
- self.accelerator = accelerator
93
- if self.accelerator.is_local_main_process:
94
- eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
95
- self._rank = self.accelerator.local_process_index
96
- self._world_size = self.accelerator.num_processes
97
- else:
98
- eval_logger.info(f"Using single device: {self._device}")
99
- self.model.to(self._device)
100
- self._rank = 0
101
- self._world_size = 1
102
-
103
- @property
104
- def config(self):
105
- # return the associated transformers.AutoConfig for the given pretrained model.
106
- return self._config
107
-
108
- @property
109
- def tokenizer(self):
110
- return self._tokenizer
111
-
112
- @property
113
- def model(self):
114
- # returns the model, unwrapping it if using Accelerate
115
- if hasattr(self, "accelerator"):
116
- return self.accelerator.unwrap_model(self._model)
117
- else:
118
- return self._model
119
-
120
- @property
121
- def eot_token_id(self):
122
- # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
123
- return self.tokenizer.eos_token_id
124
-
125
- @property
126
- def max_length(self):
127
- return self._max_length
128
-
129
- @property
130
- def batch_size(self):
131
- return self.batch_size_per_gpu
132
-
133
- @property
134
- def device(self):
135
- return self._device
136
-
137
- @property
138
- def rank(self):
139
- return self._rank
140
-
141
- @property
142
- def world_size(self):
143
- return self._world_size
144
-
145
- def flatten(self, input):
146
- new_list = []
147
- for i in input:
148
- for j in i:
149
- new_list.append(j)
150
- return new_list
151
-
152
- def format_prompt(self, question):
153
- prompts = [f" <|video|> Question : {question} Answer : "]
154
- return prompts
155
-
156
- def generate_until(self, requests) -> List[str]:
157
- res = []
158
- pbar = tqdm(total=len(requests), disable=(self.rank != 0), desc="Model Responding")
159
-
160
- for contexts, gen_kwargs, doc_to_visual, doc_id, task, split in [reg.args for reg in requests]:
161
- # encode, pad, and truncate contexts for this batch
162
- visuals = [doc_to_visual(self.task_dict[task][split][doc_id])]
163
- visuals = self.flatten(visuals)
164
- inputs = self.processor(text=self.format_prompt(contexts), videos=visuals, num_frames=self.num_frames, return_tensors="pt")
165
- pixel_values_videos = inputs["video_pixel_values"]
166
- if pixel_values_videos.shape[2] != self.num_frames:
167
- empty_frames = torch.zeros((1, pixel_values_videos.shape[1], self.num_frames - pixel_values_videos.shape[2], *pixel_values_videos.shape[3:]), dtype=pixel_values_videos.dtype)
168
- pixel_values_videos = torch.cat([pixel_values_videos, empty_frames], dim=2)
169
- inputs["video_pixel_values"] = pixel_values_videos
170
- inputs = {k: v.bfloat16() if v.dtype == torch.float else v for k, v in inputs.items()}
171
- inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
172
-
173
- if "max_new_tokens" in gen_kwargs:
174
- gen_kwargs["max_length"] = gen_kwargs["max_new_tokens"]
175
- if "max_new_tokens" not in gen_kwargs:
176
- gen_kwargs["max_length"] = 128
177
- if "do_sample" not in gen_kwargs:
178
- gen_kwargs["do_sample"] = False
179
- if "top_k" not in gen_kwargs:
180
- gen_kwargs["top_k"] = 1
181
-
182
- generate_kwargs = {"do_sample": gen_kwargs["do_sample"], "top_k": gen_kwargs["top_k"], "max_length": gen_kwargs["max_length"]}
183
-
184
- with torch.no_grad():
185
- outputs = self.model.generate(**inputs, **generate_kwargs)
186
- sentence = self.tokenizer.decode(outputs.tolist()[0], skip_special_tokens=True)
187
- pbar.update(1)
188
- res.append(sentence)
189
- pbar.close()
190
- return res
191
-
192
- def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
193
- return super().loglikelihood(requests)