Minchael committed on
Commit da792de · verified · 1 Parent(s): 0cc7e9d

Upload folder using huggingface_hub
env.txt ADDED
@@ -0,0 +1,8 @@
+ conda create -n qwen_ft python=3.10 -y
+ conda activate qwen_ft
+ conda install nvidia/label/cuda-12.2.2::cuda-toolkit -y
+ pip install torch==2.6.0 torchvision==0.21.0 deepspeed==0.17.1 triton==3.2.0 accelerate==1.7.0 torchcodec==0.2 peft==0.17.1
+ pip install transformers==5.0.0.dev0
+ pip install -e /root/Desktop/workspace/kwon/pinpoint/qwen_ft
+ pip install qwen-vl-utils==0.0.14
+ pip install matplotlib
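
A quick way to verify the environment after running the steps above is a short version check. This is a sketch, not part of the commit; the asserted pins are the ones installed by env.txt, and it assumes an NVIDIA GPU is visible:

# sanity_check.py — minimal sketch, not part of the commit
import torch
import transformers

# Version strings should start with the pins from env.txt above.
assert torch.__version__.startswith("2.6.0"), torch.__version__
assert transformers.__version__.startswith("5.0.0.dev0"), transformers.__version__
# Requires an NVIDIA driver compatible with the CUDA 12.2 toolkit installed via conda.
assert torch.cuda.is_available(), "no CUDA device visible"
print(f"torch {torch.__version__} | CUDA {torch.version.cuda} | {torch.cuda.get_device_name(0)}")
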
qwen_ft.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e28d63bfee1a31bad10cc180e03b57cfe4401ff5f61eebd61cb83cfe373eb77
+ size 1221066752
qwen_ft_env.txt ADDED
@@ -0,0 +1,249 @@
+ # packages in environment at /opt/conda/envs/qwen_ft:
+ #
+ # Name Version Build Channel
+ _libgcc_mutex 0.1 main conda-forge
+ _openmp_mutex 5.1 1_gnu defaults
+ accelerate 1.7.0 pypi_0 pypi
+ alsa-lib 1.2.14 hb9d3cd8_0 conda-forge
+ anls 0.0.2 pypi_0 pypi
+ annotated-types 0.7.0 pypi_0 pypi
+ anyio 4.11.0 pypi_0 pypi
+ attr 2.5.2 h39aace5_0 conda-forge
+ av 16.0.1 pypi_0 pypi
+ bzip2 1.0.8 hda65f42_8 conda-forge
+ ca-certificates 2025.10.5 hbd8a1cb_0 conda-forge
+ certifi 2025.10.5 pypi_0 pypi
+ charset-normalizer 3.4.4 pypi_0 pypi
+ click 8.3.0 pypi_0 pypi
+ contourpy 1.3.2 pypi_0 pypi
+ cuda-cccl_linux-64 13.0.85 ha770c72_0 conda-forge
+ cuda-command-line-tools 13.0.2 ha770c72_0 conda-forge
+ cuda-compiler 12.2.2 0 nvidia/label/cuda-12.2.2
+ cuda-crt-dev_linux-64 13.0.88 ha770c72_0 conda-forge
+ cuda-cudart 13.0.96 hecca717_0 conda-forge
+ cuda-cudart-dev 13.0.96 hecca717_0 conda-forge
+ cuda-cudart-dev_linux-64 13.0.96 h376f20c_0 conda-forge
+ cuda-cudart-static 13.0.96 hecca717_0 conda-forge
+ cuda-cudart-static_linux-64 13.0.96 h376f20c_0 conda-forge
+ cuda-cudart_linux-64 13.0.96 h376f20c_0 conda-forge
+ cuda-culibos-static 13.0.85 h676940d_0 conda-forge
+ cuda-cuobjdump 13.0.85 hffce074_0 conda-forge
+ cuda-cupti 13.0.85 h676940d_0 conda-forge
+ cuda-cupti-dev 13.0.85 h676940d_0 conda-forge
+ cuda-cuxxfilt 13.0.85 hffce074_0 conda-forge
+ cuda-documentation 12.2.140 0 nvidia/label/cuda-12.2.2
+ cuda-driver-dev 13.0.96 hecca717_0 conda-forge
+ cuda-driver-dev_linux-64 13.0.96 h376f20c_0 conda-forge
+ cuda-gdb 13.0.85 h1b59fc5_0 conda-forge
+ cuda-libraries 13.0.2 ha770c72_0 conda-forge
+ cuda-libraries-dev 13.0.2 ha770c72_0 conda-forge
+ cuda-libraries-static 13.0.2 ha770c72_0 conda-forge
+ cuda-nsight 13.0.85 h7938cbb_0 conda-forge
+ cuda-nvcc 12.2.140 0 nvidia/label/cuda-12.2.2
+ cuda-nvdisasm 13.0.85 hffce074_0 conda-forge
+ cuda-nvml-dev 13.0.87 hffce074_0 conda-forge
+ cuda-nvprune 13.0.85 hffce074_0 conda-forge
+ cuda-nvrtc 13.0.88 hecca717_0 conda-forge
+ cuda-nvrtc-dev 13.0.88 hecca717_0 conda-forge
+ cuda-nvrtc-static 13.0.88 hecca717_0 conda-forge
+ cuda-nvtx 13.0.85 hecca717_0 conda-forge
+ cuda-opencl 13.0.85 hecca717_0 conda-forge
+ cuda-opencl-dev 13.0.85 hecca717_0 conda-forge
+ cuda-profiler-api 13.0.85 h7938cbb_0 conda-forge
+ cuda-sanitizer-api 13.0.85 h10ca0ad_0 conda-forge
+ cuda-toolkit 12.2.2 0 nvidia/label/cuda-12.2.2
+ cuda-tools 13.0.2 ha770c72_0 conda-forge
+ cuda-version 13.0 hc7b4dd1_3 conda-forge
+ cuda-visual-tools 13.0.2 ha770c72_0 conda-forge
+ cycler 0.12.1 pypi_0 pypi
+ dbus 1.16.2 h3c4dab8_0 conda-forge
+ deepspeed 0.17.1 pypi_0 pypi
+ einops 0.8.1 pypi_0 pypi
+ exceptiongroup 1.3.0 pypi_0 pypi
+ filelock 3.20.0 pypi_0 pypi
+ flash-attn 2.7.4.post1 pypi_0 pypi
+ font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge
+ font-ttf-inconsolata 3.000 h77eed37_0 conda-forge
+ font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge
+ font-ttf-ubuntu 0.83 h77eed37_3 conda-forge
+ fontconfig 2.15.0 h7e30c49_1 conda-forge
+ fonts-conda-ecosystem 1 0 conda-forge
+ fonts-conda-forge 1 0 conda-forge
+ fonttools 4.60.1 pypi_0 pypi
+ freetype 2.14.1 ha770c72_0 conda-forge
+ fsspec 2025.10.0 pypi_0 pypi
+ gds-tools 1.15.1.6 hecca717_0 conda-forge
+ gitdb 4.0.12 pypi_0 pypi
+ gitpython 3.1.45 pypi_0 pypi
+ gmp 6.3.0 hac33072_2 conda-forge
+ h11 0.16.0 pypi_0 pypi
+ hf-xet 1.2.0 pypi_0 pypi
+ hjson 3.1.0 pypi_0 pypi
+ httpcore 1.0.9 pypi_0 pypi
+ httpx 0.28.1 pypi_0 pypi
+ huggingface-hub 1.0.0rc6 pypi_0 pypi
+ idna 3.11 pypi_0 pypi
+ jinja2 3.1.6 pypi_0 pypi
+ keyutils 1.6.3 hb9d3cd8_0 conda-forge
+ kiwisolver 1.4.9 pypi_0 pypi
+ krb5 1.21.3 h659f571_0 conda-forge
+ ld_impl_linux-64 2.44 h1aa0949_4 conda-forge
+ libcap 2.76 h0b2e76d_0 conda-forge
+ libcublas 13.1.0.3 h676940d_0 conda-forge
+ libcublas-dev 13.1.0.3 h676940d_0 conda-forge
+ libcublas-static 13.1.0.3 h676940d_0 conda-forge
+ libcufft 12.0.0.61 hecca717_0 conda-forge
+ libcufft-dev 12.0.0.61 hecca717_0 conda-forge
+ libcufft-static 12.0.0.61 hecca717_0 conda-forge
+ libcufile 1.15.1.6 hbc026e6_0 conda-forge
+ libcufile-dev 1.15.1.6 hecca717_0 conda-forge
+ libcufile-static 1.15.1.6 hecca717_0 conda-forge
+ libcurand 10.4.0.35 h676940d_1 conda-forge
+ libcurand-dev 10.4.0.35 h676940d_1 conda-forge
+ libcurand-static 10.4.0.35 h676940d_1 conda-forge
+ libcusolver 12.0.4.66 h676940d_1 conda-forge
+ libcusolver-dev 12.0.4.66 h676940d_1 conda-forge
+ libcusolver-static 12.0.4.66 h676940d_1 conda-forge
+ libcusparse 12.6.3.3 hecca717_0 conda-forge
+ libcusparse-dev 12.6.3.3 hecca717_0 conda-forge
+ libcusparse-static 12.6.3.3 hecca717_0 conda-forge
+ libedit 3.1.20250104 pl5321h7949ede_0 conda-forge
+ libexpat 2.7.1 hecca717_0 conda-forge
+ libffi 3.5.2 h9ec8514_0 conda-forge
+ libfreetype 2.14.1 ha770c72_0 conda-forge
+ libfreetype6 2.14.1 h73754d4_0 conda-forge
+ libgcc 15.2.0 h767d61c_7 conda-forge
+ libgcc-ng 15.2.0 h69a702a_7 conda-forge
+ libgcrypt-lib 1.11.1 hb9d3cd8_0 conda-forge
+ libglib 2.86.1 h32235b2_1 conda-forge
+ libglvnd 1.7.0 ha4b6fd6_2 conda-forge
+ libgomp 15.2.0 h767d61c_7 conda-forge
+ libgpg-error 1.55 h3f2d84a_0 conda-forge
+ libiconv 1.18 h3b78370_2 conda-forge
+ liblzma 5.8.1 hb9d3cd8_2 conda-forge
+ libnl 3.11.0 hb9d3cd8_0 conda-forge
+ libnpp 13.0.1.2 h676940d_0 conda-forge
+ libnpp-dev 13.0.1.2 h676940d_0 conda-forge
+ libnpp-static 13.0.1.2 h676940d_0 conda-forge
+ libnsl 2.0.1 hb9d3cd8_1 conda-forge
+ libnuma 2.0.19 hee96239_0 defaults
+ libnvfatbin 13.0.85 hecca717_0 conda-forge
+ libnvfatbin-dev 13.0.85 hecca717_0 conda-forge
+ libnvfatbin-static 13.0.85 hecca717_0 conda-forge
+ libnvjitlink 13.0.88 hecca717_0 conda-forge
+ libnvjitlink-dev 13.0.88 hecca717_0 conda-forge
+ libnvjitlink-static 13.0.88 hecca717_0 conda-forge
+ libnvjpeg 13.0.1.86 hecca717_0 conda-forge
+ libnvjpeg-dev 13.0.1.86 ha770c72_0 conda-forge
+ libnvjpeg-static 13.0.1.86 ha770c72_0 conda-forge
+ libopengl 1.7.0 ha4b6fd6_2 conda-forge
+ libpng 1.6.50 h421ea60_1 conda-forge
+ libsqlite 3.50.4 h0c1763c_0 conda-forge
+ libstdcxx 15.2.0 h8f9b012_7 conda-forge
+ libstdcxx-ng 15.2.0 h4852527_7 conda-forge
+ libsystemd0 257.9 h996ca69_0 conda-forge
+ libudev1 257.9 h085a93f_0 conda-forge
+ libuuid 2.41.2 he9a06e4_0 conda-forge
+ libxcb 1.17.0 h8a09558_0 conda-forge
+ libxcrypt 4.4.36 hd590300_1 conda-forge
+ libxkbcommon 1.12.3 hca5e8e5_0 conda-forge
+ libxkbfile 1.1.0 h166bdaf_1 conda-forge
+ libxml2 2.15.1 h031cc0b_0 conda-forge
+ libxml2-16 2.15.1 hf2a90c1_0 conda-forge
+ libzlib 1.3.1 hb9d3cd8_2 conda-forge
+ lz4-c 1.10.0 h5888daf_1 conda-forge
+ markupsafe 3.0.3 pypi_0 pypi
+ matplotlib 3.10.7 pypi_0 pypi
+ mpmath 1.3.0 pypi_0 pypi
+ msgpack 1.1.2 pypi_0 pypi
+ ncurses 6.5 h2d0b736_3 conda-forge
+ networkx 3.4.2 pypi_0 pypi
+ ninja 1.13.0 pypi_0 pypi
+ nsight-compute 2025.3.1.4 h6a507f3_0 conda-forge
+ nspr 4.37 h29cc59b_0 conda-forge
+ nss 3.117 h445c969_0 conda-forge
+ numpy 2.2.6 pypi_0 pypi
+ nvidia-cublas-cu12 12.4.5.8 pypi_0 pypi
+ nvidia-cuda-cupti-cu12 12.4.127 pypi_0 pypi
+ nvidia-cuda-nvrtc-cu12 12.4.127 pypi_0 pypi
+ nvidia-cuda-runtime-cu12 12.4.127 pypi_0 pypi
+ nvidia-cudnn-cu12 9.1.0.70 pypi_0 pypi
+ nvidia-cufft-cu12 11.2.1.3 pypi_0 pypi
+ nvidia-curand-cu12 10.3.5.147 pypi_0 pypi
+ nvidia-cusolver-cu12 11.6.1.9 pypi_0 pypi
+ nvidia-cusparse-cu12 12.3.1.170 pypi_0 pypi
+ nvidia-cusparselt-cu12 0.6.2 pypi_0 pypi
+ nvidia-nccl-cu12 2.21.5 pypi_0 pypi
+ nvidia-nvjitlink-cu12 12.4.127 pypi_0 pypi
+ nvidia-nvtx-cu12 12.4.127 pypi_0 pypi
+ ocl-icd 2.3.3 hb9d3cd8_0 conda-forge
+ opencl-headers 2025.06.13 h5888daf_0 conda-forge
+ opencv-python 4.12.0.88 pypi_0 pypi
+ openssl 3.5.4 h26f9b46_0 conda-forge
+ packaging 25.0 pypi_0 pypi
+ parmap 1.7.0 pypi_0 pypi
+ pcre2 10.46 h1321c63_0 conda-forge
+ peft 0.17.1 pypi_0 pypi
+ pillow 12.0.0 pypi_0 pypi
+ pip 25.2 pyh8b19718_0 conda-forge
+ platformdirs 4.5.0 pypi_0 pypi
+ protobuf 6.33.0 pypi_0 pypi
+ psutil 7.1.2 pypi_0 pypi
+ pthread-stubs 0.4 hb9d3cd8_1002 conda-forge
+ py-cpuinfo 9.0.0 pypi_0 pypi
+ pydantic 2.12.3 pypi_0 pypi
+ pydantic-core 2.41.4 pypi_0 pypi
+ pyparsing 3.2.5 pypi_0 pypi
+ python 3.10.19 h3c07f61_2_cpython conda-forge
+ python-dateutil 2.9.0.post0 pypi_0 pypi
+ pyyaml 6.0.3 pypi_0 pypi
+ qwen-vl-utils 0.0.14 pypi_0 pypi
+ rdma-core 60.0 hecca717_0 conda-forge
+ readline 8.3 hc2a1206_0 defaults
+ regex 2025.10.23 pypi_0 pypi
+ requests 2.32.5 pypi_0 pypi
+ safetensors 0.6.2 pypi_0 pypi
+ sentry-sdk 2.43.0 pypi_0 pypi
+ setuptools 80.9.0 pyhff2d567_0 conda-forge
+ six 1.17.0 pypi_0 pypi
+ smmap 5.0.2 pypi_0 pypi
+ sniffio 1.3.1 pypi_0 pypi
+ sympy 1.13.1 pypi_0 pypi
+ tk 8.6.15 h54e0aa7_0 defaults
+ tokenizers 0.22.1 pypi_0 pypi
+ torch 2.6.0 pypi_0 pypi
+ torchcodec 0.2.0 pypi_0 pypi
+ torchvision 0.21.0 pypi_0 pypi
+ tqdm 4.67.1 pypi_0 pypi
+ transformers 5.0.0.dev0 pypi_0 pypi
+ triton 3.2.0 pypi_0 pypi
+ typer-slim 0.20.0 pypi_0 pypi
+ typing-extensions 4.15.0 pypi_0 pypi
+ typing-inspection 0.4.2 pypi_0 pypi
+ tzdata 2025b h78e105d_0 conda-forge
+ urllib3 2.5.0 pypi_0 pypi
+ wandb 0.22.3 pypi_0 pypi
+ wayland 1.24.0 hd6090a7_1 conda-forge
+ wheel 0.45.1 pyhd8ed1ab_1 conda-forge
+ xcb-util 0.4.1 h4f16b4b_2 conda-forge
+ xcb-util-cursor 0.1.5 hb9d3cd8_0 conda-forge
+ xcb-util-image 0.4.0 hb711507_2 conda-forge
+ xcb-util-keysyms 0.4.1 hb711507_0 conda-forge
+ xcb-util-renderutil 0.3.10 hb711507_0 conda-forge
+ xcb-util-wm 0.4.2 hb711507_0 conda-forge
+ xkeyboard-config 2.46 hb03c661_0 conda-forge
+ xorg-libice 1.1.2 hb9d3cd8_0 conda-forge
+ xorg-libsm 1.2.6 he73a12e_0 conda-forge
+ xorg-libx11 1.8.12 h4f16b4b_0 conda-forge
+ xorg-libxau 1.0.12 hb9d3cd8_0 conda-forge
+ xorg-libxcomposite 0.4.6 hb9d3cd8_2 conda-forge
+ xorg-libxdamage 1.1.6 hb9d3cd8_0 conda-forge
+ xorg-libxdmcp 1.1.5 hb9d3cd8_0 conda-forge
+ xorg-libxext 1.3.6 hb9d3cd8_0 conda-forge
+ xorg-libxfixes 6.0.2 hb03c661_0 conda-forge
+ xorg-libxi 1.8.2 hb9d3cd8_0 conda-forge
+ xorg-libxrandr 1.5.4 hb9d3cd8_0 conda-forge
+ xorg-libxrender 0.9.12 hb9d3cd8_0 conda-forge
+ xorg-libxtst 1.2.5 hb9d3cd8_3 conda-forge
+ zlib 1.3.1 hb9d3cd8_2 conda-forge
+ zstd 1.5.7 hb8e6e7a_2 conda-forge
qwen_vanilla/gqa.py ADDED
@@ -0,0 +1,139 @@
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ from PIL import Image
+ import copy
+ import torch
+ from tqdm import tqdm
+ import json
+ from anls import anls_score
+ import torch.profiler
+ import os
+
+ # os.environ["CUDA_VISIBLE_DEVICES"] = '1'
+
+ ### Dataset Information ###
+ data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/temp/image/images/"
+ qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/gqa/pinpoint_gqa_val.json"
+
+ device_map = "auto"
+ model_path = "Qwen/Qwen2-VL-7B-Instruct"
+ # model_path = "/root/Desktop/workspace/kwon/pinpoint/qwen_pinpoint/ckpt/info_pinpoint02"
+
+ # default: Load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     model_path, torch_dtype=torch.bfloat16, device_map=device_map
+ )
+
+ # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "Qwen/Qwen2-VL-7B-Instruct",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained(model_path)
+
+ # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
+ # min_pixels = 12544  # original
+ # min_pixels = 1204224
+ min_pixels = 2408448
+ # max_pixels = 1605632
+ max_pixels = 3211264
+ processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
+
+ with open(qa_path, "r", encoding="utf-8") as file:
+     qa_data = json.load(file)
+
+ total_ANLS = 0
+ total_processed = 0
+ total_len = 0
+ total_time = 0.0
+ total_flops = 0.0
+
+ pbar = tqdm(qa_data)
+ start_event = torch.cuda.Event(enable_timing=True)
+ end_event = torch.cuda.Event(enable_timing=True)
+
+ for entry in pbar:
+     image_path = data_path + entry['image']
+     image = Image.open(image_path).convert("RGB")
+     ques = entry['question']
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image,
+                 },
+                 {"type": "text", "text": f"{ques} \n Give me just an answer."},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     start_event.record()
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+
+     # with torch.no_grad():
+     #     with torch.profiler.profile(
+     #         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
+     #         with_flops=True,
+     #         profile_memory=False,
+     #         record_shapes=False
+     #     ) as prof:
+     #         generated_ids = model.generate(**inputs, max_new_tokens=128)
+     # current_flops = sum([event.flops for event in prof.key_averages() if event.flops is not None])
+
+     generated_ids = model.generate(**inputs, max_new_tokens=128)
+     current_flops = 0
+
+     generated_ids_trimmed = [
+         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+     text_outputs = output_text[0]
+     end_event.record()
+     torch.cuda.synchronize()
+     elapsed_time = start_event.elapsed_time(end_event)
+     total_time += elapsed_time
+
+     ANLS_Score = anls_score(prediction=text_outputs, gold_labels=[entry['answer']])
+     print(entry['question'])
+     print(text_outputs)
+     print(ANLS_Score)
+     print("\n")
+     # Update counters
+     total_processed += 1
+     print(f"{total_time / total_processed}ms")
+     total_ANLS += ANLS_Score
+     total_len += 0
+     total_flops += current_flops
+
+     # Calculate and update the accuracy in the progress bar description
+     if total_processed > 0:
+         pbar.set_description(f"Processing | ANLS: {total_ANLS / total_processed:.3f} | Token Length: {total_len / total_processed:.2f} | FLOPs: {(total_flops / (total_processed * 1e12)):.2f} TFLOPs")
+
+ print(f"\nFinal ANLS: {(total_ANLS / len(qa_data)):.4f}")
+ print(f"Final Token Length: {total_len / len(qa_data):.2f}")
+ print(f"Average FLOPs: {total_flops / (len(qa_data) * 1e12):.2f} TFLOPs")
+ print(f"Average Response Time: {total_time / len(qa_data):.4f}ms")
qwen_vanilla/info.py ADDED
@@ -0,0 +1,133 @@
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ from PIL import Image
+ import copy
+ import torch
+ from tqdm import tqdm
+ import json
+ from anls import anls_score
+ import torch.profiler
+ import os
+
+ ### Dataset Information ###
+ data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/infographic/images/"
+ qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/info/pinpoint_info_val.json"
+ device_map = "auto"
+ model_path = "Qwen/Qwen2-VL-7B-Instruct"
+ # model_path = "/root/Desktop/workspace/kwon/pinpoint/qwen_pinpoint/ckpt/info_pinpoint02"
+
+ # default: Load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     model_path, torch_dtype=torch.bfloat16, device_map=device_map
+ )
+
+ # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "Qwen/Qwen2-VL-7B-Instruct",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained(model_path)
+
+ # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
+ min_pixels = 12544
+ max_pixels = 3211264  # 4096 x 28 x 28
+ processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
+
+ with open(qa_path, "r", encoding="utf-8") as file:
+     qa_data = json.load(file)
+
+ total_ANLS = 0
+ total_processed = 0
+ total_len = 0
+ total_time = 0.0
+ total_flops = 0.0
+
+ pbar = tqdm(qa_data)
+ start_event = torch.cuda.Event(enable_timing=True)
+ end_event = torch.cuda.Event(enable_timing=True)
+
+ for entry in pbar:
+     image_path = data_path + entry['image']
+     image = Image.open(image_path).convert("RGB")
+     ques = entry['question']
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image,
+                 },
+                 {"type": "text", "text": f"{ques} \n Give me just an answer."},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     start_event.record()
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+
+     # with torch.no_grad():
+     #     with torch.profiler.profile(
+     #         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
+     #         with_flops=True,
+     #         profile_memory=False,
+     #         record_shapes=False
+     #     ) as prof:
+     #         generated_ids = model.generate(**inputs, max_new_tokens=128)
+     # current_flops = sum([event.flops for event in prof.key_averages() if event.flops is not None])
+
+     generated_ids = model.generate(**inputs, max_new_tokens=128)
+     current_flops = 0
+
+     generated_ids_trimmed = [
+         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+     text_outputs = output_text[0]
+     end_event.record()
+     torch.cuda.synchronize()
+     elapsed_time = start_event.elapsed_time(end_event)
+     total_time += elapsed_time
+
+     ANLS_Score = anls_score(prediction=text_outputs, gold_labels=entry['answers'])
+     print(entry['question'])
+     print(text_outputs)
+     print(ANLS_Score)
+     print("\n")
+     # Update counters
+     total_processed += 1
+     print(f"{total_time / total_processed}ms")
+     total_ANLS += ANLS_Score
+     total_len += 0
+     total_flops += current_flops
+
+     # Calculate and update the accuracy in the progress bar description
+     if total_processed > 0:
+         pbar.set_description(f"Processing | ANLS: {total_ANLS / total_processed:.3f} | Token Length: {total_len / total_processed:.2f} | FLOPs: {(total_flops / (total_processed * 1e12)):.2f} TFLOPs")
+
+ print(f"\nFinal ANLS: {(total_ANLS / len(qa_data)):.4f}")
+ print(f"Final Token Length: {total_len / len(qa_data):.2f}")
+ print(f"Average FLOPs: {total_flops / (len(qa_data) * 1e12):.2f} TFLOPs")
+ print(f"Average Response Time: {total_time / len(qa_data):.4f}ms")
qwen_vanilla/mp.py ADDED
@@ -0,0 +1,136 @@
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ from PIL import Image
+ import copy
+ import torch
+ from tqdm import tqdm
+ import json
+ from anls import anls_score
+ import torch.profiler
+ import os
+ Image.MAX_IMAGE_PIXELS = None
+
+ # os.environ["CUDA_VISIBLE_DEVICES"] = '1'
+
+ ### Dataset Information ###
+ data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/mpdoc/combined_mpdoc/images/"
+ qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/mpdoc/combined_mpdoc/val.json"
+ device_map = "auto"
+ model_path = "Qwen/Qwen2-VL-7B-Instruct"
+ # model_path = "/root/Desktop/workspace/kwon/pinpoint/qwen_pinpoint/ckpt/info_pinpoint02"
+
+ # default: Load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     model_path, torch_dtype=torch.bfloat16, device_map=device_map
+ )
+
+ # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "Qwen/Qwen2-VL-7B-Instruct",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained(model_path)
+
+ # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
+ min_pixels = 12544
+ max_pixels = 3211264
+ processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
+
+ with open(qa_path, "r", encoding="utf-8") as file:
+     qa_data = json.load(file)
+
+ total_ANLS = 0
+ total_processed = 0
+ total_len = 0
+ total_time = 0.0
+ total_flops = 0.0
+
+ pbar = tqdm(qa_data)
+ start_event = torch.cuda.Event(enable_timing=True)
+ end_event = torch.cuda.Event(enable_timing=True)
+
+ for entry in pbar:
+     image_path = data_path + entry['image']
+     image = Image.open(image_path).convert("RGB")
+     ques = entry['question']
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image,
+                 },
+                 {"type": "text", "text": f"{ques} \n Give me just an answer."},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     start_event.record()
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+
+     # with torch.no_grad():
+     #     with torch.profiler.profile(
+     #         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
+     #         with_flops=True,
+     #         profile_memory=False,
+     #         record_shapes=False
+     #     ) as prof:
+     #         generated_ids = model.generate(**inputs, max_new_tokens=128)
+     # current_flops = sum([event.flops for event in prof.key_averages() if event.flops is not None])
+
+     generated_ids = model.generate(**inputs, max_new_tokens=128)
+     current_flops = 0
+
+     generated_ids_trimmed = [
+         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+     text_outputs = output_text[0]
+     end_event.record()
+     torch.cuda.synchronize()
+     elapsed_time = start_event.elapsed_time(end_event)
+     total_time += elapsed_time
+
+     ANLS_Score = anls_score(prediction=text_outputs, gold_labels=entry['answers'])
+     print(entry['question'])
+     print(text_outputs)
+     print(ANLS_Score)
+     print("\n")
+     # Update counters
+     total_processed += 1
+     print(f"{total_time / total_processed}ms")
+     total_ANLS += ANLS_Score
+     total_len += 0
+     total_flops += current_flops
+
+     # Calculate and update the accuracy in the progress bar description
+     if total_processed > 0:
+         pbar.set_description(f"Processing | ANLS: {total_ANLS / total_processed:.3f} | Token Length: {total_len / total_processed:.2f} | FLOPs: {(total_flops / (total_processed * 1e12)):.2f} TFLOPs")
+
+ print(f"\nFinal ANLS: {(total_ANLS / len(qa_data)):.4f}")
+ print(f"Final Token Length: {total_len / len(qa_data):.2f}")
+ print(f"Average FLOPs: {total_flops / (len(qa_data) * 1e12):.2f} TFLOPs")
+ print(f"Average Response Time: {total_time / len(qa_data):.4f}ms")
qwen_vanilla/sp.py ADDED
@@ -0,0 +1,135 @@
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ from PIL import Image
+ import copy
+ import torch
+ from tqdm import tqdm
+ import json
+ from anls import anls_score
+ import torch.profiler
+ import os
+
+ # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
+
+ ### Dataset Information ###
+ data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/spdoc/"
+ qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/spdoc/pinpoint_spdoc_val.json"
+ device_map = "auto"
+ model_path = "Qwen/Qwen2-VL-7B-Instruct"
+ # model_path = "/root/Desktop/workspace/kwon/pinpoint/qwen_pinpoint/ckpt/info_pinpoint02"
+
+ # default: Load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     model_path, torch_dtype=torch.bfloat16, device_map=device_map
+ )
+
+ # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "Qwen/Qwen2-VL-7B-Instruct",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained(model_path)
+
+ # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
+ min_pixels = 12544
+ max_pixels = 3211264
+ processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
+
+ with open(qa_path, "r", encoding="utf-8") as file:
+     qa_data = json.load(file)
+
+ total_ANLS = 0
+ total_processed = 0
+ total_len = 0
+ total_time = 0.0
+ total_flops = 0.0
+
+ pbar = tqdm(qa_data)
+ start_event = torch.cuda.Event(enable_timing=True)
+ end_event = torch.cuda.Event(enable_timing=True)
+
+ for entry in pbar:
+     image_path = data_path + entry['image']
+     image = Image.open(image_path).convert("RGB")
+     ques = entry['question']
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image,
+                 },
+                 {"type": "text", "text": f"{ques} \n Give me just an answer."},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     start_event.record()
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+
+     # with torch.no_grad():
+     #     with torch.profiler.profile(
+     #         activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
+     #         with_flops=True,
+     #         profile_memory=False,
+     #         record_shapes=False
+     #     ) as prof:
+     #         generated_ids = model.generate(**inputs, max_new_tokens=128)
+     # current_flops = sum([event.flops for event in prof.key_averages() if event.flops is not None])
+
+     generated_ids = model.generate(**inputs, max_new_tokens=128)
+     current_flops = 0
+
+     generated_ids_trimmed = [
+         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+     text_outputs = output_text[0]
+     end_event.record()
+     torch.cuda.synchronize()
+     elapsed_time = start_event.elapsed_time(end_event)
+     total_time += elapsed_time
+
+     ANLS_Score = anls_score(prediction=text_outputs, gold_labels=entry['answers'])
+     print(entry['question'])
+     print(text_outputs)
+     print(ANLS_Score)
+     print("\n")
+     # Update counters
+     total_processed += 1
+     print(f"{total_time / total_processed}ms")
+     total_ANLS += ANLS_Score
+     total_len += 0
+     total_flops += current_flops
+
+     # Calculate and update the accuracy in the progress bar description
+     if total_processed > 0:
+         pbar.set_description(f"Processing | ANLS: {total_ANLS / total_processed:.3f} | Token Length: {total_len / total_processed:.2f} | FLOPs: {(total_flops / (total_processed * 1e12)):.2f} TFLOPs")
+
+ print(f"\nFinal ANLS: {(total_ANLS / len(qa_data)):.4f}")
+ print(f"Final Token Length: {total_len / len(qa_data):.2f}")
+ print(f"Average FLOPs: {total_flops / (len(qa_data) * 1e12):.2f} TFLOPs")
+ print(f"Average Response Time: {total_time / len(qa_data):.4f}ms")
textvqa.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:001a754102a81c95d33ea6d92b9b608f7292f8f2915a47e84b6ff62fda1a3eaa
+ size 7078769179