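"""Visualize attention maps of a LLaVA-style model (with or without MMPE) on MMBench.

The script loads a fine-tuned checkpoint, runs one MMBench example through a patched
forward pass that also exposes the multimodal position_ids, overlays those position
ids on the image grid, and saves per-layer attention heatmaps for chosen target
tokens.
"""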
import argparse
import os
from typing import List, Optional, Tuple, Union

import datasets
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from PIL import Image
from scipy.ndimage import gaussian_filter
from transformers import AutoTokenizer, CLIPImageProcessor
from transformers.modeling_outputs import CausalLMOutputWithPast

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from llava.conversation import conv_templates
from llava.mm_utils import tokenizer_image_token, process_images
from llava.model import LlavaLlamaForCausalLM
from llava.utils import disable_torch_init

parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str,default="/jizhicfs/bojoli/mmpe/mmpe-main/checkpoints/mmpe_finetune_vicuna-7b-1.5_clip-vit-large-patch14-336/")
parser.add_argument('--image', type=str)
parser.add_argument('--prompt', type=str, default="Describe this image.")
parser.add_argument('--output', type=str,default='attention_map/mmpe')
parser.add_argument('--layer', type=int, default=32)
parser.add_argument('--w', action='store_true', help="use the MMPE checkpoint (otherwise the baseline trained without MMPE)")
parser.add_argument('--position', type=int, default=0)
parser.add_argument('--target_text', type=str, default=None)
args = parser.parse_args()
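
# Example invocation (hypothetical script name; flags are defined above):
#   python visualize_attention.py --w --target_text "magnetic force"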


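# Patched forward for LlavaLlamaForCausalLM: identical to the stock forward, except
# that it first runs prepare_inputs_labels_for_multimodal and (in the non-DPO path)
# returns the resulting multimodal position_ids alongside the usual output, so the
# analysis code below can map image-patch tokens back to spatial positions.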

def new_forward(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    images: Optional[torch.FloatTensor] = None,
    image_sizes: Optional[List[List[int]]] = None,
    return_dict: Optional[bool] = None,
    modalities: Optional[List[str]] = ["image"],
    dpo_forward: Optional[bool] = None,
    cache_position=None,
) -> Union[Tuple, CausalLMOutputWithPast]:

    if inputs_embeds is None:
        # Splice the projected image features into the text embeddings and build
        # the multimodal position_ids.
        (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes)

    if dpo_forward:
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        return logits, labels

    else:
        # Also return the multimodal position_ids so the caller can map image-patch
        # tokens to spatial positions.
        return position_ids, LlavaLlamaForCausalLM.forward(
            self,
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

if __name__ == "__main__":
    dataset_path = "/jizhicfs/bojoli/dataset/mmbench/en"
    # --w selects the MMPE checkpoint; otherwise use the baseline trained without MMPE.
    if args.w:
        args.model_path = "/jizhicfs/bojoli/mmpe/mmpe-main/checkpoints/final_mmpe_finetune_vicuna-7b-1.5_clip-vit-large-patch14-336/"
        args.output = "attention_map/mmpe"
    else:
        args.model_path = "/jizhicfs/bojoli/mmpe/mmpe-main/checkpoints/final_without_mmpe_finetune_vicuna-7b-1.5_clip-vit-large-patch14-336/"
        args.output = "attention_map/without_mmpe"
    mmbench_data = datasets.load_dataset(dataset_path, split='validation')
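    # Pick one fixed MMBench example to visualize.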
    
    for i in range(len(mmbench_data)):
        if mmbench_data[i]['question'] == 'Think about the magnetic force between the magnets in each pair. Which of the following statements is true?':
            print(mmbench_data[i])
            break

    width, height = mmbench_data[i]['image'].size
    mmbench_data[i]['image'].save('example.jpg')
    disable_torch_init()
    
    # Load the fine-tuned checkpoint, its tokenizer, and the CLIP image processor
    # that matches the model's vision tower.
    model = LlavaLlamaForCausalLM.from_pretrained(args.model_path).cuda()
    tokenizer = AutoTokenizer.from_pretrained(args.model_path)
    image_processor = CLIPImageProcessor.from_pretrained(model.config.mm_vision_tower)
    
    conv_mode = "llava_v1"
    conv = conv_templates[conv_mode].copy()
    
    # Preprocess the example image and build the llava_v1 prompt: the image
    # placeholder token, the question, then an empty assistant turn.
    image_data = [mmbench_data[i]['image']]
    image_tensor = process_images(image_data, image_processor, model.config).cuda()

    inp = DEFAULT_IMAGE_TOKEN + "\n" + mmbench_data[i]['question']
    conv.append_message(conv.roles[0], inp)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
    
    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
    # Bind the patched forward to this instance so calls below also return position_ids.
    model.forward = new_forward.__get__(model, LlavaLlamaForCausalLM)
    image_size_list = torch.Tensor([[width, height]]).cuda()
    
    # One forward pass with attention outputs; the patched forward also returns
    # the multimodal position_ids.
    with torch.no_grad():
        position_ids, output = model(
            input_ids,
            images=image_tensor,
            image_sizes=image_size_list,
            output_attentions=True,
            return_dict=True,
        )

    print(position_ids)
    
    # Flatten position_ids for inspection (avoid shadowing the built-in `list`).
    position_id_list = [p.item() for p in position_ids[0]]
    print(position_id_list)

    # If --position is given, find the sequence index of its *second* occurrence
    # among the position ids (the first hit is skipped).
    target_position = args.position
    if target_position != 0:
        seen_once = False
        for j in range(position_ids.shape[1]):
            if position_id_list[j] == target_position:
                if not seen_once:
                    seen_once = True
                else:
                    target_position = j
                    break
    num_layer = len(output.attentions)
    input_ids_list = input_ids[0].cpu().tolist()
    # Index of the image placeholder token in the raw input_ids (raises if absent).
    instruction_begin_index = input_ids_list.index(IMAGE_TOKEN_INDEX)
    # Number of instruction tokens that follow the image placeholder.
    len_instruc = len(input_ids[0]) - instruction_begin_index - 1
    # [begin_2, end_2) is the token span whose position ids are laid out on the
    # image grid below; 612 = 36 + 576, i.e. just after the base 24x24 grid of
    # image-patch tokens.
    begin_2 = 612
    end_2 = position_ids.shape[1] - len_instruc
    target_position = [target_position]
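
    # Lay the image-region position ids out on a 2D grid and overlay them on the
    # example image, so the spatial assignment of position ids can be inspected.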
    img = mmbench_data[i]['image']
    orig_width, orig_height = img.size
    img = img.resize((336, 336), Image.BILINEAR)
    
    # Aspect ratio of the original image.
    aspect_ratio = orig_width / orig_height

    # Number of position ids in [begin_2, end_2) to lay out on the grid.
    position_region_length = end_2 - begin_2

    # Choose grid dimensions that respect the aspect ratio: a wider image gets
    # more columns, a taller image more rows.
    if aspect_ratio >= 1.0:
        grid_width = int(np.sqrt(position_region_length * aspect_ratio))
        grid_height = int(position_region_length / grid_width)

        # Grow the grid until it can hold every position id.
        while grid_width * grid_height < position_region_length:
            grid_height += 1
    else:
        grid_height = int(np.sqrt(position_region_length / aspect_ratio))
        grid_width = int(position_region_length / grid_height)

        # Grow the grid until it can hold every position id.
        while grid_width * grid_height < position_region_length:
            grid_width += 1

    # Fill the grid row-major with the position ids from begin_2 to end_2
    # (this must happen before the grid is searched for the highlighted position).
    position_grid = np.zeros((grid_height, grid_width), dtype=int)
    for idx, pos_id in enumerate(range(begin_2, end_2)):
        if idx < grid_width * grid_height:
            position_grid[idx // grid_width, idx % grid_width] = pos_id
    
    # Overlay the highlighted position id on the resized image.
    plt.figure(figsize=(10, 10))
    plt.imshow(img)

    # Locate where the highlighted position id falls in the grid.
    highlighted_position = 950  # hard-coded position id to highlight
    highlighted_x = None
    highlighted_y = None

    for y in range(grid_height):
        for x in range(grid_width):
            if y * grid_width + x < position_region_length:
                pos_id = position_grid[y, x]

                # Center of this grid cell in the 336x336 resized image.
                img_x = int(x * 336 / grid_width + 336 / (2 * grid_width))
                img_y = int(y * 336 / grid_height + 336 / (2 * grid_height))

                # Remember the pixel coordinates of the highlighted position.
                if pos_id == highlighted_position:
                    highlighted_x = img_x
                    highlighted_y = img_y

    # If the position was found, mark it with a green circle.
    if highlighted_x is not None and highlighted_y is not None:
        # Circle radius derived from the grid cell size.
        circle_radius = min(336 / grid_width, 336 / grid_height) / 2

        circle = plt.Circle((highlighted_x, highlighted_y), circle_radius,
                            edgecolor='lime', facecolor='none', linewidth=3)
        plt.gca().add_patch(circle)

        # Label the highlighted position above the circle.
        plt.text(highlighted_x, highlighted_y - circle_radius - 5,
                 f"Position {highlighted_position}",
                 ha='center', va='center', color='lime',
                 fontweight='bold', fontsize=12,
                 bbox=dict(facecolor='black', alpha=0.7, pad=1))
    else:
        print(f"Warning: position {highlighted_position} not found in the grid")

    plt.axis('off')
    plt.title(f"Highlighted Position {highlighted_position} on Image")
    os.makedirs(args.output, exist_ok=True)
    plt.savefig(f"{args.output}/position_{highlighted_position}_highlight.png")
    plt.close()
    
    # Overlay every position id from begin_2 to end_2 on the image.
    plt.figure(figsize=(10, 10))
    plt.imshow(img)

    for y in range(grid_height):
        for x in range(grid_width):
            if y * grid_width + x < position_region_length:
                pos_id = position_grid[y, x]

                # Center of this grid cell in the 336x336 resized image.
                img_x = int(x * 336 / grid_width + 336 / (2 * grid_width))
                img_y = int(y * 336 / grid_height + 336 / (2 * grid_height))

                # Grid cell size in pixels.
                cell_width = 336 / grid_width
                cell_height = 336 / grid_height

                # Draw the cell border.
                rect = plt.Rectangle((img_x - cell_width/2, img_y - cell_height/2),
                                     cell_width, cell_height,
                                     linewidth=1, edgecolor='white', facecolor='none', alpha=0.3)
                plt.gca().add_patch(rect)

                # Label the cell with its position id (thin labels out on dense grids).
                if grid_width <= 10 or (x % 3 == 0 and y % 3 == 0):
                    plt.text(img_x, img_y, str(pos_id),
                             ha='center', va='center', color='white',
                             bbox=dict(facecolor='black', alpha=0.5, pad=1))

    plt.axis('off')
    plt.title("Position IDs from begin_2 to end_2 overlaid on image")
    os.makedirs(args.output, exist_ok=True)
    plt.savefig(f"{args.output}/position_ids_overlay.png")
    plt.close()
    
    # Hard-coded target token index; if --target_text is given, locate its tokens
    # in the prompt instead (shifting indices by the image-token expansion).
    target_position = [948]
    if args.target_text is not None:
        target_position = []
        target_tokens = tokenizer.tokenize(args.target_text)
        target_token_ids = tokenizer.convert_tokens_to_ids(target_tokens)
        offset = position_ids.shape[1] - input_ids.shape[1]  # image expansion shift
        for j in range(input_ids.shape[1]):
            if input_ids[0][j].item() in target_token_ids:
                target_position.append(j + offset)
    print(f"target_position: {target_position}")
    # --target_text may match nothing; fail fast instead of crashing later
    # (the original single-position fallback would index an empty list).
    if len(target_position) == 0:
        raise ValueError("no target positions found for --target_text")

    for k in range(num_layer):
        attention = output.attentions[k].squeeze(0)  # (heads, seq_len, seq_len)

        # Average the attention from every target position onto the 576 image-patch
        # tokens (sequence indices 36:612), then average over attention heads.
        avg_attention = torch.zeros_like(attention[:, 0, 36:612])
        for pos in target_position:
            avg_attention += attention[:, pos, 36:612]
        avg_attention = avg_attention / len(target_position)
        attention_target = avg_attention.mean(dim=0)

        # Sharpen with a scaled softmax and reshape to the 24x24 patch grid.
        attention_target = torch.softmax(attention_target * 200, dim=0).view(24, 24)
        attention_target = np.array(attention_target.cpu(), dtype=np.float32) * 100

        # Upsample the patch-level map to the image resolution and smooth it.
        img = mmbench_data[i]['image'].resize((336, 336), Image.BILINEAR)
        resized_attention = np.array(Image.fromarray((attention_target * 255).astype(np.uint8)).resize(img.size, resample=Image.BILINEAR))
        smoothed_attention = gaussian_filter(resized_attention, sigma=2)

        # Overlay the heatmap on the image and save one file per layer.
        plt.figure(figsize=(img.size[0] / 100, img.size[1] / 100))
        sns.heatmap(smoothed_attention, cmap="jet", alpha=0.5, zorder=2)
        plt.imshow(img, aspect='auto', zorder=1)
        plt.axis('off')
        os.makedirs(args.output, exist_ok=True)
        suffix = '_'.join(args.target_text.split()) if args.target_text else target_position[0]
        plt.savefig(f"{args.output}/attn_layer{k}_{suffix}.png")
        plt.close()


    print('done')