WUBIAO committed
Commit 856189e · verified · 1 Parent(s): 50f3563

Upload inference_llava.py with huggingface_hub
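For context, a minimal sketch of how an upload like this is done with the huggingface_hub client (the repo id below is a placeholder, not the actual target repo):

    from huggingface_hub import HfApi

    api = HfApi()
    # Uploads one local file to the Hub as a single commit.
    api.upload_file(
        path_or_fileobj="inference_llava.py",
        path_in_repo="inference_llava.py",
        repo_id="your-username/your-repo",  # placeholder
        commit_message="Upload inference_llava.py with huggingface_hub",
    )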

Files changed (1)
  1. inference_llava.py +198 -0
inference_llava.py ADDED
@@ -0,0 +1,198 @@
import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path

from PIL import Image
import math
########################################################
# Restrict inference to a single GPU. torch initializes CUDA lazily,
# so setting this after `import torch` (but before any CUDA call) still takes effect.
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
########################################################

# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_lora8_demo'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_iter2000_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_10000_multidig_v1_P020_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_10000_multidig_v1_e1_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_motivation_iter400_e3_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_motivation_iter400_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e1000_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e050_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e020_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e010_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e005_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e5_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-mistral-7b'

# LoRA checkpoint to evaluate; the commented paths above/below are earlier runs.
args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_e1_mistral_0807'
# args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_e5_0802'

# args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_blip_e10_H800_0806'

# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_iter2000_0715_03'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_iter2000_0715'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_non_iter2000_0715'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_template_H800'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_template'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_single_blip_iter2000_0709'
# args_model_path = '/code/ICLR_2024/Model/checkpoints/llava-v1.6-7b-task-lora_single_blip_a100_e1000_0703'

# Base model that the LoRA adapter weights are applied on top of.
args_model_base = '/data/zbz5349/ICLR_2024/Model/llava-v1.6-mistral-7b'
# args_model_base = '/scratch/zbz5349/ICLR_2024/LLaVA_Mobile_V1/init_model/llava-v1.6-vicuna-7b'
# args_model_base = 'xtuner/llava-phi-3-mini'

# disable_torch_init() skips redundant default weight initialization to speed up loading.
disable_torch_init()
model_path = os.path.expanduser(args_model_path)
model_name = get_model_name_from_path(model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args_model_base, model_name)

# ############################
# # model = model.bfloat16()
# tokenizer.pad_token = "[PAD]"
# tokenizer.padding_side = "left"
# ############################
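# Note: in the LLaVA reference builder, load_pretrained_model() detects "lora"
# in the checkpoint name and, given args_model_base, merges the adapter weights
# into the base model before returning (tokenizer, model, image_processor,
# context_len). Exact behavior may vary across LLaVA versions.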

def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division so no element is dropped
    return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]


def get_chunk(lst, n, k):
    """Return the k-th of n chunks, for sharding work across processes."""
    chunks = split_list(lst, n)
    return chunks[k]
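# Worked example of the chunking helpers (10 items split 3 ways,
# so chunk_size = ceil(10/3) = 4):
#   split_list(list(range(10)), 3)   -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
#   get_chunk(list(range(10)), 3, 1) -> [4, 5, 6, 7]
# With args_num_chunks = 1 below, get_chunk() simply returns the whole list.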


def read_json(file_path):
    with open(file_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    return data

def write_json(file_path, data):
    with open(file_path, 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False, indent=4)

# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800_caption_history_without_label_v3.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava.json'

# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/single_blip_test_llava_800_all_e1_H800.json'

# Evaluation questions (LLaVA-format JSON) and where to write predictions.
args_question_file = '/data/zbz5349/ICLR_2024/data/general_blip_test_llava.json'
args_answers_file = '/data/zbz5349/ICLR_2024/json/general_blip_test_llava_all_e1_mistral_0808.json'

# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/install_blip_test_llava.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/install_blip_test_llava_all_e1_H800.json'

# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_non_400.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_non_400_2000iter.json'

# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_400.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_400_2000iter.json'

args_num_chunks = 1
args_chunk_idx = 0

# Load the evaluation set and (optionally) shard it across workers.
questions = read_json(os.path.expanduser(args_question_file))
questions = get_chunk(questions, args_num_chunks, args_chunk_idx)
answers_file = os.path.expanduser(args_answers_file)
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
ans_file = open(answers_file, "w")  # note: predictions are saved via write_json() at the end; this handle is otherwise unused


args_image_folder = '/data/zbz5349/ICLR_2024/data/'
args_single_pred_prompt = True
args_conv_mode = "llava_v0"

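# Each record in the questions file is expected to look roughly like this
# (illustrative; inferred from the fields accessed in the loop below):
# {
#     "id": "...",
#     "image": "relative/path/to/screenshot.png",   # optional
#     "conversations": [
#         {"from": "human", "value": "<image>\n<instruction>"},
#         {"from": "gpt", "value": "<ground-truth answer>"}
#     ]
# }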
right_answer = []
for i, line in enumerate(tqdm(questions)):

    idx = line["id"]
    question = line['conversations'][0]
    qs = question['value'].replace('<image>', '').strip()
    cur_prompt = qs

    if 'image' in line:
        image_file = line["image"]
        image = Image.open(os.path.join(args_image_folder, image_file))
        image_tensor = process_images([image], image_processor, model.config)[0]
        images = image_tensor.unsqueeze(0).half().cuda()
        image_sizes = [image.size]
        # Re-insert the image token that was stripped from the question above.
        if getattr(model.config, 'mm_use_im_start_end', False):
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
        cur_prompt = '<image>' + '\n' + cur_prompt
    else:
        images = None
        image_sizes = None

    if args_single_pred_prompt:
        # qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
        # cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly."
        qs = qs + '\n'

    conv = conv_templates[args_conv_mode].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
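    # For reference: with the "llava_v0" template the prompt is roughly
    # "<system text>###Human: <image>\n<question>###Assistant:" (exact wording
    # depends on the installed LLaVA version; treat this as an illustration).
    # tokenizer_image_token() replaces the <image> placeholder with
    # IMAGE_TOKEN_INDEX so the vision features can be spliced in at that position.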

    # import pdb; pdb.set_trace()
    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=images,
            image_sizes=image_sizes,
            do_sample=True,  # temperature 0.2 > 0, so sampling stays enabled
            temperature=0.2,
            max_new_tokens=1024,
            use_cache=True,
        )

    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

    # import pprint
    # pprint.pprint(outputs)
    # print('-------------------------------')
    # pprint.pprint(line['conversations'][1]['value'])
    # print('===========================================================')

    # Pair the ground-truth answer with the model's prediction.
    temp = {}
    temp['gt'] = line['conversations'][1]['value']
    temp['pred'] = outputs
    right_answer.append(temp)

# Persist all (gt, pred) pairs once the loop finishes.
write_json(args_answers_file, right_answer)