# NAACL-2024 / inference_llava_mistral.py
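"""Batch inference with a LoRA-tuned LLaVA-v1.6 (Mistral-7B) checkpoint.

Loads the merged model, reads a JSON file of AITW-style questions (optionally
paired with screenshots), generates a prediction for each one, and writes
{"gt": ..., "pred": ...} records to an answers JSON file.
"""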
import argparse
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
from PIL import Image
import math
########################################################
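# Pin inference to a single GPU; the device index below matches the original machine.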
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
########################################################
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_lora8_demo'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_iter2000_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_10000_multidig_v1_P020_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_10000_multidig_v1_e1_0709_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_motivation_iter400_e3_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_motivation_iter400_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e1000_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e050_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e020_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e010_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e005_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_single_e5_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b-aitw_merge'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-vicuna-7b'
# args_model_path = '/code/ICLR_2024/Model/llava-v1.6-mistral-7b'
args_model_path = '/scratch/zbz5349/ICLR_2024/LLaVA_Mobile_V1/checkpoints/llava-v1.6-7b-task-lora_three_blip_e1_lre4_mistral_0823'
# args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_e1_mistral_0807'
# args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_e5_0802'
# args_model_path = '/data/zbz5349/ICLR_2024/checkpoints/llava-v1.6-7b-task-lora_all_blip_e10_H800_0806'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_iter2000_0715_03'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_iter2000_0715'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_general_dual_non_iter2000_0715'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_template_H800'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_template'
# args_model_path = '/code/ICLR_2024/LLaVA/checkpoints/llava-v1.6-7b-task-lora_single_blip_iter2000_0709'
# args_model_path = '/code/ICLR_2024/Model/checkpoints/llava-v1.6-7b-task-lora_single_blip_a100_e1000_0703'
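# Base model that the LoRA adapter selected above is applied on top of.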
args_model_base = '/data/zbz5349/ICLR_2024/Model/llava-v1.6-mistral-7b'
# args_model_base = '/scratch/zbz5349/ICLR_2024/LLaVA_Mobile_V1/init_model/llava-v1.6-vicuna-7b'
# args_model_base = 'xtuner/llava-phi-3-mini'
disable_torch_init()
model_path = os.path.expanduser(args_model_path)
model_name = get_model_name_from_path(model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args_model_base, model_name)
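# load_pretrained_model returns the tokenizer, the model (with the LoRA adapter
# merged in, since model_name contains "lora" and a model_base is given), the
# vision tower's image processor, and the model's context length.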
# ############################
# # model = model.bfloat16()
# tokenizer.pad_token = "[PAD]"
# tokenizer.padding_side = "left"
# ############################
def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks."""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division so no items are dropped
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
    """Return the k-th of n chunks of lst (for sharding work across processes)."""
    chunks = split_list(lst, n)
    return chunks[k]
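# Example: get_chunk(list(range(10)), 3, 0) -> [0, 1, 2, 3]  (chunk size = ceil(10 / 3) = 4)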
def read_json(file_path):
    with open(file_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    return data

def write_json(file_path, data):
    with open(file_path, 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False, indent=4)
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800_caption_history_without_label_v3.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/single_blip_test_llava_800.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/single_blip_test_llava_800_all_e1_H800.json'
args_question_file = '/data/zbz5349/ICLR_2024/data/general_blip_test_llava.json'
args_answers_file = '/data/zbz5349/ICLR_2024/json/general_blip_test_llava_three_lre4_e1_mistral_0822.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/blip/install_blip_test_llava.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/install_blip_test_llava_all_e1_H800.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_non_400.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_non_400_2000iter.json'
# args_question_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_400.json'
# args_answers_file = '/code/ICLR_2024/Auto-GUI/dataset/json/general_blip_test_llava_dual_400_2000iter.json'
args_num_chunks = 1
args_chunk_idx = 0
questions = read_json(os.path.expanduser(args_question_file))  # use the helper so the file handle is closed
questions = get_chunk(questions, args_num_chunks, args_chunk_idx)
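# With args_num_chunks = 1 this keeps the full list; raise it to shard questions across parallel runs.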
answers_file = os.path.expanduser(args_answers_file)
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
# Answers are written via write_json below, so no persistent file handle is kept open here.
args_image_folder = '/data/zbz5349/ICLR_2024/data/'
args_single_pred_prompt = True
args_conv_mode = "llava_v0"
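# "llava_v0" picks the conversation template used to wrap each question below.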
right_answer = []
for i, line in enumerate(tqdm(questions)):
    idx = line["id"]
    question = line['conversations'][0]
    qs = question['value'].replace('<image>', '').strip()
    cur_prompt = qs

    if 'image' in line:
        image_file = line["image"]
        # Convert to RGB so grayscale / RGBA screenshots do not break the processor.
        image = Image.open(os.path.join(args_image_folder, image_file)).convert('RGB')
        image_tensor = process_images([image], image_processor, model.config)[0]
        images = image_tensor.unsqueeze(0).half().cuda()
        image_sizes = [image.size]
        if getattr(model.config, 'mm_use_im_start_end', False):
            qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
        cur_prompt = '<image>' + '\n' + cur_prompt
    else:
        images = None
        image_sizes = None
    if args_single_pred_prompt:
        # qs = qs + '\n' + "Answer with the option's letter from the given choices directly."
        # cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly."
        qs = qs + '\n'  # only a trailing newline; the multiple-choice instruction above is disabled

    conv = conv_templates[args_conv_mode].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
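    # tokenizer_image_token splits the prompt on the <image> placeholder and splices
    # IMAGE_TOKEN_INDEX between the tokenized text segments, so generate() knows
    # where to inject the image features.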
    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=images,
            image_sizes=image_sizes,
            do_sample=True,  # sampling is enabled because temperature (0.2) > 0
            temperature=0.2,
            max_new_tokens=1024,
            use_cache=True,
        )
    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
    temp = {}
    temp['gt'] = line['conversations'][1]['value']
    temp['pred'] = outputs
    right_answer.append(temp)
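    # Rewrite the full answers file every iteration so partial results survive an interruption.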
    write_json(args_answers_file, right_answer)