import argparse
import json
import math
import os

import numpy as np
import shortuuid
import torch
from PIL import Image
from tqdm import tqdm

from ChatUniVi.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from ChatUniVi.conversation import conv_templates, SeparatorStyle
from ChatUniVi.model.builder import load_pretrained_model
from ChatUniVi.utils import disable_torch_init
from ChatUniVi.mm_utils import tokenizer_image_token, KeywordsStoppingCriteria
|
|
|
|
def split_list(lst, n):
    """Split a list into n (roughly) equal-sized chunks."""
    # Note: ceil-based chunking can produce fewer than n chunks for short lists,
    # so high chunk indices may be out of range when len(lst) < n.
    chunk_size = math.ceil(len(lst) / n)
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
|
|
|
|
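# Return the k-th of n chunks so the question list can be sharded across processes.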
| def get_chunk(lst, n, k): |
| chunks = split_list(lst, n) |
| return chunks[k] |
|
|
|
|
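# Two-pass evaluation: pass 1 samples free-form reasoning; pass 2 replays the
# prompt plus that reasoning with an "ANSWER:" cue and reads the final option
# letter off the first generated token's logits.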
def eval_model(args):
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = "ChatUniVi"  # hard-coded; the model builder keys off this name
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
|
|
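    # The vision tower may be lazily initialised; load it and take its image processor.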
| vision_tower = model.get_vision_tower() |
| if not vision_tower.is_loaded: |
| vision_tower.load_model() |
| image_processor = vision_tower.image_processor |
|
|
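    # Shard the question list so several processes can split the workload (--num-chunks/--chunk-idx).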
    with open(os.path.expanduser(args.question_file), "r") as f:
        questions = json.load(f)
    questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
    answers_file = os.path.expanduser(args.answers_file)
    os.makedirs(os.path.dirname(answers_file) or ".", exist_ok=True)
    ans_file = open(answers_file, "w")
    for line in tqdm(questions):
        idx = line["id"]
        question = line["conversations"][0]
        gt_ans = line["conversations"][1]  # ground-truth answer; kept for reference but not scored here
        qs = question["value"].replace("<image>", "").strip()
        cur_prompt = qs
|
|
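        # Preprocess the image and splice the image placeholder token(s) into the question text.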
        if 'image' in line:
            image_file = line["image"].replace("\\", "/")
            # Convert to RGB so the processor also handles greyscale/palette images.
            image = Image.open(os.path.join(args.image_folder, image_file)).convert('RGB')
            image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
            images = image_tensor.unsqueeze(0).half().cuda()
            if getattr(model.config, 'mm_use_im_start_end', False):
                qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
            else:
                qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
            cur_prompt = '<image>' + '\n' + cur_prompt
        else:
            images = None
|
|
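        # Render the question into the selected conversation template, leaving the assistant turn open.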
| conv = conv_templates[args.conv_mode].copy() |
| conv.append_message(conv.roles[0], qs) |
| conv.append_message(conv.roles[1], None) |
| prompt = conv.get_prompt() |
|
|
| input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() |
|
|
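        # Stop generation once the template's separator string appears in the output.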
| stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 |
| keywords = [stop_str] |
| stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) |
|
|
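        # Pass 1: sample a free-form reasoning trace.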
| with torch.inference_mode(): |
| output_ids = model.generate( |
| input_ids, |
| images=images, |
| do_sample=True, |
| temperature=0.2, |
| max_new_tokens=1024, |
| use_cache=True, |
| stopping_criteria=[stopping_criteria]) |
|
|
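        # generate() echoes the prompt tokens; drop them before decoding and trim the stop string.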
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output tokens differ from the input tokens')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()
|
|
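        # Pass 2: re-prompt with the reasoning and an explicit ANSWER: cue.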
        outputs_reasoning = outputs
        input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
        # The criteria object records the prompt length at construction, so rebuild it for the longer pass-2 prompt.
        stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
|
|
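        # output_scores/return_dict_in_generate expose the per-step logits needed below.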
| with torch.inference_mode(): |
| output_ids = model.generate( |
| input_ids, |
| images=images, |
| do_sample=True, |
| temperature=0.2, |
| max_new_tokens=64, |
| use_cache=True, |
| output_scores=True, |
| return_dict_in_generate=True, |
| stopping_criteria=[stopping_criteria]) |
|
|
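        # Logits of the first generated token (batch element 0), upcast to float32 for a stable argmax.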
| scores = output_ids.scores[0][0].to(torch.float32) |
| label_score = [] |
|
|
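        # Only letters that literally appear as "(X)" in the prompt are viable options.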
        candidates = []
        answers_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
        for option in answers_list:
            if "(" + option + ")" in cur_prompt:
                candidates.append(option)
|
|
        for can in candidates:
            # encode() may prepend BOS or a leading-space piece, so take the last token id for the letter.
            can_id = tokenizer.encode(can)[-1]
            label_score.append(scores[can_id].item())
        # Guard against prompts with no "(X)" options, where argmax over an empty list would fail.
        outputs_answer = candidates[np.argmax(label_score)] if candidates else ""
|
|
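        # Unwrap the generate() output object to its raw token sequences for decoding.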
| output_ids = output_ids.sequences |
|
|
        input_token_len = input_ids.shape[1]
        n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
        if n_diff_input_output > 0:
            print(f'[Warning] {n_diff_input_output} output tokens differ from the input tokens')
        outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
        outputs = outputs.strip()
        if outputs.endswith(stop_str):
            outputs = outputs[:-len(stop_str)]
        outputs = outputs.strip()
| outputs = outputs_reasoning + '\n The answer is ' + outputs |
|
|
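        # One JSONL record per question: the reasoning text plus the logit-scored option letter.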
| ans_id = shortuuid.uuid() |
| ans_file.write(json.dumps({"question_id": idx, |
| "prompt": cur_prompt, |
| "text": outputs, |
| "answer_id": ans_id, |
| "model_id": model_name, |
| "pred": outputs_answer, |
| "metadata": {}}) + "\n") |
| ans_file.flush() |
| ans_file.close() |
|
|
|
|
| if __name__ == "__main__": |
| parser = argparse.ArgumentParser() |
| parser.add_argument("--model-path", type=str, default="facebook/opt-350m") |
| parser.add_argument("--model-base", type=str, default=None) |
| parser.add_argument("--image-folder", type=str, default="") |
| parser.add_argument("--question-file", type=str, default="tables/question.json") |
| parser.add_argument("--answers-file", type=str, default="answer.jsonl") |
| parser.add_argument("--conv-mode", type=str, default="simple") |
| parser.add_argument("--num-chunks", type=int, default=1) |
| parser.add_argument("--chunk-idx", type=int, default=0) |
| args = parser.parse_args() |
|
|
| eval_model(args) |
|
|