import json
import os

import torch
import torch.profiler
from PIL import Image
from tqdm import tqdm
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from anls import anls_score

data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/temp/image/images/"
qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/gqa/pinpoint_gqa_val.json"

device_map = "auto"
model_path = "Qwen/Qwen2-VL-7B-Instruct"

# Load the model in bfloat16 and let device_map="auto" shard it across available GPUs.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_path, torch_dtype=torch.bfloat16, device_map=device_map
)
# Single processor load with an explicit visual-token budget. The pixel caps
# are multiples of 28 * 28, the merged-patch unit of Qwen2-VL's image processor.
min_pixels = 2408448  # 3072 * 28 * 28
max_pixels = 3211264  # 4096 * 28 * 28
processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
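
# Illustrative only (not in the original script): assuming one visual token per
# 28x28 merged patch after the processor clamps total pixels into
# [min_pixels, max_pixels], the caps above allow roughly 3072-4096 visual
# tokens per image. A hypothetical back-of-envelope helper:
def approx_visual_tokens(width: int, height: int) -> int:
    # Clamp the raw pixel count into the configured budget, then divide by the
    # 28x28 patch area. The real processor also rounds each dimension to a
    # multiple of 28, so this is only an approximation.
    pixels = min(max(width * height, min_pixels), max_pixels)
    return pixels // (28 * 28)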
|
|
with open(qa_path, "r", encoding="utf-8") as file:
    qa_data = json.load(file)
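
# Each loaded entry is assumed to look like the following (inferred from the
# keys accessed in the loop below; not documented in the source):
# {"image": "<file name>", "question": "...", "answer": "..."}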
|
|
total_ANLS = 0
total_processed = 0
total_len = 0
total_time = 0.0
total_flops = 0.0
|
|
pbar = tqdm(qa_data)
# CUDA events give GPU-side timing; elapsed_time() reports milliseconds.
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
|
|
for entry in pbar:
    image_path = os.path.join(data_path, entry['image'])
    image = Image.open(image_path).convert("RGB")
    ques = entry['question']
|
|
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image,
                },
                {"type": "text", "text": f"{ques} \n Give me just an answer."},
            ],
        }
    ]
|
|
    # Time the full pipeline: prompt build, preprocessing, generation, decode.
    start_event.record()
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")
|
|
    # Generate up to 128 new tokens.
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    current_flops = 0  # placeholder; see the optional profiler sketch below
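
    # Optional FLOPs estimate (a sketch, not part of the original run):
    # torch.profiler can attribute per-op FLOPs via with_flops=True, but only
    # for ops it has formulas for (e.g. matmul/conv), so the sum is a lower
    # bound. Swap it in for the placeholder above if a rough number suffices:
    # with torch.profiler.profile(
    #     activities=[
    #         torch.profiler.ProfilerActivity.CPU,
    #         torch.profiler.ProfilerActivity.CUDA,
    #     ],
    #     with_flops=True,
    # ) as prof:
    #     generated_ids = model.generate(**inputs, max_new_tokens=128)
    # current_flops = sum(evt.flops for evt in prof.key_averages())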
|
|
    # Strip the prompt tokens so only newly generated tokens are decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    text_outputs = output_text[0]
    end_event.record()
    torch.cuda.synchronize()  # wait for the GPU so the event timestamps are valid
    elapsed_time = start_event.elapsed_time(end_event)  # milliseconds
    total_time += elapsed_time
|
|
    ANLS_Score = anls_score(prediction=text_outputs, gold_labels=[entry['answer']])
    print(entry['question'])
    print(text_outputs)
    print(ANLS_Score)
    print()

    total_processed += 1
    print(f"{total_time / total_processed:.2f} ms avg")
    total_ANLS += ANLS_Score
    total_len += len(generated_ids_trimmed[0])  # count generated tokens only
    total_flops += current_flops
|
|
    if total_processed > 0:
        pbar.set_description(
            f"Processing | ANLS: {total_ANLS / total_processed:.3f} | "
            f"Token Length: {total_len / total_processed:.2f} | "
            f"FLOPs: {total_flops / (total_processed * 1e12):.2f} TFLOPs"
        )
|
|
print(f"\nFinal ANLS: {total_ANLS / len(qa_data):.4f}")
print(f"Final Token Length: {total_len / len(qa_data):.2f}")
print(f"Average FLOPs: {total_flops / (len(qa_data) * 1e12):.2f} TFLOPs")
print(f"Average Response Time: {total_time / len(qa_data):.4f} ms")