# qwen_vanilla/gqa.py: evaluate Qwen2-VL on the GQA validation split with ANLS scoring
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
from PIL import Image
import copy
import torch
from tqdm import tqdm
import json
from anls import anls_score
import torch.profiler
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = '1'
### Dataset Information ###
data_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/temp/image/images/"
qa_path = "/root/Desktop/workspace/kwon/pinpoint/pinpoint_dataset/dataset_final/gqa/pinpoint_gqa_val.json"
device_map = "auto"
model_path = "Qwen/Qwen2-VL-7B-Instruct"
# model_path = "/root/Desktop/workspace/kwon/pinpoint/qwen_pinpoint/ckpt/info_pinpoint02"
# default: Load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_path, torch_dtype=torch.bfloat16, device_map=device_map
)
# We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
# model = Qwen2VLForConditionalGeneration.from_pretrained(
# "Qwen/Qwen2-VL-7B-Instruct",
# torch_dtype=torch.bfloat16,
# attn_implementation="flash_attention_2",
# device_map="auto",
# )
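# Note (assumption): attn_implementation="flash_attention_2" additionally
# requires the flash-attn package to be installed and an Ampere-or-newer GPU.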
# default processor (immediately overridden below with explicit pixel bounds)
processor = AutoProcessor.from_pretrained(model_path)
# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
# min_pixels = 12544 # original
# min_pixels = 1204224
min_pixels = 2408448
# max_pixels = 1605632
max_pixels = 3211264
processor = AutoProcessor.from_pretrained(model_path, min_pixels=min_pixels, max_pixels=max_pixels)
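# Rough check (assumption): Qwen2-VL budgets visual tokens in 28x28-pixel
# patches, so a pixel bound maps to a token bound of pixels / (28 * 28).
# Under that assumption, the bounds above allow roughly
# 2408448 / 784 = 3072 to 3211264 / 784 = 4096 visual tokens per image.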
with open(qa_path, "r", encoding="utf-8") as file:
    qa_data = json.load(file)
total_ANLS = 0
total_processed = 0
total_len = 0
total_time = 0.0
total_flops = 0.0
pbar = tqdm(qa_data)
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
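# Note (assumption): the first iteration's timing also absorbs one-time CUDA
# kernel compilation and allocator warm-up; an untimed warm-up generate()
# before the loop would keep that one-off cost out of the reported average.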
for entry in pbar:
    image_path = data_path + entry['image']
    image = Image.open(image_path).convert("RGB")
    ques = entry['question']
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image,
                },
                {"type": "text", "text": f"{ques} \n Give me just an answer."},
            ],
        }
    ]
    # Preparation for inference
    start_event.record()
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")
    # Inference: generate the output. The commented block below measures FLOPs
    # with torch.profiler; it is disabled here, so current_flops stays 0.
    # with torch.no_grad():
    # with torch.profiler.profile(
    #     activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
    #     with_flops=True,
    #     profile_memory=False,
    #     record_shapes=False,
    # ) as prof:
    #     generated_ids = model.generate(**inputs, max_new_tokens=128)
    # current_flops = sum(event.flops for event in prof.key_averages() if event.flops is not None)
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    current_flops = 0
    # Strip the prompt tokens so only newly generated tokens are decoded
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    text_outputs = output_text[0]
    end_event.record()
    torch.cuda.synchronize()
    elapsed_time = start_event.elapsed_time(end_event)  # milliseconds
    total_time += elapsed_time
    ANLS_Score = anls_score(prediction=text_outputs, gold_labels=[entry['answer']])
    print(entry['question'])
    print(text_outputs)
    print(ANLS_Score)
    print("\n")
    # Update counters
    total_processed += 1
    print(f"{total_time / total_processed:.2f}ms")
    total_ANLS += ANLS_Score
    # Assumption: "Token Length" tracks the prompt length (text + visual tokens);
    # the original stub added 0 here, which left the reported average at 0.00.
    total_len += inputs.input_ids.shape[1]
    total_flops += current_flops
    # Update the running averages in the progress bar description
    if total_processed > 0:
        pbar.set_description(
            f"Processing | ANLS: {total_ANLS / total_processed:.3f} | "
            f"Token Length: {total_len / total_processed:.2f} | "
            f"FLOPs: {total_flops / (total_processed * 1e12):.2f} TFLOPs"
        )
print(f"\nFinal ANLS: {(total_ANLS / len(qa_data)):.4f}")
print(f"Final Token Length: {total_len / len(qa_data):.2f}")
print(f"Average FLOPs: {total_flops / (len(qa_data) *1e12):.2f} TFLOPs")
print(f"Average Response Time : {total_time / len(qa_data):.4f}ms")