# CS381V-hardest-vqa / vinoground_subset / imagebind_eval_altered.py
# (Hugging Face upload metadata: uploaded by Nguyencent, commit 0e22d26 verified)
# Standard library
import argparse
import os

# Third-party
import pandas as pd
import torch
from tqdm import tqdm

# ImageBind
from imagebind import data
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType
# ---- CLI arguments --------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default="./Vinoground", help='Path to Vinoground dataset (from Huggingface)')
args = parser.parse_args()
data_path = args.data

# ---- Dataset --------------------------------------------------------------
# "vinoground_hardest.csv" is a filtered subset of the original Vinoground
# CSV (500 examples); after filtering it should contain 215 rows.
vino = pd.read_csv(os.path.join(data_path, "vinoground_hardest.csv"))
num_examples = len(vino.index)

# ---- Model ----------------------------------------------------------------
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Keyword argument instead of a bare positional `True` makes the intent
# (download/load pretrained weights) explicit.
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)

# Winoground-style accuracy counters, kept as plain Python ints.
text_correct = 0
video_correct = 0
group_correct = 0
for row_num in tqdm(range(num_examples)):
    # After filtering, the row position no longer matches the original video
    # number. The "index" column preserves the original dataset index, which
    # is what the video filenames are keyed on.
    video_num = vino["index"][row_num]

    videos = [
        os.path.join(data_path, f"vinoground_videos/{video_num}_pos.mp4"),
        os.path.join(data_path, f"vinoground_videos/{video_num}_neg.mp4"),
    ]
    texts = [vino["pos_cap"][row_num], vino["neg_cap"][row_num]]

    inputs = {
        ModalityType.TEXT: data.load_and_transform_text(texts, device),
        ModalityType.VISION: data.load_and_transform_video_data(videos, device),
    }
    with torch.no_grad():
        embeddings = model(inputs)

    # results[i][j] = similarity of video i with caption j
    # (index 0 = positive sample, index 1 = negative sample).
    results = embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T

    # Winoground-style scoring. The comparisons yield 0-dim bool tensors;
    # convert to Python bools so the counters stay plain ints (the original
    # accumulated tensors, so the final print showed tensor reprs instead of
    # numbers). Computing each flag once also lets the group score reuse them.
    video_ok = bool(results[0][0] > results[1][0]) and bool(results[1][1] > results[0][1])
    text_ok = bool(results[0][0] > results[0][1]) and bool(results[1][1] > results[1][0])
    video_correct += video_ok
    text_correct += text_ok
    group_correct += video_ok and text_ok

print(text_correct / num_examples, video_correct / num_examples, group_correct / num_examples)