Datasets:

ArXiv:
File size: 2,546 Bytes
9f3bc09
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import os
import json

import torch
import numpy as np
from tqdm import tqdm
from vbench.utils import load_video, load_dimension_info, CACHE_DIR
from vbench.third_party.grit_model import DenseCaptioning

import logging

# Module-level logger with a timestamped format for this metric module.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)

def get_dect_from_grit(model, image_arrays):
    """Run the GRiT detector on each frame and collect detected object labels.

    Args:
        model: detection model exposing ``run_caption_tensor(frame)``.
        image_arrays: a list of frames, or a tensor-like object with a
            ``.numpy()`` method that iterates per-frame along its first axis.

    Returns:
        list[set]: one set of detected object labels per frame; an empty set
        when nothing was detected in that frame.
    """
    pred = []
    # Convert non-list input (a tensor) so iteration yields per-frame arrays.
    if not isinstance(image_arrays, list):
        image_arrays = image_arrays.numpy()
    with torch.no_grad():  # inference only — no gradients needed
        for frame in image_arrays:
            ret = model.run_caption_tensor(frame)
            # ret[0] holds the detections; each detection's third field is
            # its label list (presumably object-name strings — per GRiT).
            if ret[0]:
                pred.append(set(ret[0][0][2]))
            else:
                pred.append(set())
    return pred

def check_generate(key_info, predictions):
    """Count frames whose predictions contain both requested objects.

    ``key_info`` has the form ``"<object_a> and <object_b>"``; ``predictions``
    is a sequence of per-frame label collections (e.g. sets of object names).

    Returns the number of frames in which both objects were detected.
    """
    first, second = (part.strip() for part in key_info.split(' and '))
    return sum(
        1
        for labels in predictions
        if first in labels and second in labels
    )


def multiple_objects(model, video_pairs):
    """Score videos on how often both requested objects appear in a frame.

    Args:
        model: detection model forwarded to ``get_dect_from_grit``.
        video_pairs: iterable of dicts with keys ``auxiliary_info`` (the
            ``"<a> and <b>"`` object pair), ``content_path`` and ``prompt``.

    Returns:
        dict: ``{"score": [overall_success_rate, video_results]}`` where each
        per-video entry records the fraction of its frames containing both
        objects.

    Raises:
        KeyError: if an entry lacks the ``auxiliary_info`` key.
    """
    success_frame_count, frame_count = 0, 0
    video_results = []
    for info in tqdm(video_pairs):
        if 'auxiliary_info' not in info:
            # Bug fix: the original `raise "<str>"` is itself a TypeError in
            # Python 3; raise a proper exception with the intended message.
            raise KeyError("Auxiliary info is not in json, please check your json.")
        object_info = info['auxiliary_info']
        video_path = info['content_path']
        query = info["prompt"]

        video_tensor = load_video(video_path, num_frames=16)
        # Permute to frame-last-channel order for the detector — assumes
        # load_video yields (frames, C, H, W); TODO confirm against utils.
        cur_video_pred = get_dect_from_grit(model, video_tensor.permute(0, 2, 3, 1))
        cur_success_frame_count = check_generate(object_info, cur_video_pred)
        cur_success_frame_rate = cur_success_frame_count / len(cur_video_pred)
        success_frame_count += cur_success_frame_count
        frame_count += len(cur_video_pred)
        video_results.append({
            'prompt': query,
            'video_path': video_path,
            'video_results': cur_success_frame_rate,
        })

    # Guard the empty-input case (the original divided by zero here).
    success_rate = success_frame_count / frame_count if frame_count else 0.0

    return {
        "score": [success_rate, video_results]
    }

        
        

def compute_multiple_objects(video_pairs):
    """Initialize the GRiT detection model on GPU and evaluate ``video_pairs``.

    Returns the score dict produced by ``multiple_objects``.
    """
    detector = DenseCaptioning(torch.device("cuda"))
    # Detection-only GRiT checkpoint cached under the shared vbench cache dir.
    weight_path = f'{CACHE_DIR}/grit_model/grit_b_densecap_objectdet.pth'
    detector.initialize_model_det(model_weight=weight_path)
    logger.info("Initialize detection model success")
    return multiple_objects(detector, video_pairs)