# AVATAR / code / evaluation / evaluator.py
# Provenance: hahyeon610's Hugging Face dataset page (commit f89df01,
# "Add zipped video and metadata files").
# Dataset metadata — Modalities: Video, Audio; Languages: English.
import os
import json
import torch
import numpy as np
from tqdm import tqdm
from torch import Tensor
from typing import List, Dict, Set
from evaluation.utils import heatmap_interpolation, video_interpolation, \
convert_ann_to_mask
# Maps short task keys to the task names used in the benchmark annotations.
task_dict = dict([
    ("single", "Single-Sound"),
    ("mixed", "Mixed-Sound"),
    ("multi", "Multi-Entity"),
    ("off-screen", "Off-Screen"),
])
class Evaluator:
    """Evaluates sound-source-localization heatmaps against benchmark annotations.

    Computes pixel-level TP/FP/FN counts, instance-level TP/FN counts (overall
    and per task type), and cIoU for each frame at several heatmap thresholds.
    Per-frame results accumulate in ``self.evaluation_results`` and are written
    to ``evaluation_results.json`` under the model's output folder.
    """

    def __init__(self, data_root: str, model: str, benchmark_path: str,
                 heatmap_thresholds: List = None, height: int = 360, width: int = 640,
                 frame_sample_rate: int = 8, video: bool = True,
                 instance_threshold: float = 0.5):
        """
        Args:
            data_root: root folder holding per-model inference outputs.
            model: model name; sub-folder of ``data_root``.
            benchmark_path: folder with per-video annotation JSON files.
            heatmap_thresholds: percent thresholds whose absolute cutoff values
                are looked up in ``heatmap_threshold.json``; defaults to
                [0.1, 0.15, 0.2].
            height: target mask/heatmap height in pixels.
            width: target mask/heatmap width in pixels.
            frame_sample_rate: temporal upsampling factor for video heatmaps.
            video: evaluate one .npy per video if True, one per frame if False.
            instance_threshold: minimum fraction of an instance mask that the
                binarized heatmap must cover for the instance to count as a
                true positive.  NOTE(review): 0.5 is a conventional choice —
                confirm against the benchmark's reference implementation.
        """
        # Avoid the shared mutable-default-argument pitfall for the list.
        if heatmap_thresholds is None:
            heatmap_thresholds = [0.1, 0.15, 0.2]
        # data path
        self.data_root = data_root
        self.model = model
        self.benchmark_path = benchmark_path
        self.heatmap_thresholds = heatmap_thresholds
        self.height = height
        self.width = width
        self.frame_sample_rate = frame_sample_rate
        self.video = video
        # BUG FIX: instance_threshold was read in evaluate_instance_predictions
        # but never assigned anywhere, causing an AttributeError at evaluation
        # time; it is now a constructor parameter with a default.
        self.instance_threshold = instance_threshold
        # Maps each percent threshold (as a string key) to an absolute value.
        with open(os.path.join(self.data_root, self.model, "heatmap_threshold.json"), "r") as f:
            self.threshold_results = json.load(f)
        self.evaluation_results = {}  # {frame_id: {heatmap_percent: {metric: value}, ...}}

    def compute(self):
        """Run the evaluation over all heatmap files and persist the results."""
        data_path = os.path.join(self.data_root, self.model, "heatmap")
        print(f"Evaluating {'Video' if self.video else 'Image'} Heatmap")
        if self.video:
            self.evaluate_video(data_path)
        else:
            self.evaluate_image(data_path)
        self.save_results()

    def evaluate_image(self, data_path: str):
        """Evaluate per-frame heatmaps stored as ``<video_id>_<frame>.npy``."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue
            # File name layout: <video_id>_<frame_num>.npy (video_id may
            # itself contain underscores, hence the rsplit-style handling).
            frame_num = int(data_file.split("_")[-1].split(".")[0])
            video_id = "_".join(data_file.split("_")[:-1])
            metadata_path = os.path.join(self.benchmark_path, video_id, f"{frame_num:05d}.json")
            # Add batch and channel dims expected by heatmap_interpolation.
            infer = torch.tensor(np.load(os.path.join(data_path, data_file))).unsqueeze(0).unsqueeze(0)
            infer = heatmap_interpolation(infer, self.height, self.width)
            with open(metadata_path, "r") as f:
                metadata = json.load(f)
            self.evaluate(video_id, frame_num, infer, metadata["annotations"])

    def evaluate_video(self, data_path: str):
        """Evaluate per-video heatmaps stored as ``<video_id>.npy``."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue
            video_id = data_file.split(".")[0]
            metadata_path = os.path.join(self.benchmark_path, video_id)
            infer = torch.tensor(np.load(os.path.join(data_path, data_file)))
            # Upsample temporally so frame indices line up with annotations.
            infer = video_interpolation(infer, self.frame_sample_rate)
            for frame_data in os.listdir(metadata_path):
                if frame_data.endswith(".jpg"):
                    continue
                frame_num = int(frame_data.split(".")[0])
                if frame_num >= infer.shape[0]:
                    print(f"Frame number is larger than infer shape: {frame_num} >= {infer.shape[0]}")
                    # BUG FIX: previously fell through and indexed out of
                    # range on the next line; skip the frame instead.
                    continue
                infer_map = infer[frame_num].unsqueeze(0)
                infer_map = heatmap_interpolation(infer_map, self.height, self.width)
                with open(os.path.join(metadata_path, frame_data), "r") as f:
                    metadata = json.load(f)
                self.evaluate(video_id, frame_num, infer_map, metadata["annotations"])

    def evaluate(self, video_id: str, frame_id: int, infer_map: Tensor, annotations: List[Dict]):
        """Compute all metrics for one frame at every heatmap threshold.

        Args:
            video_id: benchmark video identifier.
            frame_id: frame index within the video.
            infer_map: interpolated heatmap tensor for this frame.
            annotations: annotation dicts, each with a "task" entry and
                (except for Off-Screen) segmentation data.
        """
        heatmap_dict = dict()  # {percent: binarized heatmap}
        for percent_thr in self.heatmap_thresholds:
            # Absolute cutoff for this percent, from heatmap_threshold.json.
            thr = self.threshold_results[str(percent_thr)]
            heatmap_dict[percent_thr] = (infer_map > thr)
        accumulated_mask = np.zeros((self.height, self.width))
        off_screen = False
        pixel_statistics = {thr: {"tp": 0, "fp": 0, "fn": 0} for thr in self.heatmap_thresholds}
        instance_statistics = {thr: {
            "total_tp": 0, "total_fn": 0,
            "single_tp": 0, "single_fn": 0,
            "mixed_tp": 0, "mixed_fn": 0,
            "multi_tp": 0, "multi_fn": 0
        } for thr in self.heatmap_thresholds}
        cious = {thr: 0 for thr in self.heatmap_thresholds}
        # Instance-Level Evaluation
        for ann in annotations:
            if ann["task"] == task_dict["off-screen"]:
                # Off-screen frames have no mask; flag and skip cIoU below.
                off_screen = True
                continue
            mask = convert_ann_to_mask(ann, self.height, self.width)
            accumulated_mask = np.logical_or(accumulated_mask, mask)
            for percent_thr in self.heatmap_thresholds:
                heatmap = heatmap_dict[percent_thr]
                instance_statistics[percent_thr] = self.evaluate_instance_predictions(
                    mask, heatmap, ann["task"], instance_statistics[percent_thr])
        # Pixel-Level Evaluation (against the union of all instance masks)
        for percent_thr in self.heatmap_thresholds:
            heatmap = heatmap_dict[percent_thr]
            pixel_statistics[percent_thr] = self.evaluate_pixel_predictions(
                accumulated_mask, heatmap, pixel_statistics[percent_thr])
            if not off_screen:
                cious[percent_thr] = self.evaluate_cIoU(accumulated_mask, heatmap)
        self.evaluation_results[f"{video_id}_{frame_id}"] = {
            "pixel_statistics": pixel_statistics,
            "instance_statistics": instance_statistics,
            "off_screen": off_screen,
            "cious": cious
        }

    def evaluate_instance_predictions(self, mask: np.ndarray, heatmap, task: str, instance_statistics: Dict):
        """Tally one instance as TP if the heatmap covers at least
        ``instance_threshold`` of its mask, otherwise as FN, both overall
        and per task type. Returns the updated statistics dict."""
        mask_area = np.sum(mask)
        # Guard 0/0: an empty instance mask cannot be hit, so the overlap
        # ratio is defined as 0 and the instance counts as a false negative.
        overlap = np.sum(np.logical_and(mask, heatmap)) / mask_area if mask_area > 0 else 0.0
        if overlap >= self.instance_threshold:
            instance_statistics["total_tp"] += 1
            if task == task_dict["single"]:
                instance_statistics["single_tp"] += 1
            elif task == task_dict["mixed"]:
                instance_statistics["mixed_tp"] += 1
            elif task == task_dict["multi"]:
                instance_statistics["multi_tp"] += 1
        else:
            instance_statistics["total_fn"] += 1
            if task == task_dict["single"]:
                instance_statistics["single_fn"] += 1
            elif task == task_dict["mixed"]:
                instance_statistics["mixed_fn"] += 1
            elif task == task_dict["multi"]:
                instance_statistics["multi_fn"] += 1
        return instance_statistics

    def evaluate_pixel_predictions(self, acc_mask: np.ndarray, heatmap, pixel_statistics: Dict):
        """Fill pixel-level TP/FP/FN counts for the accumulated GT mask and
        return the updated statistics dict."""
        tp_pixel = int(np.sum(np.logical_and(acc_mask, heatmap)))
        fp_pixel = int(np.sum(heatmap) - tp_pixel)
        fn_pixel = int(np.sum(acc_mask) - tp_pixel)
        pixel_statistics["tp"] = tp_pixel
        pixel_statistics["fp"] = fp_pixel
        pixel_statistics["fn"] = fn_pixel
        return pixel_statistics

    def evaluate_cIoU(self, acc_mask: np.ndarray, heatmap):
        """cIoU: intersection / (GT area + heatmap area outside the GT)."""
        intersection = np.sum(heatmap * acc_mask)
        denominator = np.sum(acc_mask) + np.sum(heatmap * (acc_mask == 0))
        # Guard: with an empty GT mask and empty heatmap the original
        # expression evaluated 0/0 to NaN; report 0 instead.
        if denominator == 0:
            return 0.0
        return intersection / denominator

    def save_results(self):
        """Dump the accumulated per-frame metrics to evaluation_results.json."""
        save_folder = os.path.join(self.data_root, self.model)
        os.makedirs(save_folder, exist_ok=True)
        save_path = os.path.join(save_folder, "evaluation_results.json")
        with open(save_path, "w") as f:
            json.dump(self.evaluation_results, f)
        print(f"Results are saved at {save_path}")