import os
import json
import torch
import numpy as np
from tqdm import tqdm
from torch import Tensor
from typing import List, Dict, Set
from evaluation.utils import heatmap_interpolation, video_interpolation, \
convert_ann_to_mask
# Canonical benchmark task names, keyed by the short identifiers used internally.
task_dict = dict([
    ("single", "Single-Sound"),
    ("mixed", "Mixed-Sound"),
    ("multi", "Multi-Entity"),
    ("off-screen", "Off-Screen"),
])
class Evaluator:
    """Evaluate predicted sound-source heatmaps against benchmark annotations.

    For each frame, the predicted heatmap is binarized at several activation
    thresholds (one per entry of ``heatmap_thresholds``) and scored at pixel
    level (tp/fp/fn counts), instance level (per-task tp/fn counts) and with
    cIoU. Per-frame results accumulate in ``self.evaluation_results`` and are
    written to ``evaluation_results.json`` by :meth:`save_results`.
    """

    def __init__(self, data_root: str, model: str, benchmark_path: str,
                 heatmap_thresholds: List = None, height: int = 360, width: int = 640,
                 frame_sample_rate: int = 8, video: bool = True,
                 instance_threshold: float = 0.5):
        """Store configuration and load the precomputed activation thresholds.

        Args:
            data_root: Root folder holding per-model prediction outputs.
            model: Model subfolder name under ``data_root``.
            benchmark_path: Folder containing per-video annotation JSON files.
            heatmap_thresholds: Percent thresholds to evaluate at; defaults to
                [0.1, 0.15, 0.2]. (Fixed: the original used a mutable default
                argument, shared across instances.)
            height: Target heatmap/mask height in pixels.
            width: Target heatmap/mask width in pixels.
            frame_sample_rate: Temporal upsampling factor for video heatmaps.
            video: Evaluate video heatmaps if True, per-image heatmaps otherwise.
            instance_threshold: Minimum fraction of an instance mask the
                binarized heatmap must cover for the instance to count as
                detected. NOTE(review): the original code read
                ``self.instance_threshold`` without ever assigning it, which
                raised AttributeError at runtime; 0.5 is a guessed default —
                confirm against the benchmark definition.
        """
        # data path
        self.data_root = data_root
        self.model = model
        self.benchmark_path = benchmark_path
        # Resolve the mutable-default-argument pitfall via a None sentinel.
        self.heatmap_thresholds = [0.1, 0.15, 0.2] if heatmap_thresholds is None else heatmap_thresholds
        self.height = height
        self.width = width
        self.frame_sample_rate = frame_sample_rate
        self.video = video
        self.instance_threshold = instance_threshold
        # Maps str(percent) -> absolute activation threshold, produced upstream.
        with open(os.path.join(self.data_root, self.model, "heatmap_threshold.json"), "r") as f:
            self.threshold_results = json.load(f)
        self.evaluation_results = {}  # {f"{video_id}_{frame_id}": {metric group: values}}

    def compute(self):
        """Run the evaluation over every heatmap file, then save the results."""
        data_path = os.path.join(self.data_root, self.model, "heatmap")
        print(f"Evaluating {'Video' if self.video else 'Image'} Heatmap")
        # Use a plain statement instead of the original expression-statement ternary.
        if self.video:
            self.evaluate_video(data_path)
        else:
            self.evaluate_image(data_path)
        self.save_results()

    def evaluate_image(self, data_path: str):
        """Evaluate per-frame heatmaps stored as ``<video_id>_<frame>.npy`` files."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue
            # File name pattern: "<video_id>_<frame>.npy"; video_id may itself
            # contain underscores, so split from the right.
            frame_num = int(data_file.split("_")[-1].split(".")[0])
            video_id = "_".join(data_file.split("_")[:-1])
            metadata_path = os.path.join(self.benchmark_path, video_id, f"{frame_num:05d}.json")
            # Add (batch, channel) dims expected by the interpolation helper.
            infer = torch.tensor(np.load(os.path.join(data_path, data_file))).unsqueeze(0).unsqueeze(0)
            infer = heatmap_interpolation(infer, self.height, self.width)
            # Use a context manager so the file handle is not leaked.
            with open(metadata_path, "r") as f:
                metadata = json.load(f)
            self.evaluate(video_id, frame_num, infer, metadata["annotations"])

    def evaluate_video(self, data_path: str):
        """Evaluate per-video heatmaps stored as ``<video_id>.npy`` files."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue
            video_id = data_file.split(".")[0]
            metadata_path = os.path.join(self.benchmark_path, video_id)
            infer = torch.tensor(np.load(os.path.join(data_path, data_file)))
            # Temporally upsample to one heatmap per original frame.
            infer = video_interpolation(infer, self.frame_sample_rate)
            for frame_data in os.listdir(metadata_path):
                if frame_data.endswith(".jpg"):
                    continue
                frame_num = int(frame_data.split(".")[0])
                if frame_num >= infer.shape[0]:
                    print(f"Frame number is larger than infer shape: {frame_num} >= {infer.shape[0]}")
                    # Bug fix: the original only printed the warning and then
                    # indexed infer[frame_num] anyway, raising IndexError.
                    continue
                infer_map = infer[frame_num].unsqueeze(0)
                infer_map = heatmap_interpolation(infer_map, self.height, self.width)
                with open(os.path.join(metadata_path, frame_data), "r") as f:
                    metadata = json.load(f)
                self.evaluate(video_id, frame_num, infer_map, metadata["annotations"])

    def evaluate(self, video_id: str, frame_id: int, infer_map: Tensor, annotations: Dict):
        """Score one frame's heatmap against its annotations at every threshold.

        Args:
            video_id: Video identifier, used to key the result entry.
            frame_id: Frame number within the video.
            infer_map: Interpolated heatmap for the frame. (Annotation fixed:
                this is a torch Tensor produced by heatmap_interpolation, not
                an np.ndarray.)
            annotations: Iterable of per-instance annotation dicts, each with a
                "task" field plus the fields consumed by convert_ann_to_mask.
        """
        # Binarize the heatmap once per percent threshold.
        heatmap_dict = {
            percent_thr: (infer_map > self.threshold_results[str(percent_thr)])
            for percent_thr in self.heatmap_thresholds
        }
        accumulated_mask = np.zeros((self.height, self.width))
        off_screen = False
        pixel_statistics = {thr: {"tp": 0, "fp": 0, "fn": 0} for thr in self.heatmap_thresholds}
        instance_statistics = {thr: {
            "total_tp": 0, "total_fn": 0,
            "single_tp": 0, "single_fn": 0,
            "mixed_tp": 0, "mixed_fn": 0,
            "multi_tp": 0, "multi_fn": 0
        } for thr in self.heatmap_thresholds}
        cious = {thr: 0 for thr in self.heatmap_thresholds}
        # Instance-Level Evaluation
        for ann in annotations:
            if ann["task"] == task_dict["off-screen"]:
                # Off-screen sources have no mask; flag the frame and skip.
                off_screen = True
                continue
            mask = convert_ann_to_mask(ann, self.height, self.width)
            accumulated_mask = np.logical_or(accumulated_mask, mask)
            for percent_thr in self.heatmap_thresholds:
                instance_statistics[percent_thr] = self.evaluate_instance_predictions(
                    mask, heatmap_dict[percent_thr], ann["task"],
                    instance_statistics[percent_thr])
        # Pixel-Level Evaluation
        for percent_thr in self.heatmap_thresholds:
            heatmap = heatmap_dict[percent_thr]
            pixel_statistics[percent_thr] = self.evaluate_pixel_predictions(
                accumulated_mask, heatmap, pixel_statistics[percent_thr])
            if not off_screen:
                cious[percent_thr] = self.evaluate_cIoU(accumulated_mask, heatmap)
        self.evaluation_results[f"{video_id}_{frame_id}"] = {
            "pixel_statistics": pixel_statistics,
            "instance_statistics": instance_statistics,
            "off_screen": off_screen,
            "cious": cious
        }

    def evaluate_instance_predictions(self, mask: np.ndarray, heatmap: np.ndarray,
                                      task: str, instance_statistics: Dict):
        """Count one instance as detected (tp) or missed (fn), overall and per task.

        An instance counts as detected when the binarized heatmap covers at
        least ``self.instance_threshold`` of the instance mask.
        """
        mask_area = np.sum(mask)
        # Guard against an empty mask (the original divided by zero there);
        # an empty instance cannot be covered, so it counts as a miss.
        detected = mask_area > 0 and \
            np.sum(np.logical_and(mask, heatmap)) / mask_area >= self.instance_threshold
        outcome = "tp" if detected else "fn"
        instance_statistics[f"total_{outcome}"] += 1
        for key in ("single", "mixed", "multi"):
            if task == task_dict[key]:
                instance_statistics[f"{key}_{outcome}"] += 1
                break
        return instance_statistics

    def evaluate_pixel_predictions(self, acc_mask: np.ndarray, heatmap: np.ndarray,
                                   pixel_statistics: Dict):
        """Fill ``pixel_statistics`` with pixel tp/fp/fn counts for this frame."""
        tp_pixel = int(np.sum(np.logical_and(acc_mask, heatmap)))
        # fp: heatmap pixels outside the mask; fn: mask pixels the heatmap missed.
        fp_pixel = int(np.sum(heatmap) - tp_pixel)
        fn_pixel = int(np.sum(acc_mask) - tp_pixel)
        pixel_statistics["tp"] = tp_pixel
        pixel_statistics["fp"] = fp_pixel
        pixel_statistics["fn"] = fn_pixel
        return pixel_statistics

    def evaluate_cIoU(self, acc_mask: np.ndarray, heatmap: np.ndarray):
        """Return cIoU: heatmap mass inside the mask over (mask area + heatmap
        mass outside the mask)."""
        intersection = np.sum(heatmap * acc_mask)
        denominator = np.sum(acc_mask) + np.sum(heatmap * (acc_mask == 0))
        # Guard: an empty mask AND empty heatmap would otherwise yield NaN (0/0).
        return intersection / denominator if denominator > 0 else 0.0

    def save_results(self):
        """Write the accumulated per-frame results to ``evaluation_results.json``."""
        save_folder = os.path.join(self.data_root, self.model)
        os.makedirs(save_folder, exist_ok=True)
        save_path = os.path.join(save_folder, "evaluation_results.json")
        with open(save_path, "w") as f:
            json.dump(self.evaluation_results, f)
        print(f"Results are saved at {save_path}")