Commit ·
f89df01
1
Parent(s): ac7d17b
Add zipped video and metadata files
Browse files- code/evaluation.py +47 -0
- code/evaluation/__init__.py +7 -0
- code/evaluation/evaluator.py +187 -0
- code/evaluation/heatmap_analyzer.py +89 -0
- code/evaluation/utils.py +47 -0
- code/print_example.py +93 -0
- vggsound_10k.txt +0 -0
- video.zip +3 -0
code/evaluation.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import argparse
|
| 3 |
+
|
| 4 |
+
from evaluation import Evaluator, HeatmapAnalyzer
|
| 5 |
+
from evaluation.utils import list_of_strings
|
| 6 |
+
|
| 7 |
+
def get_arguments():
    """Parse command-line options for the heatmap evaluation pipeline.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    def _str2bool(value):
        # BUG FIX: argparse's type=bool treats every non-empty string as
        # True, so "--video False" still evaluated to True. Parse explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', '1', 'yes', 'y'):
            return True
        if value.lower() in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError(f"Expected a boolean value, got {value!r}")

    parser = argparse.ArgumentParser()

    # target data
    parser.add_argument('--heatmap_root', type=str, default='your_heatmap_root', help='Root directory path of heatmap data')
    parser.add_argument('--model', type=str, default='model_name', help='Model name to load from heatmap root directory')
    parser.add_argument('--benchmark_path', type=str, default='AVATAR/metadata', help='Benchmark path for metadata')

    # for interpolation
    parser.add_argument('--height', type=int, default=360, help='height of benchmark data')
    parser.add_argument('--width', type=int, default=640, help='width of benchmark data')

    # string defaults are run through `type`, so this default becomes a float list
    parser.add_argument('--heatmap_thresholds', type=list_of_strings, default='0.05,0.1,0.15,0.2', help='heatmap thresholds')
    parser.add_argument('--frame_sample_rate', type=int, default=8, help='Interval for sampling frames during interpolation')

    parser.add_argument('--video', type=_str2bool, default=True, help='video or image')

    return parser.parse_args()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def main(args):
    """Calibrate heatmap thresholds, then evaluate the model's heatmaps.

    Args:
        args: parsed command-line namespace from get_arguments().

    BUG FIX: Evaluator.__init__ loads <heatmap_root>/<model>/heatmap_threshold.json,
    which is only produced by HeatmapAnalyzer.compute(); the original code
    constructed the Evaluator before running the analyzer, which crashes with
    FileNotFoundError on a fresh run. The analyzer now runs first.
    """
    # Ensure the output directory exists before anything writes into it.
    os.makedirs(os.path.join(args.heatmap_root, args.model), exist_ok=True)

    heatmap_analyzer = HeatmapAnalyzer(
        args.heatmap_root, args.model, args.heatmap_thresholds, args.benchmark_path,
        args.height, args.width, args.frame_sample_rate, args.video
    )
    heatmap_analyzer.compute()

    # Constructed only after compute() has written heatmap_threshold.json.
    evaluator = Evaluator(
        args.heatmap_root, args.model, args.benchmark_path,
        heatmap_thresholds=args.heatmap_thresholds, height=args.height, width=args.width,
        frame_sample_rate=args.frame_sample_rate, video=args.video
    )
    evaluator.compute()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the full pipeline
    # (threshold calibration followed by evaluation).
    args = get_arguments()
    main(args)
|
code/evaluation/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Re-export the package's public classes so callers can simply write
# `from evaluation import Evaluator, HeatmapAnalyzer`.
from evaluation.evaluator import Evaluator
from evaluation.heatmap_analyzer import HeatmapAnalyzer

# Explicit public API of the package.
__all__ = [
    "Evaluator",
    "HeatmapAnalyzer",
]
|
code/evaluation/evaluator.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import torch
|
| 4 |
+
import numpy as np
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
|
| 8 |
+
from typing import List, Dict, Set
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
from evaluation.utils import heatmap_interpolation, video_interpolation, \
|
| 12 |
+
convert_ann_to_mask
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Maps short task keys to the task names used in the benchmark annotation
# files (the "task" field of each annotation record).
task_dict = {
    "single": "Single-Sound",
    "mixed": "Mixed-Sound",
    "multi": "Multi-Entity",
    "off-screen": "Off-Screen"
}
|
| 21 |
+
|
| 22 |
+
class Evaluator:
    """Scores predicted heatmaps against benchmark annotations.

    For every frame it computes, per calibrated heatmap threshold:
      * pixel-level tp/fp/fn against the union of all annotation masks,
      * instance-level tp/fn per task category,
      * cIoU (skipped for frames whose audio is off-screen),
    and dumps the per-frame results to
    <data_root>/<model>/evaluation_results.json.
    """

    def __init__(self, data_root: str, model: str, benchmark_path: str,
                 heatmap_thresholds: List = None, height: int = 360, width: int = 640,
                 frame_sample_rate: int = 8, video: bool = True,
                 instance_threshold: float = 0.5):
        """
        Args:
            data_root: root directory holding per-model outputs.
            model: model sub-directory name under data_root.
            benchmark_path: root of the benchmark metadata (one folder per video).
            heatmap_thresholds: percentile thresholds to evaluate; None means
                [0.1, 0.15, 0.2] (sentinel replaces the original mutable
                default argument).
            height: target heatmap height in pixels.
            width: target heatmap width in pixels.
            frame_sample_rate: temporal upsampling factor for video heatmaps.
            video: True for per-video heatmaps, False for per-image heatmaps.
            instance_threshold: minimum fraction of an instance mask that must
                be covered by the activated heatmap to count as a true
                positive. BUG FIX: this attribute was read in
                evaluate_instance_predictions but never assigned, raising
                AttributeError; 0.5 is a sensible default -- TODO confirm
                against the benchmark's reference implementation.
        """
        # data path
        self.data_root = data_root
        self.model = model
        self.benchmark_path = benchmark_path

        self.heatmap_thresholds = [0.1, 0.15, 0.2] if heatmap_thresholds is None else heatmap_thresholds
        self.height = height
        self.width = width
        self.frame_sample_rate = frame_sample_rate

        self.video = video
        self.instance_threshold = instance_threshold

        # Absolute thresholds per percentile, produced by HeatmapAnalyzer.
        # JSON object keys are strings, hence the str() lookups later on.
        with open(os.path.join(self.data_root, self.model, "heatmap_threshold.json"), "r") as f:
            self.threshold_results = json.load(f)
        self.evaluation_results = {}  # {f"{video_id}_{frame_id}": {metric: value, ...}}

    def compute(self):
        """Evaluate every heatmap file and write the results JSON."""
        data_path = os.path.join(self.data_root, self.model, "heatmap")

        print(f"Evaluating {'Video' if self.video else 'Image'} Heatmap")
        self.evaluate_video(data_path) if self.video else self.evaluate_image(data_path)

        self.save_results()

    def evaluate_image(self, data_path: str):
        """Evaluate per-frame .npy heatmaps named <video_id>_<frame_num>.npy."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue

            # Video ids may contain underscores; the frame number is the
            # last underscore-separated token of the filename stem.
            frame_num = int(data_file.split("_")[-1].split(".")[0])
            video_id = "_".join(data_file.split("_")[:-1])
            metadata_path = os.path.join(self.benchmark_path, video_id, f"{frame_num:05d}.json")

            infer = torch.tensor(np.load(os.path.join(data_path, data_file))).unsqueeze(0).unsqueeze(0)
            infer = heatmap_interpolation(infer, self.height, self.width)

            # Use a context manager instead of a leaked file handle.
            with open(metadata_path, "r") as f:
                metadata = json.load(f)
            self.evaluate(video_id, frame_num, infer, metadata["annotations"])

    def evaluate_video(self, data_path: str):
        """Evaluate per-video .npy heatmaps named <video_id>.npy, frame by frame."""
        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue

            video_id = data_file.split(".")[0]
            metadata_path = os.path.join(self.benchmark_path, video_id)

            infer = torch.tensor(np.load(os.path.join(data_path, data_file)))
            infer = video_interpolation(infer, self.frame_sample_rate)

            for frame_data in os.listdir(metadata_path):
                if frame_data.endswith(".jpg"):
                    continue

                frame_num = int(frame_data.split(".")[0])
                if frame_num >= infer.shape[0]:
                    print(f"Frame number is larger than infer shape: {frame_num} >= {infer.shape[0]}")
                    # BUG FIX: the original fell through and indexed out of
                    # range below; skip frames beyond the interpolated clip.
                    continue

                infer_map = infer[frame_num].unsqueeze(0)
                infer_map = heatmap_interpolation(infer_map, self.height, self.width)

                with open(os.path.join(metadata_path, frame_data), "r") as f:
                    metadata = json.load(f)
                self.evaluate(video_id, frame_num, infer_map, metadata["annotations"])

    def evaluate(self, video_id: str, frame_id: int, infer_map: np.ndarray, annotations: Dict):
        """Score one frame's heatmap against its annotations at every threshold."""
        heatmap_dict = dict()  # {percentile: boolean activation map}
        for percent_thr in self.heatmap_thresholds:
            thr = self.threshold_results[str(percent_thr)]
            heatmap_dict[percent_thr] = (infer_map > thr)

        accumulated_mask = np.zeros((self.height, self.width))
        off_screen = False

        pixel_statistics = {thr: {"tp": 0, "fp": 0, "fn": 0} for thr in self.heatmap_thresholds}
        instance_statistics = {thr: {
            "total_tp": 0, "total_fn": 0,
            "single_tp": 0, "single_fn": 0,
            "mixed_tp": 0, "mixed_fn": 0,
            "multi_tp": 0, "multi_fn": 0
        } for thr in self.heatmap_thresholds}
        cious = {thr: 0 for thr in self.heatmap_thresholds}

        # Instance-Level Evaluation
        for ann in annotations:
            if ann["task"] == task_dict["off-screen"]:
                off_screen = True
                continue
            mask = convert_ann_to_mask(ann, self.height, self.width)
            accumulated_mask = np.logical_or(accumulated_mask, mask)

            for percent_thr in self.heatmap_thresholds:
                heatmap = heatmap_dict[percent_thr]
                instance_statistics[percent_thr] = self.evaluate_instance_predictions(mask, heatmap, ann["task"], instance_statistics[percent_thr])

        # Pixel-Level Evaluation
        for percent_thr in self.heatmap_thresholds:
            heatmap = heatmap_dict[percent_thr]
            pixel_statistics[percent_thr] = self.evaluate_pixel_predictions(accumulated_mask, heatmap, pixel_statistics[percent_thr])

            # cIoU is undefined for off-screen frames (no positive mask).
            if not off_screen:
                cious[percent_thr] = self.evaluate_cIoU(accumulated_mask, heatmap)

        self.evaluation_results[f"{video_id}_{frame_id}"] = {
            "pixel_statistics": pixel_statistics,
            "instance_statistics": instance_statistics,
            "off_screen": off_screen,
            "cious": cious
        }

    def evaluate_instance_predictions(self, mask: np.ndarray, heatmap: np.ndarray, task: str, instance_statistics: Dict):
        """Update tp/fn counters for one instance: a hit when at least
        `instance_threshold` of its mask is covered by the activated heatmap."""
        mask_area = np.sum(mask)
        # Guard against empty masks (previously a division-by-zero warning
        # whose NaN silently counted as a false negative).
        coverage = np.sum(np.logical_and(mask, heatmap)) / mask_area if mask_area else 0.0
        if coverage >= self.instance_threshold:
            instance_statistics["total_tp"] += 1
            if task == task_dict["single"]:
                instance_statistics["single_tp"] += 1
            elif task == task_dict["mixed"]:
                instance_statistics["mixed_tp"] += 1
            elif task == task_dict["multi"]:
                instance_statistics["multi_tp"] += 1
        else:
            instance_statistics["total_fn"] += 1
            if task == task_dict["single"]:
                instance_statistics["single_fn"] += 1
            elif task == task_dict["mixed"]:
                instance_statistics["mixed_fn"] += 1
            elif task == task_dict["multi"]:
                instance_statistics["multi_fn"] += 1
        return instance_statistics

    def evaluate_pixel_predictions(self, acc_mask: np.ndarray, heatmap: np.ndarray, pixel_statistics: Dict):
        """Fill tp/fp/fn pixel counts of the heatmap vs. the union mask."""
        tp_pixel = int(np.sum(np.logical_and(acc_mask, heatmap)))
        fp_pixel = int(np.sum(heatmap) - tp_pixel)
        fn_pixel = int(np.sum(acc_mask) - tp_pixel)

        pixel_statistics["tp"] = tp_pixel
        pixel_statistics["fp"] = fp_pixel
        pixel_statistics["fn"] = fn_pixel
        return pixel_statistics

    def evaluate_cIoU(self, acc_mask: np.ndarray, heatmap: np.ndarray):
        """cIoU = |heatmap AND mask| / (|mask| + |heatmap outside mask|)."""
        ciou = np.sum(heatmap * acc_mask) / (np.sum(acc_mask) + np.sum(heatmap * (acc_mask == 0)))
        return ciou

    def save_results(self):
        """Write the per-frame results to <data_root>/<model>/evaluation_results.json."""
        save_folder = os.path.join(self.data_root, self.model)
        os.makedirs(save_folder, exist_ok=True)

        save_path = os.path.join(save_folder, "evaluation_results.json")
        with open(save_path, "w") as f:
            json.dump(self.evaluation_results, f)

        print(f"Results are saved at {save_path}")
|
code/evaluation/heatmap_analyzer.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import json

from typing import List

import numpy as np
import torch
from tqdm import tqdm

# BUG FIX: this module lives in the `evaluation` package, but imported its
# helpers from a non-existent top-level `utils` module; evaluator.py uses the
# package-qualified path. Without this fix the module fails at import time.
from evaluation.utils import heatmap_interpolation, video_interpolation
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class HeatmapAnalyzer:
    """Calibrates absolute heatmap thresholds from a model's predictions.

    Gathers every (interpolated) heatmap value produced by the model, sorts
    them in descending order and, for each requested percentile, records the
    value above which that fraction of pixels fires. The result is written to
    <data_root>/<model>/heatmap_threshold.json for the Evaluator to consume.
    """

    def __init__(self, data_root: str, model: str, heatmap_thresholds: List, benchmark_path: str,
                 height: int = 360, width: int = 640, frame_sample_rate: int = 8, video: bool = True):
        """
        Args:
            data_root: root directory holding per-model outputs.
            model: model sub-directory name under data_root.
            heatmap_thresholds: percentile thresholds (fractions in [0, 1]).
            benchmark_path: root of the benchmark metadata (one folder per video).
            height: target heatmap height in pixels.
            width: target heatmap width in pixels.
            frame_sample_rate: temporal upsampling factor for video heatmaps.
            video: True for per-video heatmaps, False for per-image heatmaps.
        """
        # data path
        self.data_root = data_root
        self.model = model
        self.benchmark_path = benchmark_path

        self.height = height
        self.width = width
        self.frame_sample_rate = frame_sample_rate
        self.heatmap_thresholds = heatmap_thresholds

        self.video = video

    def compute(self):
        """Collect heatmap values and save the calibrated thresholds."""
        data_path = os.path.join(self.data_root, self.model, "heatmap")
        save_path = os.path.join(self.data_root, self.model, "heatmap_threshold.json")

        print(f"Calculating {'Video' if self.video else 'Image'} Heatmap")
        total_val = self.compute_for_video(data_path) if self.video else self.compute_for_image(data_path)

        heatmap_threshold = self.cal_heatmap_threshold(np.array(total_val))
        self.save_heatmap_threshold(save_path, heatmap_threshold)

    def compute_for_image(self, data_path):
        """Load every per-frame .npy heatmap, resized to the benchmark resolution."""
        total_val = []

        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue

            infer = torch.tensor(np.load(os.path.join(data_path, data_file))).unsqueeze(0).unsqueeze(0)
            infer = heatmap_interpolation(infer, self.height, self.width)
            total_val.append(infer)

        return total_val

    def compute_for_video(self, data_path):
        """Load per-video .npy heatmaps, upsample them in time, and collect
        the frames that have benchmark metadata."""
        total_val = []

        for data_file in tqdm(os.listdir(data_path)):
            if not data_file.endswith(".npy"):
                continue

            video_id = data_file.split(".")[0]
            metadata_path = os.path.join(self.benchmark_path, video_id)

            infer = torch.tensor(np.load(os.path.join(data_path, data_file)))
            infer = video_interpolation(infer, self.frame_sample_rate)

            for frame_data in os.listdir(metadata_path):
                if frame_data.endswith(".jpg"):
                    continue

                # NOTE(review): Evaluator.evaluate_video parses the same
                # filenames as int(name.split(".")[0]) -- confirm the two
                # frame-number conventions agree.
                frame_num = int(frame_data.split(".")[0].split("_")[-1])
                if frame_num >= infer.shape[0]:
                    # Robustness fix: skip frames beyond the interpolated
                    # clip instead of crashing with an IndexError.
                    print(f"Frame number is larger than infer shape: {frame_num} >= {infer.shape[0]}")
                    continue

                infer_map = infer[frame_num].unsqueeze(0)
                infer_map = heatmap_interpolation(infer_map, self.height, self.width)
                total_val.append(infer_map)

        return total_val

    def cal_heatmap_threshold(self, total_heatmap: np.ndarray):
        """Return {percentile: value} such that `percentile` of all collected
        heatmap values lie at or above the returned value."""
        sorted_data = np.sort(total_heatmap, axis=None)[::-1]
        result = dict()
        for thr in self.heatmap_thresholds:
            # Robustness fix: clamp the index so thr == 1.0 maps to the
            # smallest value instead of indexing one past the end.
            index = min(int(len(sorted_data) * thr), len(sorted_data) - 1)
            result[thr] = float(sorted_data[index])
        return result

    def save_heatmap_threshold(self, save_path: str, result: dict):
        """Persist the calibrated thresholds as pretty-printed JSON."""
        with open(save_path, 'w') as f:
            json.dump(result, f, indent=4)
|
code/evaluation/utils.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import torch
|
| 3 |
+
import numpy as np
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
from imantics import Mask
|
| 8 |
+
from typing import List
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def convert_ann_to_mask(ann: List, height: int, width: int):
    """Rasterize one polygon annotation into a binary mask.

    Args:
        ann: a single annotation record. NOTE(review): despite the `List`
            annotation it is indexed like a dict -- its "segmentation" entry
            is expected to be a list of flat polygons [x0, y0, x1, y1, ...]
            (COCO-style; confirm against the benchmark metadata).
        height: mask height in pixels.
        width: mask width in pixels.

    Returns:
        np.ndarray of shape (height, width), dtype uint8; 1 inside polygons.
    """
    mask = np.zeros((height, width), dtype=np.uint8)
    poly = ann["segmentation"]

    for p in poly:
        # Flat coordinate list -> (N, 2) integer vertex array for OpenCV.
        p = np.array(p).reshape(-1, 2).astype(int)
        cv2.fillPoly(mask, [p], 1)
    return mask
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def convert_mask_to_ann(mask: np.ndarray):
    """Convert a binary mask back into polygon segmentation(s).

    Inverse of convert_ann_to_mask; traces the mask contours with imantics
    and returns its COCO-style segmentation list.
    """
    polygons = Mask(mask).polygons()
    return polygons.segmentation
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Custom argparse type: parses a comma-separated string into floats.
# NOTE: the name is historical -- it returns a list of floats, not strings;
# it is kept because evaluation.py registers it as an argparse `type`.
def list_of_strings(arg):
    """Parse a comma-separated string of numbers into a list of floats.

    Robust to surrounding whitespace and empty segments (e.g. a trailing
    comma), which previously raised ValueError.
    """
    return [float(tok) for tok in arg.split(',') if tok.strip()]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def video_interpolation(video: Tensor, frame_sample_rate: int):
    """Linearly upsample a sequence of heatmaps along the time axis.

    Between each pair of consecutive frames, `frame_sample_rate` blended
    frames are emitted (the first being the earlier frame itself), and the
    final original frame is appended, giving
    (len(video) - 1) * frame_sample_rate + 1 output frames with a size-1
    channel dimension inserted at axis 1.
    """
    frames = []
    for earlier, later in zip(video[:-1], video[1:]):
        for step in range(frame_sample_rate):
            # Convex combination of the two surrounding key frames.
            blended = ((frame_sample_rate - step) / frame_sample_rate) * earlier \
                + (step / frame_sample_rate) * later
            frames.append(blended)
    frames.append(video[-1])
    return torch.stack(frames).unsqueeze(1)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def heatmap_interpolation(heatmap: Tensor, height: int, width: int):
    """Bilinearly resize a (N, C, H, W) heatmap tensor to (height, width).

    Returns the result as a numpy array with all size-1 dims squeezed out.
    """
    resized = F.interpolate(heatmap, size=(height, width),
                            mode='bilinear', align_corners=False)
    return resized.squeeze().numpy()
|
code/print_example.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os, json
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn import metrics
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
|
| 6 |
+
# --- Utilities ---
|
| 7 |
+
def final_auc(data):
    """Area under the cIoU-vs-threshold curve.

    Samples thresholds 0.00, 0.05, ..., 1.00 and, for each, the fraction of
    frames whose cIoU meets it; the area is the trapezoidal integral of that
    curve. This equals sklearn.metrics.auc on the same points (sklearn's auc
    is the trapezoidal rule), but is dependency-free and, like final_ciou,
    returns 0.0 on empty input instead of producing NaN warnings.
    """
    if not data:
        return 0.0
    thresholds = [0.05 * i for i in range(21)]
    arr = np.array(data)
    cious = [np.mean(arr >= t) for t in thresholds]
    # Trapezoidal rule over the (threshold, fraction) curve.
    area = 0.0
    for i in range(len(thresholds) - 1):
        area += (cious[i] + cious[i + 1]) / 2.0 * (thresholds[i + 1] - thresholds[i])
    return area
| 11 |
+
|
| 12 |
+
def final_ciou(data):
    """Mean cIoU over all frames; 0.0 when no frames were collected."""
    if not data:
        return 0.0
    return np.mean(data)
|
| 14 |
+
|
| 15 |
+
def parse_task_flags(annotations):
    """Return which of the four task categories appear in a frame's annotations.

    Args:
        annotations: iterable of annotation records, each with a "task" field.

    Returns:
        dict mapping each known task name to True if present, else False.

    Raises:
        ValueError: if an annotation carries an unknown task name.
    """
    known = ("Single-Sound", "Mixed-Sound", "Multi-Entity", "Off-Screen")
    flags = dict.fromkeys(known, False)
    for annotation in annotations:
        name = annotation["task"]
        if name not in flags:
            raise ValueError(f"Unknown task: {name}")
        flags[name] = True
    return flags
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# --- Parameters ---
# NOTE(review): Evaluator.save_results writes to
# <heatmap_root>/<model>/evaluation_results.json, without the extra `folder`
# path component used below -- confirm the expected directory layout.
heatmap_threshold = 0.1  # which calibrated threshold's results to report
width, height = 640, 360  # benchmark frame resolution in pixels
folder = "AVATAR"
file = "evaluation_results.json"
model = "your_model_name"  # Replace with your model name
data_path = os.path.join("your_heatmap_root", model, folder, file)
benchmark_path = "AVATAR/metadata"  # Replace with your benchmark path


# --- Initialization ---
# Per-task accumulators of per-frame cIoU values.
ciou_by_task = {
    "Total": [],
    "Single-Sound": [],
    "Mixed-Sound": [],
    "Multi-Entity": []
}
# Off-screen frames are scored by pixel statistics instead of cIoU.
off_screen_tn, off_screen_fp = 0, 0


# --- Load Evaluation Results ---
with open(data_path, 'r') as f:
    data = json.load(f)


# --- Process Each Frame ---
for frame_key, result in tqdm(data.items()):
    # Keys have the form "<video_id>_<frame_num>"; video ids may themselves
    # contain underscores, so only the last segment is the frame number.
    video_id = "_".join(frame_key.split("_")[:-1])
    frame_num = int(frame_key.split("_")[-1])
    metadata_file = os.path.join(benchmark_path, video_id, f"{frame_num:05d}.json")

    with open(metadata_file, 'r') as f:
        annotations = json.load(f)["annotations"]

    flags = parse_task_flags(annotations)
    # JSON object keys are strings, so the float threshold is stringified.
    ciou = result["cious"][str(heatmap_threshold)]
    # NOTE(review): off-screen frames contribute their (zero) cIoU to
    # "Total" -- confirm this is intended.
    ciou_by_task["Total"].append(ciou)

    for task in ["Single-Sound", "Mixed-Sound", "Multi-Entity"]:
        if flags[task]:
            ciou_by_task[task].append(ciou)

    if flags["Off-Screen"]:
        # For off-screen audio every activated pixel is a false positive;
        # everything else in the frame counts as a true negative.
        stats = result["pixel_statistics"][str(heatmap_threshold)]
        off_screen_tn += width * height - stats["fp"]
        off_screen_fp += stats["fp"]


# --- Compute Final Metrics ---
summary = {}
for task, values in ciou_by_task.items():
    summary[task] = {
        "ciou": final_ciou(values),
        "auc": final_auc(values)
    }


# --- Print Results ---
print(f"model: {model}, file: {file}\n")

for task in ["Total", "Single-Sound", "Mixed-Sound", "Multi-Entity"]:
    print(f"--- {task.lower()} ---")
    print(f"final ciou: {summary[task]['ciou']:.4f}")
    print(f"final auc : {summary[task]['auc']:.4f}\n")

print("--- off-screen pixel statistics ---")
print("tn pixels \t fp pixels")
print(f"{off_screen_tn} \t {off_screen_fp}")
|
vggsound_10k.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
video.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5ce5be7fa6dda85889750f6c7cc2fad807527063e7d7090c664170a66b078a68
|
| 3 |
+
size 3785157586
|