ANDRYHA committed on
Commit
6468b8b
·
verified ·
1 Parent(s): 4a014eb

Upload bench.py

Browse files
Files changed (1) hide show
  1. bench.py +240 -246
bench.py CHANGED
@@ -1,246 +1,240 @@
1
- # Please folow our requirements for metrics calculation:
2
- # conda create -n saliency python=3.8.16
3
- # conda activate saliency
4
- # pip install numpy==1.24.2 opencv-python==4.7.0.72 tqdm==4.65.0
5
- # conda install ffmpeg=4.4.2 -c conda-forge
6
-
7
- from multiprocessing.pool import Pool, ThreadPool
8
- from os import path, listdir, mkdir
9
- from pathlib import Path
10
- from tqdm import tqdm
11
- from glob import glob
12
- import numpy as np
13
- import subprocess
14
- import argparse
15
- import warnings
16
- import json
17
- import cv2
18
-
19
- cv2.setNumThreads(0)
20
- eps = np.finfo(np.float32).eps
21
- warnings.filterwarnings("error")
22
-
23
- ###metrics###
24
-
25
- def nss(s_map, gt):
26
- s_map_norm = (s_map - np.mean(s_map))/(np.std(s_map) + 1e-7)
27
- temp = s_map_norm[gt[:, 0], gt[:, 1]]
28
- return np.mean(temp)
29
-
30
-
31
- def similarity(s_map, gt):
32
- s_map = s_map / (np.sum(s_map) + 1e-7)
33
- gt = gt / (np.sum(gt) + 1e-7)
34
- return np.sum(np.minimum(s_map, gt))
35
-
36
-
37
- def cc(s_map, gt):
38
- a = (s_map - np.mean(s_map))/(np.std(s_map) + 1e-7)
39
- b = (gt - np.mean(gt))/(np.std(gt) + 1e-7)
40
- r = (a*b).sum() / np.sqrt((a*a).sum() * (b*b).sum() + 1e-7)
41
- return r
42
-
43
-
44
- def auc_judd(S, F):
45
-
46
- Sth = S[F[:, 0], F[:, 1]]
47
- Nfixations = len(Sth)
48
- Uniqe_fixations = np.unique(F, axis=1).shape[-1]
49
- Possible_fixations = np.prod(S.shape) + (Nfixations - Uniqe_fixations)
50
-
51
- allthreshes = np.sort(Sth)[::-1]
52
- tp = np.zeros(Nfixations + 2)
53
- fp = np.zeros(Nfixations + 2)
54
- tp[0] = fp[0] = 0
55
- tp[-1] = fp[-1] = 1
56
-
57
- # Vectorized computation of aboveth
58
- aboveth = np.sum(S >= allthreshes[:, np.newaxis, np.newaxis], axis=(1, 2))
59
-
60
- arange = np.arange(1, Nfixations + 1)
61
- fp[1:-1] = (aboveth - arange) / (Possible_fixations - Nfixations)
62
- tp[1:-1] = arange / Nfixations
63
-
64
- # Trapezoidal integration to compute AUC-Judd
65
- return np.trapz(tp, fp)
66
-
67
-
68
-
69
- def kldiv(s_map, gt):
70
- s_map = s_map / (np.sum(s_map) * 1.0)
71
- gt = gt / (np.sum(gt) * 1.0)
72
- eps = 2.2204e-16
73
- res = np.sum(gt * np.log(eps + gt / (s_map + eps)))
74
- return res
75
-
76
-
77
- ######
78
-
79
- def xrgb2gray(img):
80
- assert len(img.shape) in (2, 3)
81
- return img.mean(axis=2) if len(img.shape) == 3 else img
82
-
83
- # Returns SM in [0; 1] range
84
- def read_sm(path):
85
- img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
86
- img = xrgb2gray(img)
87
- img = (img - img.min()) / (img.max() - img.min() + eps)
88
- return img
89
-
90
- def calculate_frame_metrics(frame):
91
- gt_fix = np.array(frame['gt_fixations'])
92
- gt_120_sm = read_sm(frame['gt_saliency_path'])
93
- pred_sm = cv2.resize(read_sm(frame['predictions_path']), (gt_120_sm.shape[1], gt_120_sm.shape[0]))
94
-
95
- return {
96
- 'sim_score': similarity(pred_sm, gt_120_sm),
97
- 'nss_score': nss(pred_sm, gt_fix),
98
- 'cc_score': cc(pred_sm, gt_120_sm),
99
- 'auc_judd_score': auc_judd(pred_sm, gt_fix),
100
- }
101
-
102
-
103
- def calculate_metrics(video_name, temp_predictions_path, temp_gt_saliency_path, temp_gt_fixations_path, num_workers=4):
104
- predictions_path = glob(temp_predictions_path)[0]
105
- gt_saliency_path = glob(temp_gt_saliency_path)[0]
106
- with open(temp_gt_fixations_path) as f:
107
- gt_fixations = json.load(f)
108
-
109
- scores = []
110
- assert_func = lambda path: set([int(x.split('.')[0]) for x in listdir(path)])
111
- assert assert_func(gt_saliency_path) == assert_func(predictions_path)
112
-
113
- frames = [
114
- {
115
- 'gt_fixations': gt_fix,
116
- 'gt_saliency_path': gt_sal,
117
- 'predictions_path': pred,
118
- } for gt_fix, gt_sal, pred in zip(
119
- gt_fixations,
120
- [path.join(gt_saliency_path, x) for x in sorted(listdir(gt_saliency_path))],
121
- [path.join(predictions_path, x) for x in sorted(listdir(predictions_path))]
122
- )]
123
- with Pool(num_workers) as pool:
124
- scores = pool.map(calculate_frame_metrics, frames)
125
-
126
- conv_scores = {metric: [x[metric] for x in scores] for metric in scores[0].keys()}
127
-
128
- return {
129
- 'video_name' : video_name,
130
- 'cc' : np.mean(conv_scores['cc_score']),
131
- 'sim' : np.mean(conv_scores['sim_score']),
132
- 'nss' : np.mean(conv_scores['nss_score']),
133
- 'auc_judd' : np.mean(conv_scores['auc_judd_score']),
134
- }
135
-
136
-
137
- def calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers=4):
138
-
139
- detail_result = []
140
- for video_name in tqdm(video_names):
141
- if len([x for x in detail_result if x['video_name'] == video_name]) > 0:
142
- continue
143
- short_video_name = Path(video_name).name
144
- model_output = str(Path(model_extracted_frames) / f'{short_video_name}*')
145
- gt_gaussians = str(Path(gt_extracted_frames) / f'{short_video_name}*')
146
- gt_fixations = Path(gt_fixations_path) / short_video_name / 'fixations.json'
147
- cur_result = calculate_metrics(video_name, model_output, gt_gaussians, gt_fixations, num_workers)
148
- detail_result += [cur_result]
149
- np.save("tmp2.npy", detail_result)
150
-
151
- return detail_result
152
-
153
-
154
- def make_bench(model_extracted_frames, gt_extracted_frames, gt_fixations_path, split_json='TrainTestSplit.json', results_json='results.json', mode='public_test', num_workers=4):
155
-
156
- print(num_workers, 'worker(s)')
157
- print(f'Testing {model_extracted_frames}')
158
-
159
- sm_listdir = listdir(model_extracted_frames)
160
- gt_listdir = listdir(gt_extracted_frames)
161
-
162
- if len(sm_listdir) < len(gt_listdir):
163
- msg = f'There are results for only a few videos ({len(sm_listdir)}/{len(gt_listdir)})!'
164
- raise ValueError(msg)
165
-
166
- video_names = sorted(sm_listdir)
167
- with open(split_json) as f:
168
- splits = set(json.load(f)[mode])
169
-
170
- video_names = [name for name in video_names if name in splits]
171
-
172
- detail_result = calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers)
173
- detail_result = sorted(detail_result, key=lambda res: res['video_name'])
174
-
175
- result = {'cc' : [], 'sim' : [], 'nss' : [], 'auc_judd' : []}
176
- for i in result:
177
- for j in detail_result:
178
- result[i].append(j[i])
179
-
180
- with open(results_json, 'w') as f:
181
- json.dump(result, f)
182
-
183
- model_res = {'Model': [model_extracted_frames], 'Mode': [mode]}
184
- [model_res.update({key: [np.mean(result[key])]}) for key in result.keys()]
185
-
186
- print(model_res)
187
-
188
-
189
-
190
- def extract_frames(input_dir, output_dir, split_json='TrainTestSplit.json', mode='public_test', num_workers=4):
191
-
192
- def poolfunc(x):
193
- if x.stem not in splits[mode]:
194
- return
195
- dst_vid = dst / x.stem
196
- if dst_vid.exists():
197
- pbar.update(1)
198
- return
199
- dst_vid.mkdir()
200
- subprocess.check_call(f'ffmpeg -v error -i {x} {dst_vid}/%03d.png'.split())
201
- pbar.update(1)
202
-
203
- with open(split_json) as f:
204
- splits = json.load(f)
205
-
206
- root = Path(input_dir)
207
- dst = Path(output_dir)
208
- dst.mkdir(exist_ok=True)
209
- videos = list(root.iterdir())
210
- pbar = tqdm(total=len(splits[mode]))
211
- with ThreadPool(num_workers) as p:
212
- p.map(poolfunc, videos)
213
-
214
-
215
-
216
- if __name__ == '__main__':
217
-
218
- parser = argparse.ArgumentParser()
219
- parser.add_argument('--model_video_predictions', default='./SampleSubmission-CenterPrior',
220
- help='Folder with predicted saliency videos')
221
- parser.add_argument('--model_extracted_frames', default='./SampleSubmission-CenterPrior-Frames',
222
- help='Folder to store prediction frames (should not exist at launch time), requires ~170 GB of free space')
223
-
224
- parser.add_argument('--gt_video_predictions', default='./SaliencyTest/Test',
225
- help='Folder from dataset page with gt saliency videos')
226
- parser.add_argument('--gt_extracted_frames', default='./SaliencyTest-Frames',
227
- help='Folder to store ground-truth frames (should not exist at launch time), requires ~170 GB of free space')
228
- parser.add_argument('--gt_fixations_path', default='./FixationsTest/Test',
229
- help='Folder from dataset page with gt saliency fixations')
230
- parser.add_argument('--split_json', default='./TrainTestSplit.json',
231
- help='Json from dataset page with names splitting')
232
-
233
- parser.add_argument('--results_json', default='./results.json')
234
- parser.add_argument('--mode', default='public_test', help='public_test/private_test')
235
- parser.add_argument('--num_workers', type=int, default=4)
236
-
237
- args = parser.parse_args()
238
-
239
- if not path.exists(args.model_extracted_frames):
240
- print("Extracting", args.model_video_predictions, 'to', args.model_extracted_frames)
241
- extract_frames(args.model_video_predictions, args.model_extracted_frames, args.split_json, args.mode, args.num_workers)
242
- if not path.exists(args.gt_extracted_frames):
243
- print("Extracting", args.gt_video_predictions, 'to', args.gt_extracted_frames)
244
- extract_frames(args.gt_video_predictions, args.gt_extracted_frames, args.split_json, args.mode, args.num_workers)
245
-
246
- make_bench(args.model_extracted_frames, args.gt_extracted_frames, args.gt_fixations_path, args.split_json, args.results_json, args.mode, args.num_workers)
 
1
+ from multiprocessing.pool import Pool, ThreadPool
2
+ from os import path, listdir, mkdir
3
+ from pathlib import Path
4
+ from tqdm import tqdm
5
+ from glob import glob
6
+ import numpy as np
7
+ import subprocess
8
+ import argparse
9
+ import warnings
10
+ import json
11
+ import cv2
12
+
13
# Frame scoring is parallelized with multiprocessing.Pool below, so OpenCV's
# own thread pool is disabled to avoid oversubscribing the CPU.
cv2.setNumThreads(0)
# Smallest float32 increment; guards the min-max normalization in read_sm
# against division by zero on constant images.
eps = np.finfo(np.float32).eps
# Promote every warning (e.g. numpy runtime warnings) to an exception so
# silent numeric problems abort the benchmark instead of skewing scores.
warnings.filterwarnings("error")
16
+
17
+ ###metrics###
18
+
19
def nss(s_map, gt):
    """Normalized Scanpath Saliency: mean of the z-scored map at fixated pixels.

    gt is an (N, 2) integer array of fixations as (row, col) indices into s_map.
    """
    mu, sigma = np.mean(s_map), np.std(s_map)
    z_map = (s_map - mu) / (sigma + 1e-7)
    return np.mean(z_map[gt[:, 0], gt[:, 1]])
23
+
24
+
25
def similarity(s_map, gt):
    """SIM metric: histogram intersection of the two sum-normalized maps."""
    pred = s_map / (np.sum(s_map) + 1e-7)
    target = gt / (np.sum(gt) + 1e-7)
    return np.sum(np.minimum(pred, target))
29
+
30
+
31
def cc(s_map, gt):
    """Pearson linear correlation coefficient between the two maps."""
    x = (s_map - np.mean(s_map)) / (np.std(s_map) + 1e-7)
    y = (gt - np.mean(gt)) / (np.std(gt) + 1e-7)
    numerator = (x * y).sum()
    denominator = np.sqrt((x * x).sum() * (y * y).sum() + 1e-7)
    return numerator / denominator
36
+
37
+
38
def auc_judd(S, F):
    """AUC-Judd: area under the ROC curve obtained by thresholding the
    saliency map S at each fixated value.

    S -- 2-D saliency map.
    F -- integer array of fixations, shape (N, 2), as (row, col) indices into S.

    Returns the trapezoidal AUC: 1.0 when fixations hit the highest saliency
    values, ~0.5 at chance level.
    """
    Sth = S[F[:, 0], F[:, 1]]
    Nfixations = len(Sth)
    # Unique fixation *locations* are unique rows of F, i.e. axis=0. The
    # previous np.unique(F, axis=1).shape[-1] counted unique columns, which
    # is always <= 2 and mis-sized the negative set for repeated fixations.
    Unique_fixations = np.unique(F, axis=0).shape[0]
    Possible_fixations = np.prod(S.shape) + (Nfixations - Unique_fixations)

    # Thresholds, highest first.
    allthreshes = np.sort(Sth)[::-1]
    tp = np.zeros(Nfixations + 2)
    fp = np.zeros(Nfixations + 2)
    # The ROC curve starts at (0, 0) (already zero-initialized) and ends at (1, 1).
    tp[-1] = fp[-1] = 1

    # Vectorized: for each threshold, how many pixels of S are at or above it.
    aboveth = np.sum(S >= allthreshes[:, np.newaxis, np.newaxis], axis=(1, 2))

    arange = np.arange(1, Nfixations + 1)
    fp[1:-1] = (aboveth - arange) / (Possible_fixations - Nfixations)
    tp[1:-1] = arange / Nfixations

    # Trapezoidal integration of the ROC curve gives the AUC.
    return np.trapz(tp, fp)
60
+
61
+
62
+
63
def kldiv(s_map, gt):
    """KL divergence KL(gt || s_map) between the sum-normalized maps."""
    pred = s_map / (np.sum(s_map) * 1.0)
    target = gt / (np.sum(gt) * 1.0)
    # MATLAB's eps; kept for numeric parity with the reference implementation.
    # Named `reg` so it no longer shadows the module-level `eps`.
    reg = 2.2204e-16
    return np.sum(target * np.log(reg + target / (pred + reg)))
69
+
70
+
71
+ ######
72
+
73
def xrgb2gray(img):
    """Collapse an H x W x C image to grayscale by channel-averaging.

    2-D input is returned unchanged; any other rank is rejected.
    """
    assert len(img.shape) in (2, 3)
    if len(img.shape) == 3:
        return img.mean(axis=2)
    return img
76
+
77
# Returns SM in [0; 1] range
def read_sm(path):
    """Load a saliency-map image, convert to grayscale, min-max normalize to [0, 1]."""
    raw = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    gray = xrgb2gray(raw)
    lo, hi = gray.min(), gray.max()
    # Module-level `eps` keeps the division finite on constant images.
    return (gray - lo) / (hi - lo + eps)
83
+
84
def calculate_frame_metrics(frame):
    """Score one frame: returns a dict with SIM, NSS, CC and AUC-Judd.

    `frame` is a dict with keys 'gt_fixations', 'gt_saliency_path' and
    'predictions_path' (built by calculate_metrics).
    """
    fixations = np.array(frame['gt_fixations'])
    gt_sm = read_sm(frame['gt_saliency_path'])
    # Predictions are resized to the ground-truth resolution before scoring;
    # note cv2.resize takes (width, height).
    pred_sm = cv2.resize(read_sm(frame['predictions_path']),
                         (gt_sm.shape[1], gt_sm.shape[0]))
    return {
        'sim_score': similarity(pred_sm, gt_sm),
        'nss_score': nss(pred_sm, fixations),
        'cc_score': cc(pred_sm, gt_sm),
        'auc_judd_score': auc_judd(pred_sm, fixations),
    }
95
+
96
+
97
def calculate_metrics(video_name, temp_predictions_path, temp_gt_saliency_path, temp_gt_fixations_path, num_workers=4):
    """Score one video: per-frame metrics averaged over all frames.

    temp_predictions_path / temp_gt_saliency_path are glob patterns that
    resolve to the frame directories; temp_gt_fixations_path is a JSON file
    holding one fixation list per frame, in frame order.

    Returns a dict with the video name and the mean cc/sim/nss/auc_judd.
    Raises ValueError when prediction and ground-truth frame sets differ.
    """
    predictions_path = glob(temp_predictions_path)[0]
    gt_saliency_path = glob(temp_gt_saliency_path)[0]
    with open(temp_gt_fixations_path) as f:
        gt_fixations = json.load(f)

    def frame_ids(p):
        # Frame files are named '<idx>.<ext>'; compare the integer ids.
        return {int(x.split('.')[0]) for x in listdir(p)}

    # Explicit check instead of `assert`: asserts are stripped under `python -O`.
    if frame_ids(gt_saliency_path) != frame_ids(predictions_path):
        raise ValueError(f'Prediction and ground-truth frame sets differ for {video_name}')

    # Zero-padded (%03d) names make lexicographic sort equal numeric order.
    frames = [
        {
            'gt_fixations': gt_fix,
            'gt_saliency_path': gt_sal,
            'predictions_path': pred,
        } for gt_fix, gt_sal, pred in zip(
            gt_fixations,
            [path.join(gt_saliency_path, x) for x in sorted(listdir(gt_saliency_path))],
            [path.join(predictions_path, x) for x in sorted(listdir(predictions_path))],
        )
    ]
    with Pool(num_workers) as pool:
        scores = pool.map(calculate_frame_metrics, frames)

    # Transpose list-of-dicts into dict-of-lists, one list per metric.
    conv_scores = {metric: [s[metric] for s in scores] for metric in scores[0]}

    return {
        'video_name': video_name,
        'cc': np.mean(conv_scores['cc_score']),
        'sim': np.mean(conv_scores['sim_score']),
        'nss': np.mean(conv_scores['nss_score']),
        'auc_judd': np.mean(conv_scores['auc_judd_score']),
    }
129
+
130
+
131
def calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers=4):
    """Score every video in `video_names`, checkpointing after each one.

    Partial results are dumped to tmp2.npy so a crashed run can be inspected.
    Returns the list of per-video result dicts from calculate_metrics.
    """
    detail_result = []
    for video_name in tqdm(video_names):
        # Skip anything already scored in this run.
        if any(entry['video_name'] == video_name for entry in detail_result):
            continue
        short_name = Path(video_name).name
        model_output = str(Path(model_extracted_frames) / short_name)
        gt_gaussians = str(Path(gt_extracted_frames) / short_name)
        fixations_file = Path(gt_fixations_path) / short_name / 'fixations.json'
        detail_result.append(
            calculate_metrics(video_name, model_output, gt_gaussians, fixations_file, num_workers))
        # Crash-resume checkpoint of everything scored so far.
        np.save("tmp2.npy", detail_result)

    return detail_result
146
+
147
+
148
def make_bench(model_extracted_frames, gt_extracted_frames, gt_fixations_path, split_json='TrainTestSplit.json', results_json='results.json', mode='public_test', num_workers=4):
    """Run the benchmark over every extracted video of the chosen split.

    Writes per-video metric lists to `results_json` and prints the per-metric
    means. Raises ValueError if predictions cover fewer videos than the
    ground truth.
    """
    print(num_workers, 'worker(s)')
    print(f'Testing {model_extracted_frames}')

    sm_listdir = listdir(model_extracted_frames)
    gt_listdir = listdir(gt_extracted_frames)

    if len(sm_listdir) < len(gt_listdir):
        msg = f'There are results for only a few videos ({len(sm_listdir)}/{len(gt_listdir)})!'
        raise ValueError(msg)

    video_names = sorted(sm_listdir)
    with open(split_json) as f:
        splits = set(json.load(f)[mode])

    # Keep only videos belonging to the requested split.
    video_names = [name for name in video_names if name in splits]

    detail_result = calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers)
    detail_result = sorted(detail_result, key=lambda res: res['video_name'])

    # Collect the per-video values for each metric.
    result = {metric: [video[metric] for video in detail_result]
              for metric in ('cc', 'sim', 'nss', 'auc_judd')}

    with open(results_json, 'w') as f:
        json.dump(result, f)

    # Summary row with per-metric means. A plain loop replaces the previous
    # list comprehension that was used only for its side effects.
    model_res = {'Model': [model_extracted_frames], 'Mode': [mode]}
    for metric, values in result.items():
        model_res[metric] = [np.mean(values)]

    print(model_res)
181
+
182
+
183
+
184
def extract_frames(input_dir, output_dir, split_json='TrainTestSplit.json', mode='public_test', num_workers=4):
    """Decode every video of the chosen split into per-frame PNGs.

    Each video in `input_dir` whose stem belongs to splits[mode] is expanded
    by ffmpeg into `output_dir/<stem>/NNN.png`. Videos whose output folder
    already exists are skipped, so the function is resumable.
    """

    def poolfunc(x):
        # One video per worker thread; ffmpeg does the CPU work, so threads
        # (not processes) are enough here.
        if x.stem not in splits[mode]:
            return
        dst_vid = dst / x.stem
        if dst_vid.exists():
            pbar.update(1)
            return
        dst_vid.mkdir()
        # Build the argv as a list: the former f-string + .split() broke on
        # paths containing whitespace.
        subprocess.check_call(
            ['ffmpeg', '-v', 'error', '-i', str(x), str(dst_vid / '%03d.png')])
        pbar.update(1)

    with open(split_json) as f:
        splits = json.load(f)

    root = Path(input_dir)
    dst = Path(output_dir)
    dst.mkdir(exist_ok=True)
    videos = list(root.iterdir())
    pbar = tqdm(total=len(splits[mode]))
    with ThreadPool(num_workers) as p:
        p.map(poolfunc, videos)
207
+
208
+
209
+
210
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    # Model side: predicted saliency videos and where to dump their frames.
    parser.add_argument('--model_video_predictions', default='./SampleSubmission-CenterPrior',
                        help='Folder with predicted saliency videos')
    parser.add_argument('--model_extracted_frames', default='./SampleSubmission-CenterPrior-Frames',
                        help='Folder to store prediction frames (should not exist at launch time), requires ~170 GB of free space')

    # Ground-truth side: saliency videos, their frame dump, and raw fixations.
    parser.add_argument('--gt_video_predictions', default='./SaliencyTest/Test',
                        help='Folder from dataset page with gt saliency videos')
    parser.add_argument('--gt_extracted_frames', default='./SaliencyTest-Frames',
                        help='Folder to store ground-truth frames (should not exist at launch time), requires ~170 GB of free space')
    parser.add_argument('--gt_fixations_path', default='./FixationsTest/Test',
                        help='Folder from dataset page with gt saliency fixations')
    parser.add_argument('--split_json', default='./TrainTestSplit.json',
                        help='Json from dataset page with names splitting')

    parser.add_argument('--results_json', default='./results.json')
    parser.add_argument('--mode', default='public_test', help='public_test/private_test')
    parser.add_argument('--num_workers', type=int, default=4)

    args = parser.parse_args()

    # Frame extraction is skipped when the target folder already exists
    # (an existing folder is treated as a completed previous extraction).
    if not path.exists(args.model_extracted_frames):
        print("Extracting", args.model_video_predictions, 'to', args.model_extracted_frames)
        extract_frames(args.model_video_predictions, args.model_extracted_frames, args.split_json, args.mode, args.num_workers)
    if not path.exists(args.gt_extracted_frames):
        print("Extracting", args.gt_video_predictions, 'to', args.gt_extracted_frames)
        extract_frames(args.gt_video_predictions, args.gt_extracted_frames, args.split_json, args.mode, args.num_workers)

    make_bench(args.model_extracted_frames, args.gt_extracted_frames, args.gt_fixations_path, args.split_json, args.results_json, args.mode, args.num_workers)