import os
import copy
import json
import tqdm
import torch
import torch.distributed as dist
from opentad.utils import create_folder
from opentad.models.utils.post_processing import build_classifier, batched_nms
from opentad.evaluations import build_evaluator
from opentad.datasets.base import SlidingWindowDataset
def eval_one_epoch(
    test_loader,
    model,
    cfg,
    logger,
    rank,
    model_ema=None,
    use_amp=False,
    world_size=0,
    not_eval=False,
):
    """Run inference on the test set and (optionally) evaluate the predictions.

    Args:
        test_loader: dataloader over the test dataset.
        model: the detector; called with ``return_loss=False`` for inference.
        cfg: config object; reads ``cfg.inference``, ``cfg.post_processing``,
            ``cfg.evaluation`` and ``cfg.work_dir``.
        logger: logger used for evaluation output (rank 0 only).
        rank (int): distributed rank of this process; only rank 0 saves/evaluates.
        model_ema: optional EMA wrapper; if given, its weights are swapped in
            for inference and the original weights are restored afterwards.
        use_amp (bool): run the forward pass under float16 autocast.
        world_size (int): number of distributed processes (for result gathering).
        not_eval (bool): if True, only gather/save predictions and skip evaluation.
    """
    # Swap in the EMA weights for evaluation, keeping a copy of the current ones.
    if model_ema is not None:
        current_dict = copy.deepcopy(model.state_dict())
        model.load_state_dict(model_ema.module.state_dict())

    cfg.inference["folder"] = os.path.join(cfg.work_dir, "outputs")
    if cfg.inference.save_raw_prediction:
        create_folder(cfg.inference["folder"])

    # External classifier. Default to the dataset's class map and only override
    # when an external classifier is configured. (The original code left
    # `external_cls` unassigned when the "external_cls" key was absent, which
    # raised a NameError at the forward call below.)
    external_cls = test_loader.dataset.class_map
    if "external_cls" in cfg.post_processing and cfg.post_processing.external_cls is not None:
        external_cls = build_classifier(cfg.post_processing.external_cls)

    # Record whether the test dataset is sliding-window based; the result
    # gathering step uses this flag to decide on cross-window NMS.
    cfg.post_processing.sliding_window = isinstance(test_loader.dataset, SlidingWindowDataset)

    # Inference loop: no gradients, optional fp16 autocast.
    model.eval()
    result_dict = {}
    for data_dict in tqdm.tqdm(test_loader, disable=(rank != 0)):
        with torch.cuda.amp.autocast(dtype=torch.float16, enabled=use_amp):
            with torch.no_grad():
                results = model(
                    **data_dict,
                    return_loss=False,
                    infer_cfg=cfg.inference,
                    post_cfg=cfg.post_processing,
                    ext_cls=external_cls,
                )

        # Merge this batch's predictions into the accumulated result dict.
        for k, v in results.items():
            if k in result_dict:
                result_dict[k].extend(v)
            else:
                result_dict[k] = v

    result_dict = gather_ddp_results(world_size, result_dict, cfg.post_processing)

    # Restore the original (non-EMA) weights.
    if model_ema is not None:
        model.load_state_dict(current_dict)

    if rank == 0:
        result_eval = dict(results=result_dict)
        if cfg.post_processing.save_dict:
            result_path = os.path.join(cfg.work_dir, "result_detection_onAnetTest.json")
            with open(result_path, "w") as out:
                json.dump(result_eval, out, indent=4)

        if not not_eval:
            # Build the evaluator and report metrics through the logger.
            evaluator = build_evaluator(dict(prediction_filename=result_eval, **cfg.evaluation))
            logger.info("Evaluation starts...")
            evaluator.evaluate()
            evaluator.logging(logger)
def gather_ddp_results(world_size, result_dict, post_cfg):
    """Gather per-rank prediction dicts across processes and merge them.

    Args:
        world_size (int): number of distributed processes in the all-gather.
        result_dict (dict): this rank's mapping of video key -> list of
            prediction dicts with "segment", "score" and "label" entries.
        post_cfg: post-processing config; reads ``sliding_window`` and ``nms``.

    Returns:
        dict: merged predictions. When the dataset is sliding-window based and
        NMS is configured, overlapping predictions are suppressed per video.
    """
    gather_dict_list = [None for _ in range(world_size)]
    dist.all_gather_object(gather_dict_list, result_dict)

    # Merge the per-rank dicts, concatenating prediction lists per video key.
    result_dict = {}
    for rank_results in gather_dict_list:
        for k, v in rank_results.items():
            if k in result_dict:
                result_dict[k].extend(v)
            else:
                result_dict[k] = v

    # Sliding windows can produce duplicate detections where windows overlap,
    # so run NMS per video if configured.
    if post_cfg.sliding_window and post_cfg.nms is not None:
        tmp_result_dict = {}
        for k, v in result_dict.items():
            segments = torch.Tensor([data["segment"] for data in v])
            scores = torch.Tensor([data["score"] for data in v])

            # batched_nms needs numeric labels: map each distinct label to an
            # integer id; `class_idx` inverts the mapping afterwards.
            class_idx = []
            labels = []
            for data in v:
                if data["label"] not in class_idx:
                    class_idx.append(data["label"])
                labels.append(class_idx.index(data["label"]))
            labels = torch.Tensor(labels)

            segments, scores, labels = batched_nms(segments, scores, labels, **post_cfg.nms)

            # Convert tensors back to plain Python types for JSON serialization.
            results_per_video = []
            for segment, label, score in zip(segments, labels, scores):
                results_per_video.append(
                    dict(
                        segment=[round(seg.item(), 2) for seg in segment],
                        label=class_idx[int(label.item())],
                        score=round(score.item(), 4),
                    )
                )
            tmp_result_dict[k] = results_per_video
        result_dict = tmp_result_dict

    return result_dict