# File: STTS-main/MViT/slowfast/utils/weight_init_helper.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Utility function for weight initialization"""
import torch.nn as nn
from fvcore.nn.weight_init import c2_msra_fill
def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True):
"""
Performs ResNet style weight initialization.
    Args:
        model (nn.Module): the model to initialize.
        fc_init_std (float): the expected standard deviation for fc layers.
        zero_init_final_bn (bool): if True, zero initialize the final bn for
            every bottleneck.
"""
for m in model.modules():
if isinstance(m, nn.Conv3d):
"""
Follow the initialization method proposed in:
{He, Kaiming, et al.
"Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification."
arXiv preprint arXiv:1502.01852 (2015)}
"""
c2_msra_fill(m)
elif isinstance(m, nn.BatchNorm3d):
if (
hasattr(m, "transform_final_bn")
and m.transform_final_bn
and zero_init_final_bn
):
batchnorm_weight = 0.0
else:
batchnorm_weight = 1.0
if m.weight is not None:
m.weight.data.fill_(batchnorm_weight)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=fc_init_std)
if m.bias is not None:
m.bias.data.zero_()
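
# A minimal sketch (editor's addition, not part of the original file):
# exercising init_weights on a throwaway model; the layer shapes are arbitrary.
def _demo_init_weights():
    model = nn.Sequential(
        nn.Conv3d(3, 8, kernel_size=3),
        nn.BatchNorm3d(8),
        nn.Linear(8, 10),
    )
    init_weights(model, fc_init_std=0.01, zero_init_final_bn=True)
    # No module here sets transform_final_bn, so every BN weight stays 1.0,
    # and the fc bias is zero-filled.
    assert float(model[1].weight.mean()) == 1.0
    assert float(model[2].bias.abs().sum()) == 0.0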

# File: STTS-main/MViT/slowfast/utils/multiprocessing.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Multiprocessing helpers."""
import torch
def run(
local_rank,
num_proc,
func,
init_method,
shard_id,
num_shards,
backend,
cfg,
output_queue=None,
):
"""
Runs a function from a child process.
Args:
local_rank (int): rank of the current process on the current machine.
num_proc (int): number of processes per machine.
        func (function): function to execute on each of the processes.
init_method (string): method to initialize the distributed training.
            TCP initialization: requiring a network address reachable from all
            processes, followed by the port.
Shared file-system initialization: makes use of a file system that
is shared and visible from all machines. The URL should start with
file:// and contain a path to a non-existent file on a shared file
system.
shard_id (int): the rank of the current machine.
num_shards (int): number of overall machines for the distributed
training job.
backend (string): three distributed backends ('nccl', 'gloo', 'mpi') are
            supported, each with different capabilities. Details can be found
here:
https://pytorch.org/docs/stable/distributed.html
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
output_queue (queue): can optionally be used to return values from the
master process.
"""
# Initialize the process group.
world_size = num_proc * num_shards
rank = shard_id * num_proc + local_rank
try:
torch.distributed.init_process_group(
backend=backend,
init_method=init_method,
world_size=world_size,
rank=rank,
)
except Exception as e:
raise e
torch.cuda.set_device(local_rank)
ret = func(cfg)
if output_queue is not None and local_rank == 0:
output_queue.put(ret)
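
# A minimal sketch (editor's addition, not part of the original file): `run`
# is written to be the target of torch.multiprocessing.spawn, which prepends
# local_rank to `args`. The init_method URL, shard layout and
# `train_func`/`cfg` below are placeholders.
def _demo_spawn(cfg, train_func, num_proc=2):
    torch.multiprocessing.spawn(
        run,
        nprocs=num_proc,
        args=(num_proc, train_func, 'tcp://localhost:9999', 0, 1, 'nccl', cfg),
    )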

# File: STTS-main/MViT/slowfast/utils/bn_helper.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""bn helper."""
import itertools
import torch
@torch.no_grad()
def compute_and_update_bn_stats(model, data_loader, num_batches=200):
"""
    Compute and update the batch norm stats to make them more precise. During
    training both the bn stats and the weights change after every iteration, so
    the running stats can not precisely reflect the latest state of the current
    model. Here the bn stats are recomputed with the weights fixed, to make the
    running mean and running var more precise.
Args:
        model (model): the model used to compute and update the bn stats.
        data_loader (dataloader): dataloader used to provide inputs.
        num_batches (int): number of batches used to compute the stats.
"""
# Prepares all the bn layers.
bn_layers = [
m
for m in model.modules()
if any(
(
isinstance(m, bn_type)
for bn_type in (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
)
)
)
]
# In order to make the running stats only reflect the current batch, the
# momentum is disabled.
# bn.running_mean = (1 - momentum) * bn.running_mean + momentum * batch_mean
# Setting the momentum to 1.0 to compute the stats without momentum.
momentum_actual = [bn.momentum for bn in bn_layers]
for bn in bn_layers:
bn.momentum = 1.0
# Calculates the running iterations for precise stats computation.
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_square_mean = [torch.zeros_like(bn.running_var) for bn in bn_layers]
for ind, (inputs, _, _) in enumerate(
itertools.islice(data_loader, num_batches)
):
# Forwards the model to update the bn stats.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].float().cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
model(inputs)
for i, bn in enumerate(bn_layers):
# Accumulates the bn stats.
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
# $E(x^2) = Var(x) + E(x)^2$.
cur_square_mean = bn.running_var + bn.running_mean ** 2
running_square_mean[i] += (
cur_square_mean - running_square_mean[i]
) / (ind + 1)
for i, bn in enumerate(bn_layers):
bn.running_mean = running_mean[i]
# Var(x) = $E(x^2) - E(x)^2$.
bn.running_var = running_square_mean[i] - bn.running_mean ** 2
# Sets the precise bn stats.
bn.momentum = momentum_actual[i]
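
# A minimal sketch (editor's addition): typical post-training usage. BN layers
# only update running stats in train mode, so the model is switched to train
# mode here; the @torch.no_grad() decorator above already keeps the weights
# untouched. `train_loader` is assumed to yield (inputs, label, index) triples,
# matching the (inputs, _, _) unpacking above.
def _demo_precise_bn(model, train_loader):
    model.train()
    compute_and_update_bn_stats(model, train_loader, num_batches=200)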

# File: STTS-main/MViT/slowfast/utils/meters.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Meters."""
import datetime
import numpy as np
import os
from collections import defaultdict, deque
import torch
from fvcore.common.timer import Timer
from sklearn.metrics import average_precision_score
import slowfast.datasets.ava_helper as ava_helper
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
from slowfast.utils.ava_eval_helper import (
evaluate_ava,
read_csv,
read_exclusions,
read_labelmap,
)
logger = logging.get_logger(__name__)
def get_ava_mini_groundtruth(full_groundtruth):
"""
    Get the groundtruth annotations corresponding to the "subset" of the AVA val set.
We define the subset to be the frames such that (second % 4 == 0).
We optionally use subset for faster evaluation during training
(in order to track training progress).
Args:
        full_groundtruth (list): the full groundtruth, given as three dicts
            (boxes, labels, scores) keyed by "<video_id>,<second>".
"""
ret = [defaultdict(list), defaultdict(list), defaultdict(list)]
for i in range(3):
for key in full_groundtruth[i].keys():
if int(key.split(",")[1]) % 4 == 0:
ret[i][key] = full_groundtruth[i][key]
return ret
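
# Editor's note (sketch): keys in the groundtruth dicts are
# "<video_id>,<second>" strings, so keeping only seconds divisible by 4
# retains roughly a quarter of the full val set for fast progress tracking.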
class AVAMeter(object):
"""
Measure the AVA train, val, and test stats.
"""
def __init__(self, overall_iters, cfg, mode):
"""
overall_iters (int): the overall number of iterations of one epoch.
cfg (CfgNode): configs.
mode (str): `train`, `val`, or `test` mode.
"""
self.cfg = cfg
self.lr = None
self.loss = ScalarMeter(cfg.LOG_PERIOD)
self.full_ava_test = cfg.AVA.FULL_TEST_ON_VAL
self.mode = mode
self.iter_timer = Timer()
self.data_timer = Timer()
self.net_timer = Timer()
self.all_preds = []
self.all_ori_boxes = []
self.all_metadata = []
self.overall_iters = overall_iters
self.excluded_keys = read_exclusions(
os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.EXCLUSION_FILE)
)
self.categories, self.class_whitelist = read_labelmap(
os.path.join(cfg.AVA.ANNOTATION_DIR, cfg.AVA.LABEL_MAP_FILE)
)
gt_filename = os.path.join(
cfg.AVA.ANNOTATION_DIR, cfg.AVA.GROUNDTRUTH_FILE
)
self.full_groundtruth = read_csv(gt_filename, self.class_whitelist)
self.mini_groundtruth = get_ava_mini_groundtruth(self.full_groundtruth)
_, self.video_idx_to_name = ava_helper.load_image_lists(
cfg, mode == "train"
)
self.output_dir = cfg.OUTPUT_DIR
def log_iter_stats(self, cur_epoch, cur_iter):
"""
Log the stats.
Args:
cur_epoch (int): the current epoch.
cur_iter (int): the current iteration.
"""
if (cur_iter + 1) % self.cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
if self.mode == "train":
stats = {
"_type": "{}_iter".format(self.mode),
"cur_epoch": "{}".format(cur_epoch + 1),
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"dt": self.iter_timer.seconds(),
"dt_data": self.data_timer.seconds(),
"dt_net": self.net_timer.seconds(),
"mode": self.mode,
"loss": self.loss.get_win_median(),
"lr": self.lr,
}
elif self.mode == "val":
stats = {
"_type": "{}_iter".format(self.mode),
"cur_epoch": "{}".format(cur_epoch + 1),
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"dt": self.iter_timer.seconds(),
"dt_data": self.data_timer.seconds(),
"dt_net": self.net_timer.seconds(),
"mode": self.mode,
}
elif self.mode == "test":
stats = {
"_type": "{}_iter".format(self.mode),
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"dt": self.iter_timer.seconds(),
"dt_data": self.data_timer.seconds(),
"dt_net": self.net_timer.seconds(),
"mode": self.mode,
}
else:
raise NotImplementedError("Unknown mode: {}".format(self.mode))
logging.log_json_stats(stats)
def iter_tic(self):
"""
Start to record time.
"""
self.iter_timer.reset()
self.data_timer.reset()
def iter_toc(self):
"""
        Stop recording time.
"""
self.iter_timer.pause()
self.net_timer.pause()
def data_toc(self):
self.data_timer.pause()
self.net_timer.reset()
def reset(self):
"""
Reset the Meter.
"""
self.loss.reset()
self.all_preds = []
self.all_ori_boxes = []
self.all_metadata = []
def update_stats(self, preds, ori_boxes, metadata, loss=None, lr=None):
"""
Update the current stats.
Args:
preds (tensor): prediction embedding.
ori_boxes (tensor): original boxes (x1, y1, x2, y2).
metadata (tensor): metadata of the AVA data.
loss (float): loss value.
lr (float): learning rate.
"""
if self.mode in ["val", "test"]:
self.all_preds.append(preds)
self.all_ori_boxes.append(ori_boxes)
self.all_metadata.append(metadata)
if loss is not None:
self.loss.add_value(loss)
if lr is not None:
self.lr = lr
def finalize_metrics(self, log=True):
"""
Calculate and log the final AVA metrics.
"""
all_preds = torch.cat(self.all_preds, dim=0)
all_ori_boxes = torch.cat(self.all_ori_boxes, dim=0)
all_metadata = torch.cat(self.all_metadata, dim=0)
if self.mode == "test" or (self.full_ava_test and self.mode == "val"):
groundtruth = self.full_groundtruth
else:
groundtruth = self.mini_groundtruth
self.full_map = evaluate_ava(
all_preds,
all_ori_boxes,
all_metadata.tolist(),
self.excluded_keys,
self.class_whitelist,
self.categories,
groundtruth=groundtruth,
video_idx_to_name=self.video_idx_to_name,
)
if log:
stats = {"mode": self.mode, "map": self.full_map}
logging.log_json_stats(stats)
def log_epoch_stats(self, cur_epoch):
"""
Log the stats of the current epoch.
Args:
cur_epoch (int): the number of current epoch.
"""
if self.mode in ["val", "test"]:
self.finalize_metrics(log=False)
stats = {
"_type": "{}_epoch".format(self.mode),
"cur_epoch": "{}".format(cur_epoch + 1),
"mode": self.mode,
"map": self.full_map,
"gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
"RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
}
logging.log_json_stats(stats)
class TestMeter(object):
"""
    Perform the multi-view ensemble for testing: each video with a unique index
will be sampled with multiple clips, and the predictions of the clips will
be aggregated to produce the final prediction for the video.
The accuracy is calculated with the given ground truth labels.
"""
def __init__(
self,
num_videos,
num_clips,
num_cls,
overall_iters,
multi_label=False,
ensemble_method="sum",
):
"""
Construct tensors to store the predictions and labels. Expect to get
num_clips predictions from each video, and calculate the metrics on
num_videos videos.
Args:
num_videos (int): number of videos to test.
num_clips (int): number of clips sampled from each video for
aggregating the final prediction for the video.
num_cls (int): number of classes for each prediction.
overall_iters (int): overall iterations for testing.
multi_label (bool): if True, use map as the metric.
ensemble_method (str): method to perform the ensemble, options
include "sum", and "max".
"""
self.total_net_time = 0.
self.total_time = 0.
self.total_data_time = 0.
self.iter_timer = Timer()
self.data_timer = Timer()
self.net_timer = Timer()
self.num_clips = num_clips
self.overall_iters = overall_iters
self.multi_label = multi_label
self.ensemble_method = ensemble_method
# Initialize tensors.
self.video_preds = torch.zeros((num_videos, num_cls))
if multi_label:
self.video_preds -= 1e10
self.video_labels = (
torch.zeros((num_videos, num_cls))
if multi_label
else torch.zeros((num_videos)).long()
)
self.clip_count = torch.zeros((num_videos)).long()
self.topk_accs = []
self.stats = {}
# Reset metric.
self.reset()
def reset(self):
"""
Reset the metric.
"""
self.clip_count.zero_()
self.video_preds.zero_()
if self.multi_label:
self.video_preds -= 1e10
self.video_labels.zero_()
def update_stats(self, preds, labels, clip_ids):
"""
        Collect the predictions from the current batch and perform on-the-fly
summation as ensemble.
Args:
preds (tensor): predictions from the current batch. Dimension is
N x C where N is the batch size and C is the channel size
(num_cls).
labels (tensor): the corresponding labels of the current batch.
Dimension is N.
clip_ids (tensor): clip indexes of the current batch, dimension is
N.
"""
for ind in range(preds.shape[0]):
vid_id = int(clip_ids[ind]) // self.num_clips
if self.video_labels[vid_id].sum() > 0:
assert torch.equal(
self.video_labels[vid_id].type(torch.FloatTensor),
labels[ind].type(torch.FloatTensor),
)
self.video_labels[vid_id] = labels[ind]
if self.ensemble_method == "sum":
self.video_preds[vid_id] += preds[ind]
elif self.ensemble_method == "max":
self.video_preds[vid_id] = torch.max(
self.video_preds[vid_id], preds[ind]
)
else:
raise NotImplementedError(
"Ensemble Method {} is not supported".format(
self.ensemble_method
)
)
self.clip_count[vid_id] += 1
def log_iter_stats(self, cur_iter):
"""
Log the stats.
Args:
cur_iter (int): the current iteration of testing.
"""
eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"split": "test_iter",
"cur_iter": "{}".format(cur_iter + 1),
"eta": eta,
"time_diff": self.iter_timer.seconds(),
"data_time_diff": self.data_timer.seconds(),
"model_time_diff": self.net_timer.seconds(),
}
self.total_net_time += self.net_timer.seconds()
self.total_time += self.iter_timer.seconds()
self.total_data_time += self.data_timer.seconds()
logging.log_json_stats(stats)
def iter_tic(self):
"""
Start to record time.
"""
self.iter_timer.reset()
self.data_timer.reset()
def iter_toc(self):
"""
        Stop recording time.
"""
self.iter_timer.pause()
self.net_timer.pause()
def data_toc(self):
self.data_timer.pause()
self.net_timer.reset()
def finalize_metrics(self, ks=(1, 5)):
"""
Calculate and log the final ensembled metrics.
ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
"""
if not all(self.clip_count == self.num_clips):
logger.warning(
"clip count {} ~= num clips {}".format(
", ".join(
[
"{}: {}".format(i, k)
for i, k in enumerate(self.clip_count.tolist())
]
),
self.num_clips,
)
)
self.stats = {"split": "test_final"}
if self.multi_label:
map = get_map(
self.video_preds.cpu().numpy(), self.video_labels.cpu().numpy()
)
self.stats["map"] = map
else:
num_topks_correct = metrics.topks_correct(
self.video_preds, self.video_labels, ks
)
topks = [
(x / self.video_preds.size(0)) * 100.0
for x in num_topks_correct
]
assert len({len(ks), len(topks)}) == 1
for k, topk in zip(ks, topks):
self.stats["top{}_acc".format(k)] = "{:.{prec}f}".format(
topk, prec=2
)
self.stats['total_time'] = self.total_time
self.stats['total_data_time'] = self.total_data_time
self.stats['total_net_time'] = self.total_net_time
logging.log_json_stats(self.stats)
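
# A minimal sketch (editor's addition, not part of the original file): two
# videos with three clips each, ensembled by summation; clip i is routed to
# video i // num_clips.
def _demo_test_meter():
    meter = TestMeter(num_videos=2, num_clips=3, num_cls=4, overall_iters=1)
    preds = torch.rand(6, 4)
    labels = torch.tensor([0, 0, 0, 1, 1, 1])
    meter.update_stats(preds, labels, clip_ids=torch.arange(6))
    assert bool((meter.clip_count == 3).all())
    meter.finalize_metrics(ks=(1,))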
class ScalarMeter(object):
"""
    A scalar meter uses a deque to track a series of scalar values with a given
window size. It supports calculating the median and average values of the
window, and also supports calculating the global average.
"""
def __init__(self, window_size):
"""
Args:
window_size (int): size of the max length of the deque.
"""
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
"""
Reset the deque.
"""
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
"""
Add a new scalar value to the deque.
"""
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
"""
Calculate the current median value of the deque.
"""
return np.median(self.deque)
def get_win_avg(self):
"""
Calculate the current average value of the deque.
"""
return np.mean(self.deque)
def get_global_avg(self):
"""
Calculate the global mean value.
"""
return self.total / self.count
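
# A minimal sketch (editor's addition): the window tracks only the most
# recent values, while the global average covers everything ever added.
def _demo_scalar_meter():
    meter = ScalarMeter(window_size=3)
    for v in [1.0, 2.0, 3.0, 10.0]:
        meter.add_value(v)
    assert meter.get_win_median() == 3.0  # median of the window [2, 3, 10]
    assert meter.get_global_avg() == 4.0  # (1 + 2 + 3 + 10) / 4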
class TrainMeter(object):
"""
Measure training stats.
"""
def __init__(self, epoch_iters, cfg):
"""
Args:
epoch_iters (int): the overall number of iterations of one epoch.
cfg (CfgNode): configs.
"""
self._cfg = cfg
self.epoch_iters = epoch_iters
self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters
self.iter_timer = Timer()
self.data_timer = Timer()
self.net_timer = Timer()
self.loss = ScalarMeter(cfg.LOG_PERIOD)
self.loss_total = 0.0
self.lr = None
# Current minibatch errors (smoothed over a window).
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Number of misclassified examples.
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.output_dir = cfg.OUTPUT_DIR
def reset(self):
"""
Reset the Meter.
"""
self.loss.reset()
self.loss_total = 0.0
self.lr = None
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
def iter_tic(self):
"""
Start to record time.
"""
self.iter_timer.reset()
self.data_timer.reset()
def iter_toc(self):
"""
        Stop recording time.
"""
self.iter_timer.pause()
self.net_timer.pause()
def data_toc(self):
self.data_timer.pause()
self.net_timer.reset()
def update_stats(self, top1_err, top5_err, loss, lr, mb_size):
"""
Update the current stats.
Args:
top1_err (float): top1 error rate.
top5_err (float): top5 error rate.
loss (float): loss value.
lr (float): learning rate.
mb_size (int): mini batch size.
"""
self.loss.add_value(loss)
self.lr = lr
self.loss_total += loss * mb_size
self.num_samples += mb_size
if not self._cfg.DATA.MULTI_LABEL:
# Current minibatch stats
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
# Aggregate stats
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
def log_iter_stats(self, cur_epoch, cur_iter):
"""
log the stats of the current iteration.
Args:
cur_epoch (int): the number of current epoch.
cur_iter (int): the number of current iteration.
"""
if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (
self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1)
)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "train_iter",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
"iter": "{}/{}".format(cur_iter + 1, self.epoch_iters),
"dt": self.iter_timer.seconds(),
"dt_data": self.data_timer.seconds(),
"dt_net": self.net_timer.seconds(),
"eta": eta,
"loss": self.loss.get_win_median(),
"lr": self.lr,
"gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
}
if not self._cfg.DATA.MULTI_LABEL:
stats["top1_err"] = self.mb_top1_err.get_win_median()
stats["top5_err"] = self.mb_top5_err.get_win_median()
logging.log_json_stats(stats)
def log_epoch_stats(self, cur_epoch):
"""
Log the stats of the current epoch.
Args:
cur_epoch (int): the number of current epoch.
"""
eta_sec = self.iter_timer.seconds() * (
self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters
)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "train_epoch",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
"dt": self.iter_timer.seconds(),
"dt_data": self.data_timer.seconds(),
"dt_net": self.net_timer.seconds(),
"eta": eta,
"lr": self.lr,
"gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
"RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
}
if not self._cfg.DATA.MULTI_LABEL:
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
avg_loss = self.loss_total / self.num_samples
stats["top1_err"] = top1_err
stats["top5_err"] = top5_err
stats["loss"] = avg_loss
logging.log_json_stats(stats)
class ValMeter(object):
"""
Measures validation stats.
"""
def __init__(self, max_iter, cfg):
"""
Args:
max_iter (int): the max number of iteration of the current epoch.
cfg (CfgNode): configs.
"""
self._cfg = cfg
self.max_iter = max_iter
self.iter_timer = Timer()
self.data_timer = Timer()
self.net_timer = Timer()
# Current minibatch errors (smoothed over a window).
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Min errors (over the full val set).
self.min_top1_err = 100.0
self.min_top5_err = 100.0
# Number of misclassified examples.
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.all_preds = []
self.all_labels = []
self.output_dir = cfg.OUTPUT_DIR
def reset(self):
"""
Reset the Meter.
"""
self.iter_timer.reset()
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
self.all_preds = []
self.all_labels = []
def iter_tic(self):
"""
Start to record time.
"""
self.iter_timer.reset()
self.data_timer.reset()
def iter_toc(self):
"""
        Stop recording time.
"""
self.iter_timer.pause()
self.net_timer.pause()
def data_toc(self):
self.data_timer.pause()
self.net_timer.reset()
def update_stats(self, top1_err, top5_err, mb_size):
"""
Update the current stats.
Args:
top1_err (float): top1 error rate.
top5_err (float): top5 error rate.
mb_size (int): mini batch size.
"""
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
self.num_samples += mb_size
def update_predictions(self, preds, labels):
"""
Update predictions and labels.
Args:
preds (tensor): model output predictions.
labels (tensor): labels.
"""
# TODO: merge update_prediction with update_stats.
self.all_preds.append(preds)
self.all_labels.append(labels)
def log_iter_stats(self, cur_epoch, cur_iter):
"""
log the stats of the current iteration.
Args:
cur_epoch (int): the number of current epoch.
cur_iter (int): the number of current iteration.
"""
if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0:
return
eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1)
eta = str(datetime.timedelta(seconds=int(eta_sec)))
stats = {
"_type": "val_iter",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
"iter": "{}/{}".format(cur_iter + 1, self.max_iter),
"time_diff": self.iter_timer.seconds(),
"eta": eta,
"gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
}
if not self._cfg.DATA.MULTI_LABEL:
stats["top1_err"] = self.mb_top1_err.get_win_median()
stats["top5_err"] = self.mb_top5_err.get_win_median()
logging.log_json_stats(stats)
def log_epoch_stats(self, cur_epoch):
"""
Log the stats of the current epoch.
Args:
cur_epoch (int): the number of current epoch.
"""
stats = {
"_type": "val_epoch",
"epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH),
"time_diff": self.iter_timer.seconds(),
"gpu_mem": "{:.2f}G".format(misc.gpu_mem_usage()),
"RAM": "{:.2f}/{:.2f}G".format(*misc.cpu_mem_usage()),
}
if self._cfg.DATA.MULTI_LABEL:
stats["map"] = get_map(
torch.cat(self.all_preds).cpu().numpy(),
torch.cat(self.all_labels).cpu().numpy(),
)
else:
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
self.min_top1_err = min(self.min_top1_err, top1_err)
self.min_top5_err = min(self.min_top5_err, top5_err)
stats["top1_err"] = top1_err
stats["top5_err"] = top5_err
stats["min_top1_err"] = self.min_top1_err
stats["min_top5_err"] = self.min_top5_err
logging.log_json_stats(stats)
def get_map(preds, labels):
"""
Compute mAP for multi-label case.
Args:
preds (numpy tensor): num_examples x num_classes.
labels (numpy tensor): num_examples x num_classes.
Returns:
        mean_ap (float): final mAP score.
"""
logger.info("Getting mAP for {} examples".format(preds.shape[0]))
preds = preds[:, ~(np.all(labels == 0, axis=0))]
labels = labels[:, ~(np.all(labels == 0, axis=0))]
aps = [0]
try:
aps = average_precision_score(labels, preds, average=None)
except ValueError:
print(
"Average precision requires a sufficient number of samples \
in a batch which are missing in this sample."
)
mean_ap = np.mean(aps)
return mean_ap
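
# A minimal sketch (editor's addition): mAP on a toy multi-label problem;
# classes with no positive label at all are dropped before scoring.
def _demo_get_map():
    preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.6]])
    labels = np.array([[1, 0], [0, 1], [1, 1]])
    print(get_map(preds, labels))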
class EpochTimer:
"""
A timer which computes the epoch time.
"""
def __init__(self) -> None:
self.timer = Timer()
self.timer.reset()
self.epoch_times = []
def reset(self) -> None:
"""
Reset the epoch timer.
"""
self.timer.reset()
self.epoch_times = []
def epoch_tic(self):
"""
Start to record time.
"""
self.timer.reset()
def epoch_toc(self):
"""
        Stop recording time.
"""
self.timer.pause()
self.epoch_times.append(self.timer.seconds())
def last_epoch_time(self):
"""
Get the time for the last epoch.
"""
assert len(self.epoch_times) > 0, "No epoch time has been recorded!"
return self.epoch_times[-1]
def avg_epoch_time(self):
"""
Calculate the average epoch time among the recorded epochs.
"""
assert len(self.epoch_times) > 0, "No epoch time has been recorded!"
return np.mean(self.epoch_times)
def median_epoch_time(self):
"""
Calculate the median epoch time among the recorded epochs.
"""
assert len(self.epoch_times) > 0, "No epoch time has been recorded!"
return np.median(self.epoch_times)

# File: STTS-main/VideoSwin/tools/test.py
import argparse
import os
import os.path as osp
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.fileio.io import file_handlers
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
from mmaction.utils import register_module_hooks
# TODO import test functions from mmcv and delete them from mmaction2
try:
from mmcv.engine import multi_gpu_test, single_gpu_test
except (ImportError, ModuleNotFoundError):
warnings.warn(
'DeprecationWarning: single_gpu_test, multi_gpu_test, '
'collect_results_cpu, collect_results_gpu from mmaction2 will be '
'deprecated. Please install mmcv through master branch.')
from mmaction.apis import multi_gpu_test, single_gpu_test
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--out',
default=None,
help='output result file in pkl/yaml/json format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g.,'
' "top_k_accuracy", "mean_class_accuracy" for video dataset')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
default={},
help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecated), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
default={},
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--average-clips',
choices=['score', 'prob', None],
default=None,
help='average type when averaging test clips')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--onnx',
action='store_true',
help='Whether to test with onnx model or not')
parser.add_argument(
'--tensorrt',
action='store_true',
help='Whether to test with TensorRT engine or not')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        # propagate the deprecated value so later parse_args() calls see it
        parser.set_defaults(eval_options=args.options)
    return parser
def turn_off_pretrained(cfg):
# recursively find all pretrained in the model config,
# and set them None to avoid redundant pretrain steps for testing
if 'pretrained' in cfg:
cfg.pretrained = None
# recursively turn off pretrained value
for sub_cfg in cfg.values():
if isinstance(sub_cfg, dict):
turn_off_pretrained(sub_cfg)
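
# A minimal sketch (editor's addition, not part of the original file):
# `pretrained` keys are cleared at every nesting level, so no redundant
# weight loading happens at test time.
def _demo_turn_off_pretrained():
    from mmcv import ConfigDict
    cfg = ConfigDict(
        dict(pretrained='x.pth', backbone=dict(pretrained='y.pth')))
    turn_off_pretrained(cfg)
    assert cfg.pretrained is None and cfg.backbone.pretrained is None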
def inference_pytorch(args, cfg, distributed, data_loader):
"""Get predictions by pytorch models."""
if args.average_clips is not None:
# You can set average_clips during testing, it will override the
# original setting
if cfg.model.get('test_cfg') is None and cfg.get('test_cfg') is None:
cfg.model.setdefault('test_cfg',
dict(average_clips=args.average_clips))
else:
if cfg.model.get('test_cfg') is not None:
cfg.model.test_cfg.average_clips = args.average_clips
else:
cfg.test_cfg.average_clips = args.average_clips
# remove redundant pretrain steps for testing
turn_off_pretrained(cfg.model)
# build the model and load checkpoint
model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
if len(cfg.module_hooks) > 0:
register_module_hooks(model, cfg.module_hooks)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
print('successfully load the pretrained checkpoint from: ', args.checkpoint)
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
return outputs
def inference_tensorrt(ckpt_path, distributed, data_loader, batch_size):
"""Get predictions by TensorRT engine.
For now, multi-gpu mode and dynamic tensor shape are not supported.
"""
assert not distributed, \
'TensorRT engine inference only supports single gpu mode.'
import tensorrt as trt
from mmcv.tensorrt.tensorrt_utils import (torch_dtype_from_trt,
torch_device_from_trt)
# load engine
with trt.Logger() as logger, trt.Runtime(logger) as runtime:
with open(ckpt_path, mode='rb') as f:
engine_bytes = f.read()
engine = runtime.deserialize_cuda_engine(engine_bytes)
# For now, only support fixed input tensor
cur_batch_size = engine.get_binding_shape(0)[0]
assert batch_size == cur_batch_size, \
('Dataset and TensorRT model should share the same batch size, '
f'but get {batch_size} and {cur_batch_size}')
context = engine.create_execution_context()
# get output tensor
dtype = torch_dtype_from_trt(engine.get_binding_dtype(1))
shape = tuple(context.get_binding_shape(1))
device = torch_device_from_trt(engine.get_location(1))
output = torch.empty(
size=shape, dtype=dtype, device=device, requires_grad=False)
# get predictions
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
bindings = [
data['imgs'].contiguous().data_ptr(),
output.contiguous().data_ptr()
]
context.execute_async_v2(bindings,
torch.cuda.current_stream().cuda_stream)
results.extend(output.cpu().numpy())
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
return results
def inference_onnx(ckpt_path, distributed, data_loader, batch_size):
"""Get predictions by ONNX.
For now, multi-gpu mode and dynamic tensor shape are not supported.
"""
assert not distributed, 'ONNX inference only supports single gpu mode.'
import onnx
import onnxruntime as rt
# get input tensor name
onnx_model = onnx.load(ckpt_path)
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [node.name for node in onnx_model.graph.initializer]
net_feed_input = list(set(input_all) - set(input_initializer))
assert len(net_feed_input) == 1
# For now, only support fixed tensor shape
input_tensor = None
for tensor in onnx_model.graph.input:
if tensor.name == net_feed_input[0]:
input_tensor = tensor
break
cur_batch_size = input_tensor.type.tensor_type.shape.dim[0].dim_value
assert batch_size == cur_batch_size, \
('Dataset and ONNX model should share the same batch size, '
f'but get {batch_size} and {cur_batch_size}')
# get predictions
sess = rt.InferenceSession(ckpt_path)
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
imgs = data['imgs'].cpu().numpy()
onnx_result = sess.run(None, {net_feed_input[0]: imgs})[0]
results.extend(onnx_result)
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
return results
def main(args):
if args.tensorrt and args.onnx:
raise ValueError(
'Cannot set onnx mode and tensorrt mode at the same time.')
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# Load output_config from cfg
output_config = cfg.get('output_config', {})
if args.out:
# Overwrite output_config from args.out
output_config = Config._merge_a_into_b(
dict(out=args.out), output_config)
# Load eval_config from cfg
eval_config = cfg.get('eval_config', {})
if args.eval:
# Overwrite eval_config from args.eval
eval_config = Config._merge_a_into_b(
dict(metrics=args.eval), eval_config)
if args.eval_options:
# Add options from args.eval_options
eval_config = Config._merge_a_into_b(args.eval_options, eval_config)
assert output_config or eval_config, \
('Please specify at least one operation (save or eval the '
'results) with the argument "--out" or "--eval"')
dataset_type = cfg.data.test.type
if output_config.get('out', None):
if 'output_format' in output_config:
            # ugly workaround to make recognition and localization the same
warnings.warn(
'Skip checking `output_format` in localization task.')
else:
out = output_config['out']
# make sure the dirname of the output path exists
mmcv.mkdir_or_exist(osp.dirname(out))
_, suffix = osp.splitext(out)
if dataset_type == 'AVADataset':
assert suffix[1:] == 'csv', ('For AVADataset, the format of '
'the output file should be csv')
else:
assert suffix[1:] in file_handlers, (
'The format of the output '
'file should be json, pickle or yaml')
# set cudnn benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# The flag is used to register module's hooks
cfg.setdefault('module_hooks', [])
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=distributed,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
if args.tensorrt:
outputs = inference_tensorrt(args.checkpoint, distributed, data_loader,
dataloader_setting['videos_per_gpu'])
elif args.onnx:
outputs = inference_onnx(args.checkpoint, distributed, data_loader,
dataloader_setting['videos_per_gpu'])
else:
outputs = inference_pytorch(args, cfg, distributed, data_loader)
rank, _ = get_dist_info()
if rank == 0:
if output_config.get('out', None):
out = output_config['out']
print(f'\nwriting results to {out}')
dataset.dump_results(outputs, **output_config)
if eval_config:
eval_res = dataset.evaluate(outputs, **eval_config)
for name, val in eval_res.items():
print(f'{name}: {val:.04f}')
if __name__ == '__main__':
parser = parse_args()
args = parser.parse_args()
main(args)

# File: STTS-main/VideoSwin/tools/train.py
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist, set_random_seed, load_checkpoint
from mmcv.utils import get_git_hash
from mmaction import __version__
from mmaction.apis import train_model
from mmaction.datasets import build_dataset
from mmaction.models import build_model
from mmaction.utils import collect_env, get_root_logger, register_module_hooks
def get_args_parser():
parser = argparse.ArgumentParser(description='Train a recognizer', add_help=False)
parser.add_argument('--config', help='train config file path')
parser.add_argument('--checkpoint', help='train checkpoint file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument("--auto_resume", action='store_true', default=False)
parser.add_argument(
'--validate',
action='store_true',
help='whether to evaluate the checkpoint during training')
parser.add_argument(
'--test-last',
action='store_true',
help='whether to test the checkpoint after training')
parser.add_argument(
'--test-best',
action='store_true',
help=('whether to test the best checkpoint (if applicable) after '
'training'))
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
#args = parser.parse_args()
#if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(parser.parse_args().local_rank)
return parser
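
# Editor's note (hypothetical paths and flags): a typical single-GPU run is
#   python tools/train.py --config <config.py> --work_dir <dir> --validate
# while distributed runs go through a launcher and pass --launcher pytorch.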
def main(args):
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority:
# CLI > config file > default (base filename)
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = osp.join(args.work_dir,
osp.splitext(osp.basename(args.config))[0])
print(cfg.work_dir)
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.auto_resume = args.auto_resume
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# The flag is used to determine whether it is omnisource training
cfg.setdefault('omnisource', False)
# The flag is used to register module's hooks
cfg.setdefault('module_hooks', [])
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config: {cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['config_name'] = osp.basename(args.config)
meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\'))
model = build_model(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if len(cfg.module_hooks) > 0:
register_module_hooks(model, cfg.module_hooks)
if args.checkpoint is not None:
load_checkpoint(model, args.checkpoint, map_location='cpu')
if cfg.omnisource:
# If omnisource flag is set, cfg.data.train should be a list
assert isinstance(cfg.data.train, list)
datasets = [build_dataset(dataset) for dataset in cfg.data.train]
else:
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
        # For simplicity, omnisource is not compatible with the val workflow;
# we recommend you to use `--validate`
assert not cfg.omnisource
if args.validate:
warnings.warn('val workflow is duplicated with `--validate`, '
'it is recommended to use `--validate`. see '
'https://github.com/open-mmlab/mmaction2/pull/123')
val_dataset = copy.deepcopy(cfg.data.val)
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmaction version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmaction_version=__version__ + get_git_hash(digits=7),
config=cfg.pretty_text)
test_option = dict(test_last=args.test_last, test_best=args.test_best)
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=args.validate,
test=test_option,
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
parser = get_args_parser()
args = parser.parse_args()
main(args)

# File: STTS-main/VideoSwin/tools/deployment/publish_model.py
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
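
# Editor's note: for out_file='model.pth' the published checkpoint is renamed
# to 'model-<first 8 hex chars of its sha256>.pth', so the filename itself
# carries the checksum that users can verify with `sha256sum`.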
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()

# File: STTS-main/VideoSwin/tools/deployment/pytorch2onnx.py
import argparse
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from mmaction.models import build_model
try:
import onnx
import onnxruntime as rt
except ImportError as e:
raise ImportError(f'Please install onnx and onnxruntime first. {e}')
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=1.0.4')
def _convert_batchnorm(module):
"""Convert the syncBNs into normal BN3ds."""
module_output = module
if isinstance(module, torch.nn.SyncBatchNorm):
module_output = torch.nn.BatchNorm3d(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
module_output.weight.data = module.weight.data.clone().detach()
module_output.bias.data = module.bias.data.clone().detach()
# keep requires_grad unchanged
module_output.weight.requires_grad = module.weight.requires_grad
module_output.bias.requires_grad = module.bias.requires_grad
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
module_output.add_module(name, _convert_batchnorm(child))
del module
return module_output
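
# A minimal sketch (editor's addition, not part of the original file):
# SyncBatchNorm layers cannot be traced for ONNX export, so they are swapped
# for equivalent BatchNorm3d modules.
def _demo_convert_batchnorm():
    sync_bn = torch.nn.SyncBatchNorm(8)
    bn3d = _convert_batchnorm(sync_bn)
    assert isinstance(bn3d, torch.nn.BatchNorm3d)
    # running statistics are carried over by reference
    assert bn3d.running_mean is sync_bn.running_mean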
def pytorch2onnx(model,
input_shape,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False):
"""Convert pytorch model to onnx model.
Args:
model (:obj:`nn.Module`): The pytorch model to be exported.
input_shape (tuple[int]): The input tensor shape of the model.
opset_version (int): Opset version of onnx used. Default: 11.
show (bool): Determines whether to print the onnx model architecture.
Default: False.
output_file (str): Output onnx model name. Default: 'tmp.onnx'.
verify (bool): Determines whether to verify the onnx model.
Default: False.
"""
model.cpu().eval()
input_tensor = torch.randn(input_shape)
register_extra_symbolics(opset_version)
torch.onnx.export(
model,
input_tensor,
output_file,
export_params=True,
keep_initializers_as_inputs=True,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model: {output_file}')
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# check the numerical value
# get pytorch output
pytorch_result = model(input_tensor)[0].detach().numpy()
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert len(net_feed_input) == 1
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(
None, {net_feed_input[0]: input_tensor.detach().numpy()})[0]
# only compare part of results
random_class = np.random.randint(pytorch_result.shape[1])
assert np.allclose(
pytorch_result[:, random_class], onnx_result[:, random_class]
), 'The outputs are different between Pytorch and ONNX'
print('The numerical values are same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMAction2 models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--is-localizer',
action='store_true',
help='whether it is a localizer')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1, 3, 8, 224, 224],
help='input video size')
parser.add_argument(
'--softmax',
action='store_true',
        help='whether to add a softmax layer at the end of recognizers')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
assert args.opset_version == 11, 'MMAction2 only supports opset 11 now'
cfg = mmcv.Config.fromfile(args.config)
# import modules from string list.
if not args.is_localizer:
cfg.model.backbone.pretrained = None
# build the model
model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
model = _convert_batchnorm(model)
# onnx.export does not support kwargs
if hasattr(model, 'forward_dummy'):
from functools import partial
model.forward = partial(model.forward_dummy, softmax=args.softmax)
elif hasattr(model, '_forward') and args.is_localizer:
model.forward = model._forward
else:
raise NotImplementedError(
'Please implement the forward method for exporting.')
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # convert the model to an onnx file
pytorch2onnx(
model,
args.shape,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify)

# File: STTS-main/VideoSwin/tools/misc/clip_feature_extraction.py
import argparse
import os
import os.path as osp
import warnings
from datetime import datetime
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.fileio.io import file_handlers
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.apis import multi_gpu_test, single_gpu_test
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
from mmaction.utils import register_module_hooks
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 clip-level feature extraction')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--video-list', help='video file list')
parser.add_argument('--video-root', help='video root directory')
parser.add_argument(
'--out',
default=None,
help='output result file in pkl/yaml/json format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def turn_off_pretrained(cfg):
# recursively find all pretrained in the model config,
# and set them None to avoid redundant pretrain steps for testing
if 'pretrained' in cfg:
cfg.pretrained = None
# recursively turn off pretrained value
for sub_cfg in cfg.values():
if isinstance(sub_cfg, dict):
turn_off_pretrained(sub_cfg)
def text2tensor(text, size=256):
nums = [ord(x) for x in text]
assert len(nums) < size
nums.extend([0] * (size - len(nums)))
nums = np.array(nums, dtype=np.uint8)
return torch.from_numpy(nums)
def tensor2text(tensor):
    # strip the zero padding; chr(0) is assumed never to occur in the text
chars = [chr(x) for x in tensor if x != 0]
return ''.join(chars)
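
# A minimal sketch (editor's addition): the rank-0 broadcast below relies on
# this zero-padded uint8 round trip to ship a filename to all ranks.
def _demo_text_tensor_roundtrip():
    name = 'fake_anno_20220101_000000.txt'
    assert tensor2text(text2tensor(name)) == name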
def inference_pytorch(args, cfg, distributed, data_loader):
"""Get predictions by pytorch models."""
# remove redundant pretrain steps for testing
turn_off_pretrained(cfg.model)
# build the model and load checkpoint
model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
if len(cfg.module_hooks) > 0:
register_module_hooks(model, cfg.module_hooks)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
return outputs
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
if cfg.model['test_cfg'] is None:
cfg.model['test_cfg'] = dict(feature_extraction=True)
else:
cfg.model['test_cfg']['feature_extraction'] = True
# Load output_config from cfg
output_config = cfg.get('output_config', {})
if args.out:
# Overwrite output_config from args.out
output_config = Config._merge_a_into_b(
dict(out=args.out), output_config)
assert output_config, 'Please specify output filename with --out.'
dataset_type = cfg.data.test.type
if output_config.get('out', None):
if 'output_format' in output_config:
            # ugly workaround to make recognition and localization the same
warnings.warn(
'Skip checking `output_format` in localization task.')
else:
out = output_config['out']
# make sure the dirname of the output path exists
mmcv.mkdir_or_exist(osp.dirname(out))
_, suffix = osp.splitext(out)
assert dataset_type == 'VideoDataset'
assert suffix[1:] in file_handlers, (
'The format of the output '
'file should be json, pickle or yaml')
# set cudnn benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
cfg.data.test.data_prefix = args.video_root
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
size = 256
fname_tensor = torch.zeros(size, dtype=torch.uint8).cuda()
if rank == 0:
videos = open(args.video_list).readlines()
videos = [x.strip() for x in videos]
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
fake_anno = f'fake_anno_{timestamp}.txt'
with open(fake_anno, 'w') as fout:
lines = [x + ' 0' for x in videos]
fout.write('\n'.join(lines))
fname_tensor = text2tensor(fake_anno, size).cuda()
if distributed:
dist.broadcast(fname_tensor.cuda(), src=0)
fname = tensor2text(fname_tensor)
cfg.data.test.ann_file = fname
# The flag is used to register module's hooks
cfg.setdefault('module_hooks', [])
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
dist=distributed,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
outputs = inference_pytorch(args, cfg, distributed, data_loader)
if rank == 0:
if output_config.get('out', None):
out = output_config['out']
print(f'\nwriting results to {out}')
dataset.dump_results(outputs, **output_config)
# remove the temporary file
os.remove(fake_anno)
if __name__ == '__main__':
main()

# File: STTS-main/VideoSwin/tools/misc/bsn_proposal_generation.py
import argparse
import os
import os.path as osp
import mmcv
import numpy as np
import torch.multiprocessing as mp
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals)
def load_video_infos(ann_file):
"""Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos.
"""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
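# Hedged illustration (added for clarity, not used by this script): the
# annotation JSON is assumed to map video names to info dicts, so the loader
# above amounts to the following pure-Python reshaping of an in-memory dict.
def _video_infos_from_dict(anno_database):
    """Reshape {video_name: info} into a list of info dicts with names."""
    return [
        dict(info, video_name=name)
        for name, info in anno_database.items()
    ]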
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
        tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update()
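# Hedged sketch of the work-splitting rule used above (and repeated in
# generate_features below): the first T - 1 workers each take
# floor(N / T) videos and the last worker takes the remainder.
def _split_video_ranges(num_videos, num_threads):
    """Return the per-worker index ranges that the loops above iterate."""
    per_thread = num_videos // num_threads
    ranges = [
        range(tid * per_thread, (tid + 1) * per_thread)
        for tid in range(num_threads - 1)
    ]
    ranges.append(range((num_threads - 1) * per_thread, num_videos))
    return ranges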
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, pgm_features_thread, **kwargs):
"""Generate proposals features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_features_thread
processes = []
manager = mp.Manager()
feature_return_dict = manager.dict()
kwargs['result_dict'] = feature_return_dict
for tid in range(pgm_features_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_features_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_features_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
for video_name in feature_return_dict.keys():
bsp_feature = feature_return_dict[video_name]
feature_path = osp.join(pgm_features_dir, video_name + '.npy')
np.save(feature_path, bsp_feature)
prog_bar.update()
def parse_args():
parser = argparse.ArgumentParser(description='Proposal generation module')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--mode',
choices=['train', 'test'],
default='test',
help='train or test')
args = parser.parse_args()
return args
def main():
print('Begin Proposal Generation Module')
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
tem_results_dir = cfg.tem_results_dir
pgm_proposals_dir = cfg.pgm_proposals_dir
pgm_features_dir = cfg.pgm_features_dir
if args.mode == 'test':
generate_proposals(cfg.ann_file_val, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_val, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, **cfg.pgm_features_test_cfg)
print('\nFinish feature generation')
elif args.mode == 'train':
generate_proposals(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, pgm_features_dir,
**cfg.pgm_features_train_cfg)
print('\nFinish feature generation')
print('Finish Proposal Generation Module')
if __name__ == '__main__':
main()
| 6,626 | 32.469697 | 79 | py |
STTS | STTS-main/VideoSwin/tools/analysis/benchmark.py | import argparse
import time
import torch
from mmcv import Config
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel
from mmcv.runner.fp16_utils import wrap_fp16_model
from mmaction.datasets import build_dataloader, build_dataset
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(
description='MMAction2 benchmark a recognizer')
parser.add_argument('config', help='test config file path')
parser.add_argument(
        '--log-interval', default=10, type=int, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.backbone.pretrained = None
cfg.data.test.test_mode = True
# build the dataloader
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
data_loader = build_dataloader(
dataset,
videos_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
# build the model and load checkpoint
model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDataParallel(model, device_ids=[0])
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
    # benchmark with 200 videos (after warm-up) and take the average
for i, data in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % args.log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
                print(
                    f'Done video [{i + 1:<3}/ 200], fps: {fps:.1f} video / s')
            if (i + 1) == 200:
                # `elapsed` for this iteration was already accumulated above;
                # adding it again would double-count the last clip
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Overall fps: {fps:.1f} video / s')
                break
if __name__ == '__main__':
main()
| 2,638 | 27.376344 | 79 | py |
STTS | STTS-main/VideoSwin/tools/data/build_audio_features.py | import argparse
import glob
import os
import os.path as osp
import sys
from multiprocessing import Pool
import mmcv
import numpy as np
from scipy.io import wavfile
try:
import librosa
import lws
except ImportError:
    print('Please install librosa and lws first.')
sys.path.append('..')
SILENCE_THRESHOLD = 2
FMIN = 125
FMAX = 7600
FRAME_SHIFT_MS = None
MIN_LEVEL_DB = -100
REF_LEVEL_DB = 20
RESCALING = True
RESCALING_MAX = 0.999
ALLOW_CLIPPING_IN_NORMALIZATION = True
LOG_SCALE_MIN = -32.23619130191664
NORM_AUDIO = True
class AudioTools:
"""All methods related to audio feature extraction. Code Reference:
<https://github.com/r9y9/deepvoice3_pytorch>`_,
<https://pypi.org/project/lws/1.2.6/>`_.
Args:
frame_rate (int): The frame rate per second of the video. Default: 30.
sample_rate (int): The sample rate for audio sampling. Default: 16000.
num_mels (int): Number of channels of the melspectrogram. Default: 80.
fft_size (int): fft_size / sample_rate is window size. Default: 1280.
hop_size (int): hop_size / sample_rate is step size. Default: 320.
"""
def __init__(self,
frame_rate=30,
sample_rate=16000,
num_mels=80,
fft_size=1280,
hop_size=320,
spectrogram_type='lws'):
self.frame_rate = frame_rate
self.sample_rate = sample_rate
self.silence_threshold = SILENCE_THRESHOLD
self.num_mels = num_mels
self.fmin = FMIN
self.fmax = FMAX
self.fft_size = fft_size
self.hop_size = hop_size
self.frame_shift_ms = FRAME_SHIFT_MS
self.min_level_db = MIN_LEVEL_DB
self.ref_level_db = REF_LEVEL_DB
self.rescaling = RESCALING
self.rescaling_max = RESCALING_MAX
self.allow_clipping_in_normalization = ALLOW_CLIPPING_IN_NORMALIZATION
self.log_scale_min = LOG_SCALE_MIN
self.norm_audio = NORM_AUDIO
self.spectrogram_type = spectrogram_type
assert spectrogram_type in ['lws', 'librosa']
def load_wav(self, path):
"""Load an audio file into numpy array."""
return librosa.core.load(path, sr=self.sample_rate)[0]
@staticmethod
def audio_normalize(samples, desired_rms=0.1, eps=1e-4):
"""RMS normalize the audio data."""
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples
def generate_spectrogram_magphase(self, audio, with_phase=False):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that D = S * P.
Args:
audio (np.ndarray): The input audio signal.
with_phase (bool): Determines whether to output the
phase components. Default: False.
Returns:
np.ndarray: magnitude and phase component of the complex-valued
spectrogram.
"""
spectro = librosa.core.stft(
audio,
hop_length=self.get_hop_size(),
n_fft=self.fft_size,
center=True)
spectro_mag, spectro_phase = librosa.core.magphase(spectro)
spectro_mag = np.expand_dims(spectro_mag, axis=0)
if with_phase:
spectro_phase = np.expand_dims(np.angle(spectro_phase), axis=0)
return spectro_mag, spectro_phase
return spectro_mag
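    # Hedged numeric check of the decomposition above (D = S * P), using the
    # librosa API already imported at the top of this file:
    #
    #     D = librosa.core.stft(y, n_fft=1280, hop_length=320)
    #     S, P = librosa.core.magphase(D)
    #     assert np.allclose(S * P, D)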
def save_wav(self, wav, path):
"""Save the wav to disk."""
# 32767 = (2 ^ 15 - 1) maximum of int16
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
wavfile.write(path, self.sample_rate, wav.astype(np.int16))
def trim(self, quantized):
"""Trim the audio wavfile."""
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end]
def adjust_time_resolution(self, quantized, mel):
"""Adjust time resolution by repeating features.
Args:
quantized (np.ndarray): (T,)
mel (np.ndarray): (N, D)
Returns:
tuple: Tuple of (T,) and (T, D)
"""
assert quantized.ndim == 1
assert mel.ndim == 2
upsample_factor = quantized.size // mel.shape[0]
mel = np.repeat(mel, upsample_factor, axis=0)
n_pad = quantized.size - mel.shape[0]
if n_pad != 0:
assert n_pad > 0
mel = np.pad(
mel, [(0, n_pad), (0, 0)], mode='constant', constant_values=0)
# trim
start, end = self.start_and_end_indices(quantized,
self.silence_threshold)
return quantized[start:end], mel[start:end, :]
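    # Worked example for adjust_time_resolution (illustrative values): a
    # quantized signal of size 1000 with a mel of shape (4, 80) gives
    # upsample_factor = 1000 // 4 = 250, so mel is repeated to (1000, 80)
    # before both arrays are trimmed by the silence threshold.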
@staticmethod
def start_and_end_indices(quantized, silence_threshold=2):
"""Trim the audio file when reaches the silence threshold."""
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end
def melspectrogram(self, y):
"""Generate the melspectrogram."""
D = self._lws_processor().stft(y).T
S = self._amp_to_db(self._linear_to_mel(np.abs(D))) - self.ref_level_db
if not self.allow_clipping_in_normalization:
assert S.max() <= 0 and S.min() - self.min_level_db >= 0
return self._normalize(S)
def get_hop_size(self):
"""Calculate the hop size."""
hop_size = self.hop_size
if hop_size is None:
assert self.frame_shift_ms is not None
hop_size = int(self.frame_shift_ms / 1000 * self.sample_rate)
return hop_size
def _lws_processor(self):
"""Perform local weighted sum.
        Please refer to `lws <https://pypi.org/project/lws/1.2.6/>`_.
"""
return lws.lws(self.fft_size, self.get_hop_size(), mode='speech')
@staticmethod
def lws_num_frames(length, fsize, fshift):
"""Compute number of time frames of lws spectrogram.
        Please refer to `lws <https://pypi.org/project/lws/1.2.6/>`_.
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M
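    # Worked example with this class's defaults (fsize=1280, fshift=320):
    # a 1 s clip at 16 kHz has length = 16000, pad = 960, and
    # 16000 % 320 == 0, so M = (16000 + 1920 - 1280) // 320 + 1 = 53 frames.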
def lws_pad_lr(self, x, fsize, fshift):
"""Compute left and right padding lws internally uses.
        Please refer to `lws <https://pypi.org/project/lws/1.2.6/>`_.
"""
M = self.lws_num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r
def _linear_to_mel(self, spectrogram):
"""Warp linear scale spectrograms to the mel scale.
        Please refer to `deepvoice3_pytorch <https://github.com/r9y9/deepvoice3_pytorch>`_.
"""
global _mel_basis
_mel_basis = self._build_mel_basis()
return np.dot(_mel_basis, spectrogram)
def _build_mel_basis(self):
"""Build mel filters.
        Please refer to `deepvoice3_pytorch <https://github.com/r9y9/deepvoice3_pytorch>`_.
"""
assert self.fmax <= self.sample_rate // 2
return librosa.filters.mel(
self.sample_rate,
self.fft_size,
fmin=self.fmin,
fmax=self.fmax,
n_mels=self.num_mels)
def _amp_to_db(self, x):
min_level = np.exp(self.min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
@staticmethod
def _db_to_amp(x):
return np.power(10.0, x * 0.05)
def _normalize(self, S):
return np.clip((S - self.min_level_db) / -self.min_level_db, 0, 1)
def _denormalize(self, S):
return (np.clip(S, 0, 1) * -self.min_level_db) + self.min_level_db
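    # Worked example for the two helpers above: with min_level_db = -100, a
    # bin at S = -50 dB normalizes to (-50 - (-100)) / 100 = 0.5, and
    # _denormalize(0.5) = 0.5 * 100 - 100 recovers -50.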
def read_audio(self, audio_path):
wav = self.load_wav(audio_path)
if self.norm_audio:
wav = self.audio_normalize(wav)
else:
wav = wav / np.abs(wav).max()
return wav
def audio_to_spectrogram(self, wav):
if self.spectrogram_type == 'lws':
spectrogram = self.melspectrogram(wav).astype(np.float32).T
elif self.spectrogram_type == 'librosa':
spectrogram = self.generate_spectrogram_magphase(wav)
return spectrogram
def extract_audio_feature(wav_path, audio_tools, mel_out_dir):
file_name, _ = osp.splitext(osp.basename(wav_path))
# Write the spectrograms to disk:
mel_filename = os.path.join(mel_out_dir, file_name + '.npy')
if not os.path.exists(mel_filename):
try:
wav = audio_tools.read_audio(wav_path)
spectrogram = audio_tools.audio_to_spectrogram(wav)
np.save(
mel_filename,
spectrogram.astype(np.float32),
allow_pickle=False)
except BaseException:
print(f'Read audio [{wav_path}] failed.')
if __name__ == '__main__':
audio_tools = AudioTools(
fft_size=512, hop_size=256) # window_size:32ms hop_size:16ms
parser = argparse.ArgumentParser()
parser.add_argument('audio_home_path', type=str)
parser.add_argument('spectrogram_save_path', type=str)
parser.add_argument('--level', type=int, default=1)
parser.add_argument('--ext', default='.m4a')
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--part', type=str, default='1/1')
args = parser.parse_args()
mmcv.mkdir_or_exist(args.spectrogram_save_path)
files = glob.glob(
osp.join(args.audio_home_path, '*/' * args.level, '*' + args.ext))
print(f'found {len(files)} files.')
files = sorted(files)
if args.part is not None:
[this_part, num_parts] = [int(i) for i in args.part.split('/')]
part_len = len(files) // num_parts
p = Pool(args.num_workers)
for file in files[part_len * (this_part - 1):(
part_len * this_part) if this_part != num_parts else len(files)]:
p.apply_async(
extract_audio_feature,
args=(file, audio_tools, args.spectrogram_save_path))
p.close()
p.join()
| 10,532 | 32.438095 | 79 | py |
STTS | STTS-main/VideoSwin/tools/data/activitynet/tsn_feature_extraction.py | import argparse
import os
import os.path as osp
import pickle
import mmcv
import numpy as np
import torch
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(description='Extract TSN Feature')
parser.add_argument('--data-prefix', default='', help='dataset prefix')
parser.add_argument('--output-prefix', default='', help='output prefix')
parser.add_argument(
'--data-list',
help='video list of the dataset, the format should be '
'`frame_dir num_frames output_file`')
parser.add_argument(
'--frame-interval',
type=int,
default=16,
        help='the sampling frequency of frames in the untrimmed video')
parser.add_argument('--modality', default='RGB', choices=['RGB', 'Flow'])
parser.add_argument('--ckpt', help='checkpoint for feature extraction')
parser.add_argument(
'--part',
type=int,
default=0,
        help='which part of the dataset to forward (alldata[part::total])')
parser.add_argument(
'--total', type=int, default=1, help='how many parts exist')
args = parser.parse_args()
return args
def main():
args = parse_args()
args.is_rgb = args.modality == 'RGB'
args.clip_len = 1 if args.is_rgb else 5
args.input_format = 'NCHW' if args.is_rgb else 'NCHW_Flow'
rgb_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=False)
flow_norm_cfg = dict(mean=[128, 128], std=[128, 128])
args.img_norm_cfg = rgb_norm_cfg if args.is_rgb else flow_norm_cfg
args.f_tmpl = 'img_{:05d}.jpg' if args.is_rgb else 'flow_{}_{:05d}.jpg'
args.in_channels = args.clip_len * (3 if args.is_rgb else 2)
# max batch_size for one forward
args.batch_size = 200
# define the data pipeline for Untrimmed Videos
data_pipeline = [
dict(
type='UntrimmedSampleFrames',
clip_len=args.clip_len,
frame_interval=args.frame_interval,
start_index=0),
dict(type='FrameSelector'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=256),
dict(type='Normalize', **args.img_norm_cfg),
dict(type='FormatShape', input_format=args.input_format),
dict(type='Collect', keys=['imgs'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data_pipeline = Compose(data_pipeline)
# define TSN R50 model, the model is used as the feature extractor
model_cfg = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
depth=50,
in_channels=args.in_channels,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=200,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1)),
test_cfg=dict(average_clips=None))
model = build_model(model_cfg)
# load pretrained weight into the feature extractor
state_dict = torch.load(args.ckpt)['state_dict']
model.load_state_dict(state_dict)
model = model.cuda()
model.eval()
data = open(args.data_list).readlines()
data = [x.strip() for x in data]
data = data[args.part::args.total]
# enumerate Untrimmed videos, extract feature from each of them
prog_bar = mmcv.ProgressBar(len(data))
if not osp.exists(args.output_prefix):
os.system(f'mkdir -p {args.output_prefix}')
for item in data:
frame_dir, length, _ = item.split()
output_file = osp.basename(frame_dir) + '.pkl'
frame_dir = osp.join(args.data_prefix, frame_dir)
output_file = osp.join(args.output_prefix, output_file)
assert output_file.endswith('.pkl')
length = int(length)
        # prepare a pseudo sample
tmpl = dict(
frame_dir=frame_dir,
total_frames=length,
filename_tmpl=args.f_tmpl,
start_index=0,
modality=args.modality)
sample = data_pipeline(tmpl)
imgs = sample['imgs']
shape = imgs.shape
        # the original shape should be N_seg * C * H * W; reshape it to
        # N_seg * 1 * C * H * W so that the network returns a feature for
        # each frame (no score averaging among segments)
imgs = imgs.reshape((shape[0], 1) + shape[1:])
imgs = imgs.cuda()
def forward_data(model, data):
# chop large data into pieces and extract feature from them
results = []
start_idx = 0
num_clip = data.shape[0]
while start_idx < num_clip:
with torch.no_grad():
part = data[start_idx:start_idx + args.batch_size]
feat = model.forward(part, return_loss=False)
results.append(feat)
start_idx += args.batch_size
return np.concatenate(results)
feat = forward_data(model, imgs)
with open(output_file, 'wb') as fout:
pickle.dump(feat, fout)
prog_bar.update()
if __name__ == '__main__':
main()
| 5,206 | 33.946309 | 78 | py |
STTS | STTS-main/VideoSwin/mmcv_custom/runner/checkpoint.py | # Copyright (c) Open-MMLab. All rights reserved.
import apex
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
def save_checkpoint(model, filename, optimizer=None, meta=None, amp=False):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
if amp:
checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
                'Please install pavi to save checkpoints to modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
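# Hedged usage sketch (paths and objects are placeholders): passing amp=True
# additionally stores ``apex.amp.state_dict()`` so the mixed-precision loss
# scale can be restored on resume.
#
#     save_checkpoint(model, 'work_dir/epoch_1.pth', optimizer=optimizer,
#                     meta=dict(epoch=1), amp=True)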
| 2,843 | 34.111111 | 77 | py |
STTS | STTS-main/VideoSwin/mmcv_custom/runner/epoch_based_runner.py | # Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import time
import warnings
import torch
import mmcv
from .checkpoint import save_checkpoint
import apex
from torch.optim import Optimizer
import os
class EpochBasedRunnerAmp(mmcv.runner.EpochBasedRunner):
def __init__(self,
model,
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None,
amp=False):
super().__init__(
model,
batch_processor,
optimizer,
work_dir,
logger,
meta,
max_iters,
max_epochs)
self.amp = amp
def run_iter(self, data_batch, train_mode, **kwargs):
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer,
**kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
if 'log_vars' in outputs:
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
def train(self, cur_epoch, total_epochs, data_loader, **kwargs):
self.model.train()
if hasattr(self.model, "module"):
model_noddp = self.model.module
else:
model_noddp = self.model
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = self._max_epochs * len(self.data_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
data_size = len(self.data_loader)
total_steps = total_epochs * data_size
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
cur_step = cur_epoch * data_size + i
if hasattr(model_noddp, 'update_sigma'):
model_noddp.update_sigma(cur_step, total_steps)
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, **kwargs)
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
@torch.no_grad()
def val(self, cur_epoch, total_epochs, data_loader, **kwargs):
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
self.call_hook('before_val_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_val_iter')
self.run_iter(data_batch, train_mode=False)
self.call_hook('after_val_iter')
self.call_hook('after_val_epoch')
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
                running order and epochs. E.g., [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
for i, flow in enumerate(workflow):
mode, epochs = flow
if mode == 'train':
self._max_iters = self._max_epochs * len(data_loaders[i])
break
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
#self.logger.info('Start running, host: %s, work_dir: %s',
# get_host_info(), work_dir)
self.logger.info('Hooks will be executed in the following order:\n%s',
self.get_hook_info())
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
while self.epoch < self._max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
'mode in workflow must be a str, but got {}'.format(
type(mode)))
for cur_epoch in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(cur_epoch, epochs, data_loaders[i], **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
def save_checkpoint(self,
out_dir,
filename_tmpl='epoch_{}.pth',
save_optimizer=True,
meta=None,
create_symlink=True):
"""Save the checkpoint.
Args:
out_dir (str): The directory that checkpoints are saved.
filename_tmpl (str, optional): The checkpoint filename template,
which contains a placeholder for the epoch number.
Defaults to 'epoch_{}.pth'.
save_optimizer (bool, optional): Whether to save the optimizer to
the checkpoint. Defaults to True.
meta (dict, optional): The meta information to be saved in the
checkpoint. Defaults to None.
create_symlink (bool, optional): Whether to create a symlink
"latest.pth" to point to the latest checkpoint.
Defaults to True.
"""
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
elif isinstance(meta, dict):
meta.update(epoch=self.epoch + 1, iter=self.iter)
else:
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta, amp=self.amp)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
if create_symlink:
dst_file = osp.join(out_dir, 'latest.pth')
try:
mmcv.symlink(filename, dst_file)
            except Exception:  # e.g. symlinks unsupported on this filesystem
shutil.copy(filepath, dst_file)
def resume(self,
checkpoint,
resume_optimizer=True,
map_location='cpu',
resume_amp=False):
if map_location == 'default':
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(
checkpoint,
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = self.load_checkpoint(checkpoint)
else:
checkpoint = self.load_checkpoint(
checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
if 'optimizer' in checkpoint and resume_optimizer:
if isinstance(self.optimizer, Optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
elif isinstance(self.optimizer, dict):
for k in self.optimizer.keys():
self.optimizer[k].load_state_dict(
checkpoint['optimizer'][k])
else:
raise TypeError(
'Optimizer should be dict or torch.optim.Optimizer '
f'but got {type(self.optimizer)}')
if 'amp' in checkpoint and resume_amp:
apex.amp.load_state_dict(checkpoint['amp'])
self.logger.info('load amp state dict')
self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
del checkpoint
torch.cuda.empty_cache()
def auto_resume(self):
linkname = osp.join(self.work_dir, 'latest.pth')
if osp.exists(linkname):
self.logger.info('latest checkpoint found')
self.resume(linkname)
| 9,764 | 38.216867 | 91 | py |
STTS | STTS-main/VideoSwin/mmaction/apis/inference.py | import os
import os.path as osp
import re
from operator import itemgetter
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmaction.core import OutputHook
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_recognizer
def init_recognizer(config,
checkpoint=None,
device='cuda:0',
use_frames=False):
"""Initialize a recognizer from config file.
Args:
config (str | :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str | None, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Default: None.
device (str | :obj:`torch.device`): The desired device of returned
tensor. Default: 'cuda:0'.
use_frames (bool): Whether to use rawframes as input. Default:False.
Returns:
nn.Module: The constructed recognizer.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if ((use_frames and config.dataset_type != 'RawframeDataset')
or (not use_frames and config.dataset_type != 'VideoDataset')):
input_type = 'rawframes' if use_frames else 'video'
        raise RuntimeError('input data type should be consistent with the '
f'dataset type in config, but got input type '
f"'{input_type}' and dataset type "
f"'{config.dataset_type}'")
# pretrained model is unnecessary since we directly load checkpoint later
config.model.backbone.pretrained = None
model = build_recognizer(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None:
load_checkpoint(model, checkpoint, map_location=device)
model.cfg = config
model.to(device)
model.eval()
return model
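# Hedged usage sketch (config, checkpoint and label paths are placeholders):
#
#     model = init_recognizer('configs/recognition/tsn/tsn_r50_video.py',
#                             'checkpoints/tsn_r50.pth', device='cuda:0')
#     top5 = inference_recognizer(model, 'demo/demo.mp4', 'demo/label_map.txt')
#     print(top5[0])  # (label, score) of the most confident class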
def inference_recognizer(model,
video_path,
label_path,
use_frames=False,
outputs=None,
as_tensor=True):
"""Inference a video with the detector.
Args:
model (nn.Module): The loaded recognizer.
video_path (str): The video file path/url or the rawframes directory
path. If ``use_frames`` is set to True, it should be rawframes
directory path. Otherwise, it should be video file path.
label_path (str): The label file path.
use_frames (bool): Whether to use rawframes as input. Default:False.
outputs (list(str) | tuple(str) | str | None) : Names of layers whose
outputs need to be returned, default: None.
as_tensor (bool): Same as that in ``OutputHook``. Default: True.
Returns:
dict[tuple(str, float)]: Top-5 recognition result dict.
dict[torch.tensor | np.ndarray]:
Output feature maps from layers specified in `outputs`.
"""
if not (osp.exists(video_path) or video_path.startswith('http')):
raise RuntimeError(f"'{video_path}' is missing")
if osp.isfile(video_path) and use_frames:
raise RuntimeError(
f"'{video_path}' is a video file, not a rawframe directory")
if osp.isdir(video_path) and not use_frames:
raise RuntimeError(
f"'{video_path}' is a rawframe directory, not a video file")
if isinstance(outputs, str):
outputs = (outputs, )
assert outputs is None or isinstance(outputs, (tuple, list))
cfg = model.cfg
device = next(model.parameters()).device # model device
# construct label map
with open(label_path, 'r') as f:
label = [line.strip() for line in f]
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = Compose(test_pipeline)
# prepare data
if use_frames:
filename_tmpl = cfg.data.test.get('filename_tmpl', 'img_{:05}.jpg')
modality = cfg.data.test.get('modality', 'RGB')
start_index = cfg.data.test.get('start_index', 1)
# count the number of frames that match the format of `filename_tmpl`
# RGB pattern example: img_{:05}.jpg -> ^img_\d+.jpg$
        # Flow pattern example: {}_{:05d}.jpg -> ^x_\d+.jpg$
pattern = f'^{filename_tmpl}$'
if modality == 'Flow':
pattern = pattern.replace('{}', 'x')
pattern = pattern.replace(
pattern[pattern.find('{'):pattern.find('}') + 1], '\\d+')
total_frames = len(
list(
filter(lambda x: re.match(pattern, x) is not None,
os.listdir(video_path))))
data = dict(
frame_dir=video_path,
total_frames=total_frames,
label=-1,
start_index=start_index,
filename_tmpl=filename_tmpl,
modality=modality)
else:
start_index = cfg.data.test.get('start_index', 0)
data = dict(
filename=video_path,
label=-1,
start_index=start_index,
modality='RGB')
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with OutputHook(model, outputs=outputs, as_tensor=as_tensor) as h:
with torch.no_grad():
scores = model(return_loss=False, **data)[0]
returned_features = h.layer_outputs if outputs else None
score_tuples = tuple(zip(label, scores))
score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)
top5_label = score_sorted[:5]
if outputs:
return top5_label, returned_features
return top5_label
| 5,977 | 37.076433 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/apis/test.py | import os.path as osp
import pickle
import shutil
import tempfile
# TODO import test functions from mmcv and delete them from mmaction2
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
try:
from mmcv.engine import (single_gpu_test, multi_gpu_test,
collect_results_gpu, collect_results_cpu)
from_mmcv = True
except (ImportError, ModuleNotFoundError):
warnings.warn(
'DeprecationWarning: single_gpu_test, multi_gpu_test, '
'collect_results_cpu, collect_results_gpu from mmaction2 will be '
'deprecated. Please install mmcv through master branch.')
from_mmcv = False
if not from_mmcv:
def single_gpu_test(model, data_loader): # noqa: F811
"""Test model with a single gpu.
        This method tests the model with a single gpu and
        displays a test progress bar.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
result = model(return_loss=False, **data)
results.extend(result)
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test( # noqa: F811
model, data_loader, tmpdir=None, gpu_collect=True):
"""Test model with multiple gpus.
        This method tests the model with multiple gpus and collects the
        results under two different modes: gpu and cpu. By setting
        'gpu_collect=True' it encodes results to gpu tensors and uses gpu
        communication for results collection. In cpu mode it saves the results
        on different gpus to 'tmpdir' and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. Default: None
gpu_collect (bool): Option to use either gpu or cpu to collect
results. Default: True
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
result = model(return_loss=False, **data)
results.extend(result)
if rank == 0:
# use the first key as main key to calculate the batch size
batch_size = len(next(iter(data.values())))
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None): # noqa: F811
"""Collect results in cpu mode.
It saves the results on different gpus to 'tmpdir' and collects
them by the rank 0 worker.
Args:
result_part (list): Results to be collected
size (int): Result size.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. Default: None
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
            # 32 is the ASCII code for a space, used as padding
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()),
dtype=torch.uint8,
device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
        # synchronize all processes to make sure tmpdir exists
dist.barrier()
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
        # synchronize all processes before loading the pickle files
dist.barrier()
# collect all parts
if rank != 0:
return None
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
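    # Worked example of the zip-interleave above: a distributed sampler hands
    # out samples round-robin, so with world_size = 2 the parts come back as
    # part_list = [[r0, r2], [r1, r3]]; zip(*part_list) restores the original
    # order [r0, r1, r2, r3] before truncating to `size`.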
def collect_results_gpu(result_part, size): # noqa: F811
"""Collect results in gpu mode.
        It encodes results to gpu tensors and uses gpu communication for results
collection.
Args:
result_part (list): Results to be collected
size (int): Result size.
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)),
dtype=torch.uint8,
device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
return None
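    # Worked sketch of the padding step above: if the pickled parts are 10
    # and 14 bytes long, shape_max = 14, so the 10-byte part is sent in a
    # zero-padded 14-byte tensor and sliced back with recv[:shape[0]] on
    # rank 0 before unpickling.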
| 7,577 | 35.965854 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/apis/train.py | import copy as cp
import os.path as osp
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, OptimizerHook,
build_optimizer, get_dist_info)
from mmcv.runner.hooks import Fp16OptimizerHook
from ..core import (DistEvalHook, EvalHook, OmniSourceDistSamplerSeedHook,
OmniSourceRunner)
from ..datasets import build_dataloader, build_dataset
from ..utils import PreciseBNHook, get_root_logger
from .test import multi_gpu_test
from mmcv_custom.runner import EpochBasedRunnerAmp
import apex
import os.path as osp
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
test=dict(test_best=False, test_last=False),
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
test (dict): The testing option, with two keys: test_last & test_best.
The value is True or False, indicating whether to test the
corresponding checkpoint.
Default: dict(test_best=False, test_last=False).
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(log_level=cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
if 'optimizer_config' not in cfg:
        cfg.optimizer_config = {}
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1) // cfg.optimizer_config.get('update_interval', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed)
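    # Worked example of the division above, assuming DistOptimizerHook's
    # update_interval implements gradient accumulation: videos_per_gpu = 8
    # with update_interval = 2 loads 4 clips per GPU per iteration and steps
    # the optimizer every 2 iterations, keeping the effective batch size 8.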
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('train_dataloader', {}))
if cfg.omnisource:
# The option can override videos_per_gpu
train_ratio = cfg.data.get('train_ratio', [1] * len(dataset))
omni_videos_per_gpu = cfg.data.get('omni_videos_per_gpu', None)
if omni_videos_per_gpu is None:
dataloader_settings = [dataloader_setting] * len(dataset)
else:
dataloader_settings = []
for videos_per_gpu in omni_videos_per_gpu:
this_setting = cp.deepcopy(dataloader_setting)
this_setting['videos_per_gpu'] = videos_per_gpu
dataloader_settings.append(this_setting)
data_loaders = [
build_dataloader(ds, **setting)
for ds, setting in zip(dataset, dataloader_settings)
]
else:
data_loaders = [
build_dataloader(ds, **dataloader_setting) for ds in dataset
]
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
# use apex fp16 optimizer
    # Note that this is just a temporary patch; we should not encourage this kind of code style
use_amp = False
if (
cfg.optimizer_config.get("type", None)
and cfg.optimizer_config["type"] == "DistOptimizerHook"
):
if cfg.optimizer_config.get("use_fp16", False):
model, optimizer = apex.amp.initialize(
model.cuda(), optimizer, opt_level="O1"
)
for m in model.modules():
if hasattr(m, "fp16_enabled"):
m.fp16_enabled = True
use_amp = True
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
if use_amp:
Runner = EpochBasedRunnerAmp
runner = Runner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta,
amp=use_amp)
else:
Runner = OmniSourceRunner if cfg.omnisource else EpochBasedRunner
runner = Runner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
if cfg.omnisource:
runner.register_hook(OmniSourceDistSamplerSeedHook())
else:
runner.register_hook(DistSamplerSeedHook())
# precise bn setting
if cfg.get('precise_bn', False):
precise_bn_dataset = build_dataset(cfg.data.train)
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=0, # save memory and time
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed)
data_loader_precise_bn = build_dataloader(precise_bn_dataset,
**dataloader_setting)
precise_bn_hook = PreciseBNHook(data_loader_precise_bn,
**cfg.get('precise_bn'))
runner.register_hook(precise_bn_hook)
if validate:
eval_cfg = cfg.get('evaluation', {})
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('val_dataloader', {}))
val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from, resume_amp=use_amp)
elif cfg.get("auto_resume", False) and osp.exists(osp.join(runner.work_dir, 'latest.pth')):
runner.auto_resume()
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner_kwargs = dict()
if cfg.omnisource:
runner_kwargs = dict(train_ratio=train_ratio)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)
if test['test_last'] or test['test_best']:
best_ckpt_path = None
if test['test_best']:
if hasattr(eval_hook, 'best_ckpt_path'):
best_ckpt_path = eval_hook.best_ckpt_path
if best_ckpt_path is None or not osp.exists(best_ckpt_path):
test['test_best'] = False
if best_ckpt_path is None:
runner.logger.info('Warning: test_best set as True, but '
'is not applicable '
'(eval_hook.best_ckpt_path is None)')
else:
runner.logger.info('Warning: test_best set as True, but '
'is not applicable (best_ckpt '
f'{best_ckpt_path} not found)')
if not test['test_last']:
return
test_dataset = build_dataset(cfg.data.test, dict(test_mode=True))
gpu_collect = cfg.get('evaluation', {}).get('gpu_collect', False)
tmpdir = cfg.get('evaluation', {}).get('tmpdir',
osp.join(cfg.work_dir, 'tmp'))
dataloader_setting = dict(
videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
num_gpus=len(cfg.gpu_ids),
dist=distributed,
shuffle=False)
dataloader_setting = dict(dataloader_setting,
**cfg.data.get('test_dataloader', {}))
test_dataloader = build_dataloader(test_dataset, **dataloader_setting)
names, ckpts = [], []
if test['test_last']:
names.append('last')
ckpts.append(None)
if test['test_best']:
names.append('best')
ckpts.append(best_ckpt_path)
for name, ckpt in zip(names, ckpts):
if ckpt is not None:
runner.load_checkpoint(ckpt)
outputs = multi_gpu_test(runner.model, test_dataloader, tmpdir,
gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
out = osp.join(cfg.work_dir, f'{name}_pred.pkl')
test_dataset.dump_results(outputs, out)
eval_cfg = cfg.get('evaluation', {})
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect',
'save_best', 'rule', 'by_epoch', 'broadcast_bn_buffers'
]:
eval_cfg.pop(key, None)
eval_res = test_dataset.evaluate(outputs, **eval_cfg)
runner.logger.info(f'Testing results of the {name} checkpoint')
for metric_name, val in eval_res.items():
runner.logger.info(f'{metric_name}: {val:.04f}')
| 10,658 | 39.222642 | 107 | py |
STTS | STTS-main/VideoSwin/mmaction/core/evaluation/eval_hooks.py | import os
import os.path as osp
import warnings
from math import inf
import torch.distributed as dist
from torch.nn.modules.batchnorm import _BatchNorm
from torch.utils.data import DataLoader
try:
from mmcv.runner import EvalHook as BasicEvalHook
from mmcv.runner import DistEvalHook as BasicDistEvalHook
from_mmcv = True
class EvalHook(BasicEvalHook):
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self, *args, save_best='auto', **kwargs):
super().__init__(*args, save_best=save_best, **kwargs)
class DistEvalHook(BasicDistEvalHook):
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self, *args, save_best='auto', **kwargs):
super().__init__(*args, save_best=save_best, **kwargs)
except (ImportError, ModuleNotFoundError):
warnings.warn('DeprecationWarning: EvalHook and DistEvalHook in mmaction2 '
'will be deprecated, please install mmcv through master '
'branch.')
from_mmcv = False
if not from_mmcv:
from mmcv.runner import Hook
class EvalHook(Hook): # noqa: F811
"""Non-Distributed evaluation hook.
Notes:
            If new arguments are added for EvalHook, tools/test.py and
            tools/eval_metric.py may be affected.
        This hook will regularly perform evaluation at a given interval when
        running in a non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the
resuming epoch. If None, whether to evaluate is merely decided
by ``interval``. Default: None.
interval (int): Evaluation interval. Default: 1.
            by_epoch (bool): Whether to perform evaluation by epoch or by
                iteration. If set to True, it will evaluate by epoch.
                Otherwise, by iteration. Default: True.
save_best (str | None, optional): If a metric is specified, it
would measure the best checkpoint during evaluation. The
                information about the best checkpoint would be saved in best.json.
Options are the evaluation metrics to the test dataset. e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and
VideoDataset). ``AR@AN``, ``auc`` for action localization
dataset. (ActivityNetDataset). ``mAP@0.5IOU`` for
spatio-temporal action detection dataset (AVADataset).
If ``save_best`` is ``auto``, the first key of the returned
``OrderedDict`` result will be used. Default: 'auto'.
rule (str | None, optional): Comparison rule for best score.
If set to None, it will infer a reasonable rule. Keys such as
'acc', 'top' .etc will be inferred by 'greater' rule. Keys
contain 'loss' will be inferred by 'less' rule. Options are
'greater', 'less', None. Default: None.
**eval_kwargs: Evaluation arguments fed into the evaluate function
of the dataset.
"""
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
init_value_map = {'greater': -inf, 'less': inf}
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
save_best='auto',
rule=None,
**eval_kwargs):
if 'key_indicator' in eval_kwargs:
raise RuntimeError(
'"key_indicator" is deprecated, '
'you need to use "save_best" instead. '
'See https://github.com/open-mmlab/mmaction2/pull/395 '
'for more info')
if not isinstance(dataloader, DataLoader):
raise TypeError(f'dataloader must be a pytorch DataLoader, '
f'but got {type(dataloader)}')
if interval <= 0:
raise ValueError(
f'interval must be positive, but got {interval}')
assert isinstance(by_epoch, bool)
if start is not None and start < 0:
warnings.warn(
f'The evaluation start epoch {start} is smaller than 0, '
f'use 0 instead', UserWarning)
start = 0
self.dataloader = dataloader
self.interval = interval
self.start = start
self.by_epoch = by_epoch
assert isinstance(save_best, str) or save_best is None
self.save_best = save_best
self.eval_kwargs = eval_kwargs
self.initial_flag = True
if self.save_best is not None:
self.best_ckpt_path = None
self._init_rule(rule, self.save_best)
def _init_rule(self, rule, key_indicator):
"""Initialize rule, key_indicator, comparison_func, and best score.
Args:
rule (str | None): Comparison rule for best score.
key_indicator (str | None): Key indicator to determine the
comparison rule.
"""
if rule not in self.rule_map and rule is not None:
raise KeyError(f'rule must be greater, less or None, '
f'but got {rule}.')
if rule is None:
if key_indicator != 'auto':
if any(key in key_indicator for key in self.greater_keys):
rule = 'greater'
elif any(key in key_indicator for key in self.less_keys):
rule = 'less'
else:
raise ValueError(
f'Cannot infer the rule for key '
f'{key_indicator}, thus a specific rule '
f'must be specified.')
self.rule = rule
self.key_indicator = key_indicator
if self.rule is not None:
self.compare_func = self.rule_map[self.rule]
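        # Hedged examples of the inference above: save_best='top1_acc'
        # matches 'acc' in greater_keys, so rule becomes 'greater';
        # save_best='loss_cls' matches 'loss' in less_keys, so rule becomes
        # 'less'; save_best='auto' defers the choice until the first
        # evaluation result is seen (see `evaluate` below).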
def before_run(self, runner):
if self.save_best is not None:
if runner.meta is None:
                warnings.warn('runner.meta is None. Creating an empty one.')
runner.meta = dict()
runner.meta.setdefault('hook_msgs', dict())
def before_train_iter(self, runner):
"""Evaluate the model only at the start of training by
iteration."""
if self.by_epoch:
return
if not self.initial_flag:
return
if self.start is not None and runner.iter >= self.start:
self.after_train_iter(runner)
self.initial_flag = False
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
if not self.by_epoch:
return
if not self.initial_flag:
return
if self.start is not None and runner.epoch >= self.start:
self.after_train_epoch(runner)
self.initial_flag = False
def after_train_iter(self, runner):
"""Called after every training iter to evaluate the results."""
if not self.by_epoch:
self._do_evaluate(runner)
def after_train_epoch(self, runner):
"""Called after every training epoch to evaluate the results."""
if self.by_epoch:
self._do_evaluate(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self.evaluation_flag(runner):
return
from mmaction.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
def evaluation_flag(self, runner):
"""Judge whether to perform_evaluation.
Returns:
bool: The flag indicating whether to perform evaluation.
"""
if self.by_epoch:
current = runner.epoch
check_time = self.every_n_epochs
else:
current = runner.iter
check_time = self.every_n_iters
if self.start is None:
if not check_time(runner, self.interval):
# No evaluation during the interval.
return False
elif (current + 1) < self.start:
# No evaluation if start is larger than the current time.
return False
else:
# Evaluation only at epochs/iters 3, 5, 7...
# if start==3 and interval==2
if (current + 1 - self.start) % self.interval:
return False
return True
def _save_ckpt(self, runner, key_score):
if self.by_epoch:
current = f'epoch_{runner.epoch + 1}'
cur_type, cur_time = 'epoch', runner.epoch + 1
else:
current = f'iter_{runner.iter + 1}'
cur_type, cur_time = 'iter', runner.iter + 1
best_score = runner.meta['hook_msgs'].get(
'best_score', self.init_value_map[self.rule])
if self.compare_func(key_score, best_score):
best_score = key_score
runner.meta['hook_msgs']['best_score'] = best_score
if self.best_ckpt_path and osp.isfile(self.best_ckpt_path):
os.remove(self.best_ckpt_path)
best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
runner.save_checkpoint(
runner.work_dir, best_ckpt_name, create_symlink=False)
self.best_ckpt_path = osp.join(runner.work_dir, best_ckpt_name)
runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
runner.logger.info(
f'Now best checkpoint is saved as {best_ckpt_name}.')
runner.logger.info(
f'Best {self.key_indicator} is {best_score:0.4f} '
f'at {cur_time} {cur_type}.')
def evaluate(self, runner, results):
"""Evaluate the results.
Args:
                runner (:obj:`mmcv.Runner`): The underlying training runner.
results (list): Output results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
if self.save_best is not None:
if self.key_indicator == 'auto':
# infer from eval_results
self._init_rule(self.rule, list(eval_res.keys())[0])
return eval_res[self.key_indicator]
return None
class DistEvalHook(EvalHook): # noqa: F811
"""Distributed evaluation hook.
    This hook will regularly perform evaluation in a given interval when
    running in a distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the
resuming epoch. If None, whether to evaluate is merely decided
by ``interval``. Default: None.
interval (int): Evaluation interval. Default: 1.
by_epoch (bool): Determine perform evaluation by epoch or by
iteration. If set to True, it will perform by epoch. Otherwise,
            by iteration. Default: True.
save_best (str | None, optional): If a metric is specified, it
would measure the best checkpoint during evaluation. The
            information about the best checkpoint would be saved in best.json.
Options are the evaluation metrics to the test dataset. e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and
VideoDataset). ``AR@AN``, ``auc`` for action localization
dataset (ActivityNetDataset). ``mAP@0.5IOU`` for
spatio-temporal action detection dataset (AVADataset).
If ``save_best`` is ``auto``, the first key of the returned
``OrderedDict`` result will be used. Default: 'auto'.
rule (str | None, optional): Comparison rule for best score. If
set to None, it will infer a reasonable rule. Keys such as
            'acc', 'top', etc. will be inferred by the 'greater' rule. Keys
            containing 'loss' will be inferred by the 'less' rule. Options are
'greater', 'less', None. Default: None.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
broadcast_bn_buffer (bool): Whether to broadcast the
buffer(running_mean and running_var) of rank 0 to other rank
before evaluation. Default: True.
**eval_kwargs: Evaluation arguments fed into the evaluate function
of the dataset.
"""
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
save_best='auto',
rule=None,
broadcast_bn_buffer=True,
tmpdir=None,
gpu_collect=False,
**eval_kwargs):
super().__init__(
dataloader,
start=start,
interval=interval,
by_epoch=by_epoch,
save_best=save_best,
rule=rule,
**eval_kwargs)
self.broadcast_bn_buffer = broadcast_bn_buffer
self.tmpdir = tmpdir
self.gpu_collect = gpu_collect
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for _, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self.evaluation_flag(runner):
return
from mmaction.apis import multi_gpu_test
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
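# A minimal usage sketch (illustrative, not part of the original file):
# `val_loader` and `runner` are assumed to be built with the usual
# mmaction tooling before the hook is registered.
#
#     eval_hook = DistEvalHook(val_loader, interval=1, save_best='auto',
#                              gpu_collect=True)
#     runner.register_hook(eval_hook)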
| 16,695 | 41.700767 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/core/bbox/bbox_target.py | import torch
import torch.nn.functional as F
def bbox_target(pos_bboxes_list, neg_bboxes_list, gt_labels, cfg):
"""Generate classification targets for bboxes.
Args:
pos_bboxes_list (list[Tensor]): Positive bboxes list.
neg_bboxes_list (list[Tensor]): Negative bboxes list.
gt_labels (list[Tensor]): Groundtruth classification label list.
cfg (Config): RCNN config.
Returns:
(Tensor, Tensor): Label and label_weight for bboxes.
"""
labels, label_weights = [], []
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
assert len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels)
length = len(pos_bboxes_list)
for i in range(length):
pos_bboxes = pos_bboxes_list[i]
neg_bboxes = neg_bboxes_list[i]
gt_label = gt_labels[i]
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
label = F.pad(gt_label, (0, 0, 0, num_neg))
label_weight = pos_bboxes.new_zeros(num_samples)
label_weight[:num_pos] = pos_weight
        if num_neg > 0:
            # guard: with num_neg == 0, `[-0:]` selects the whole tensor
            # and would clobber the positive weights set above
            label_weight[-num_neg:] = 1.
labels.append(label)
label_weights.append(label_weight)
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
return labels, label_weights
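# A minimal sketch of calling `bbox_target` with toy inputs; the config
# object, tensor shapes and class count below are illustrative assumptions,
# not part of the original file.
if __name__ == '__main__':
    from mmcv import ConfigDict
    toy_cfg = ConfigDict(pos_weight=-1)  # <= 0 falls back to a weight of 1.0
    pos = [torch.rand(2, 4)]   # 2 positive proposals for one sample
    neg = [torch.rand(3, 4)]   # 3 negative proposals for the same sample
    gt = [torch.zeros(2, 81)]  # multi-label targets for the positives
    labels, label_weights = bbox_target(pos, neg, gt, toy_cfg)
    assert labels.shape == (5, 81) and label_weights.shape == (5, )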
| 1,334 | 30.785714 | 73 | py |
STTS | STTS-main/VideoSwin/mmaction/core/bbox/assigners/max_iou_assigner_ava.py | import torch
from mmaction.utils import import_module_error_class
try:
from mmdet.core.bbox import AssignResult, MaxIoUAssigner
from mmdet.core.bbox.builder import BBOX_ASSIGNERS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
if mmdet_imported:
@BBOX_ASSIGNERS.register_module()
class MaxIoUAssignerAVA(MaxIoUAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposals will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float | tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each
gt). Default: 0.
gt_max_assign_all (bool): Whether to assign all bboxes with the
same highest overlap with some gt to that gt. Default: True.
"""
# The function is overriden, to handle the case that gt_label is not
# int
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor, optional): Labels of k gt_bboxes, shape
(k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
                # Low-quality matching will overwrite the assigned_gt_inds
                # assigned in Step 3. Thus, the assigned gt might not be the
                # best one for prediction.
                # For example, if bbox A has 0.9 and 0.8 iou with GT bbox
                # 1 & 2, bbox 1 will be assigned as the best target for bbox A
                # in step 3. However, if GT bbox 2's gt_argmax_overlaps = A,
                # bbox A's assigned_gt_inds will be overwritten to be bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
if gt_labels is not None:
# consider multi-class case (AVA)
assert len(gt_labels[0]) > 1
assigned_labels = assigned_gt_inds.new_zeros(
(num_bboxes, len(gt_labels[0])), dtype=torch.float32)
# If not assigned, labels will be all 0
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
else:
# define an empty class, so that can be imported
@import_module_error_class('mmdet')
class MaxIoUAssignerAVA:
pass
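# A toy illustration of the assignment semantics above (hypothetical
# numbers; requires mmdet): with pos_iou_thr=0.5 and neg_iou_thr=0.4,
#     overlaps = [[0.6, 0.3],
#                 [0.2, 0.1]]  # 2 gts x 2 proposals
# assigns proposal 0 to gt 0 (stored 1-based as 1, since 0.6 >= 0.5) and
# proposal 1 to background (0), since its best IoU 0.3 falls in [0, 0.4).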
| 6,032 | 42.402878 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/core/runner/omnisource_runner.py | # Copyright (c) Open-MMLab. All rights reserved.
import time
import warnings
import mmcv
from mmcv.runner import EpochBasedRunner, Hook
from mmcv.runner.utils import get_host_info
def cycle(iterable):
iterator = iter(iterable)
while True:
try:
yield next(iterator)
except StopIteration:
iterator = iter(iterable)
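# A quick illustration of `cycle` (assumed usage, not in the original file):
# unlike itertools.cycle, it re-creates the iterator on every pass, so a
# DataLoader with a shuffling sampler is re-shuffled each epoch instead of
# replaying cached batches.
# >>> from itertools import islice
# >>> list(islice(cycle([1, 2, 3]), 7))
# [1, 2, 3, 1, 2, 3, 1]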
class OmniSourceDistSamplerSeedHook(Hook):
def before_epoch(self, runner):
for data_loader in runner.data_loaders:
if hasattr(data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch'):
                # batch sampler in PyTorch wraps the sampler as its attribute.
data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
class OmniSourceRunner(EpochBasedRunner):
"""OmniSource Epoch-based Runner.
    This runner trains models epoch by epoch; the epoch length is defined by
dataloader[0], which is the main dataloader.
"""
def run_iter(self, data_batch, train_mode, source, **kwargs):
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer,
**kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
# Since we have multiple sources, we add a suffix to log_var names,
# so that we can differentiate them.
if 'log_vars' in outputs:
log_vars = outputs['log_vars']
log_vars = {k + source: v for k, v in log_vars.items()}
self.log_buffer.update(log_vars, outputs['num_samples'])
self.outputs = outputs
def train(self, data_loaders, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loaders = data_loaders
self.main_loader = self.data_loaders[0]
# Add aliasing
self.data_loader = self.main_loader
self.aux_loaders = self.data_loaders[1:]
self.aux_iters = [cycle(loader) for loader in self.aux_loaders]
auxiliary_iter_times = [1] * len(self.aux_loaders)
use_aux_per_niter = 1
if 'train_ratio' in kwargs:
train_ratio = kwargs.pop('train_ratio')
use_aux_per_niter = train_ratio[0]
auxiliary_iter_times = train_ratio[1:]
self._max_iters = self._max_epochs * len(self.main_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.main_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, source='')
self.call_hook('after_train_iter')
if self._iter % use_aux_per_niter != 0:
self._iter += 1
continue
for idx, n_times in enumerate(auxiliary_iter_times):
for _ in range(n_times):
data_batch = next(self.aux_iters[idx])
self.call_hook('before_train_iter')
self.run_iter(
data_batch, train_mode=True, source=f'/aux{idx}')
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
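    # Worked example (hypothetical): with train_ratio=(2, 1, 1) and two
    # auxiliary loaders, iterations whose global index is a multiple of 2
    # are followed by 1 batch from each auxiliary loader; the remaining
    # iterations consume the main loader only.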
    # Since validation is handled by an evaluation hook, this method is
    # intentionally left unimplemented.
def val(self, data_loader, **kwargs):
raise NotImplementedError
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training.
`data_loaders[0]` is the main data_loader, which contains
target datasets and determines the epoch length.
`data_loaders[1:]` are auxiliary data loaders, which contain
auxiliary web datasets.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2)] means running 2
epochs for training iteratively. Note that val epoch is not
supported for this runner for simplicity.
max_epochs (int | None): The max epochs that training lasts,
deprecated now. Default: None.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(workflow) == 1 and workflow[0][0] == 'train'
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
mode, epochs = workflow[0]
self._max_iters = self._max_epochs * len(data_loaders[0])
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
while self.epoch < self._max_epochs:
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
f'mode in workflow must be a str, but got {mode}')
for _ in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(data_loaders, **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
| 6,589 | 39.429448 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/core/hooks/output.py | import functools
import warnings
import torch
class OutputHook:
"""Output feature map of some layers.
Args:
module (nn.Module): The whole module to get layers.
outputs (tuple[str] | list[str]): Layer name to output. Default: None.
        as_tensor (bool): Whether to return a tensor rather than a numpy
            array. Default: False.
"""
def __init__(self, module, outputs=None, as_tensor=False):
self.outputs = outputs
self.as_tensor = as_tensor
self.layer_outputs = {}
self.handles = []
self.register(module)
def register(self, module):
def hook_wrapper(name):
def hook(model, input, output):
if not isinstance(output, torch.Tensor):
warnings.warn(f'Directly return the output from {name}, '
f'since it is not a tensor')
self.layer_outputs[name] = output
elif self.as_tensor:
self.layer_outputs[name] = output
else:
self.layer_outputs[name] = output.detach().cpu().numpy()
return hook
if isinstance(self.outputs, (list, tuple)):
for name in self.outputs:
try:
layer = rgetattr(module, name)
h = layer.register_forward_hook(hook_wrapper(name))
except AttributeError:
raise AttributeError(f'Module {name} not found')
self.handles.append(h)
def remove(self):
for h in self.handles:
h.remove()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.remove()
# using wonder's beautiful simplification:
# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects
def rgetattr(obj, attr, *args):
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
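# A small usage sketch (names are illustrative, not from the original file):
#
#     with OutputHook(model, outputs=['backbone.layer4'],
#                     as_tensor=True) as h:
#         model(dummy_input)
#         feat = h.layer_outputs['backbone.layer4']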
| 2,040 | 29.014706 | 84 | py |
STTS | STTS-main/VideoSwin/mmaction/core/optimizer/topk_optimizer_constructor.py | import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd
@OPTIMIZER_BUILDERS.register_module()
class TopkOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
This constructor builds optimizer in different ways from the default one.
1. Parameters of the first conv layer have default lr and weight decay.
2. Parameters of BN layers have default lr and zero weight decay.
3. If the field "fc_lr5" in paramwise_cfg is set to True, the parameters
of the last fc layer in cls_head have 5x lr multiplier and 10x weight
decay multiplier.
4. Weights of other layers have default lr and weight decay, and biases
have a 2x lr multiplier and zero weight decay.
"""
def add_params(self, params, model):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
train_topk_only = self.paramwise_cfg['train_topk_only']
# Batchnorm parameters.
bn_params = []
# Non-batchnorm parameters.
non_bn_params = []
predictor = []
for name, param in model.named_parameters():
if 'predictor' in name:
predictor.append(param)
elif train_topk_only:
                continue  # skip (freeze) all weights other than the predictor
elif "bn" in name:
bn_params.append(param)
else:
non_bn_params.append(param)
params.append({
'params': predictor,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': bn_params,
'lr': self.base_lr * 0.01,
'weight_decay': 0.0
})
params.append({
'params': non_bn_params,
'lr': self.base_lr * 0.01,
'weight_decay': self.base_wd
})
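# An example optimizer config that selects this constructor (values are
# illustrative assumptions):
#
#     optimizer = dict(
#         type='SGD', lr=0.01, weight_decay=1e-4,
#         constructor='TopkOptimizerConstructor',
#         paramwise_cfg=dict(train_topk_only=True))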
| 2,226 | 32.238806 | 77 | py |
STTS | STTS-main/VideoSwin/mmaction/core/optimizer/tsm_optimizer_constructor.py | import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd
@OPTIMIZER_BUILDERS.register_module()
class TSMOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
This constructor builds optimizer in different ways from the default one.
1. Parameters of the first conv layer have default lr and weight decay.
2. Parameters of BN layers have default lr and zero weight decay.
3. If the field "fc_lr5" in paramwise_cfg is set to True, the parameters
of the last fc layer in cls_head have 5x lr multiplier and 10x weight
decay multiplier.
4. Weights of other layers have default lr and weight decay, and biases
have a 2x lr multiplier and zero weight decay.
"""
def add_params(self, params, model):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
# use fc_lr5 to determine whether to specify higher multi-factor
# for fc layer weights and bias.
fc_lr5 = self.paramwise_cfg['fc_lr5']
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
conv_cnt = 0
for m in model.modules():
if isinstance(m, _ConvNd):
m_params = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(m_params[0])
if len(m_params) == 2:
first_conv_bias.append(m_params[1])
else:
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m, torch.nn.Linear):
m_params = list(m.parameters())
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m,
(_BatchNorm, SyncBatchNorm, torch.nn.GroupNorm)):
for param in list(m.parameters()):
if param.requires_grad:
bn.append(param)
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError(f'New atomic module type: {type(m)}. '
'Need to give it a learning policy')
# pop the cls_head fc layer params
last_fc_weight = normal_weight.pop()
last_fc_bias = normal_bias.pop()
if fc_lr5:
lr5_weight.append(last_fc_weight)
lr10_bias.append(last_fc_bias)
else:
normal_weight.append(last_fc_weight)
normal_bias.append(last_fc_bias)
params.append({
'params': first_conv_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': first_conv_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({
'params': normal_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': normal_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0})
params.append({
'params': lr5_weight,
'lr': self.base_lr * 5,
'weight_decay': self.base_wd
})
params.append({
'params': lr10_bias,
'lr': self.base_lr * 10,
'weight_decay': 0
})
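# An example optimizer config that selects this constructor (values are
# illustrative assumptions):
#
#     optimizer = dict(
#         type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4,
#         constructor='TSMOptimizerConstructor',
#         paramwise_cfg=dict(fc_lr5=True))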
| 4,074 | 36.045455 | 77 | py |
STTS | STTS-main/VideoSwin/mmaction/core/optimizer/copy_of_sgd.py | from mmcv.runner import OPTIMIZERS
from torch.optim import SGD
@OPTIMIZERS.register_module()
class CopyOfSGD(SGD):
"""A clone of torch.optim.SGD.
A customized optimizer could be defined like CopyOfSGD. You may derive from
built-in optimizers in torch.optim, or directly implement a new optimizer.
"""
| 320 | 25.75 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/localizers/base.py | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
from .. import builder
class BaseLocalizer(nn.Module, metaclass=ABCMeta):
"""Base class for localizers.
    All localizers should subclass it. All subclasses should overwrite:
    Methods: ``forward_train``, the forward computation used in training.
    Methods: ``forward_test``, the forward computation used in testing.
"""
def __init__(self, backbone, cls_head, train_cfg=None, test_cfg=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
self.cls_head = builder.build_head(cls_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights()
def init_weights(self):
"""Weight initialization for model."""
self.backbone.init_weights()
self.cls_head.init_weights()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
x = self.backbone(imgs)
return x
@abstractmethod
def forward_train(self, imgs, labels):
"""Defines the computation performed at training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at testing."""
def forward(self, imgs, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(imgs, **kwargs)
return self.forward_test(imgs, **kwargs)
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
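    # Worked example (hypothetical values): losses = dict(loss_tem=t1,
    # acc=t2) yields log_vars with keys 'loss_tem', 'acc' and 'loss',
    # where 'loss' sums only the entries whose key contains 'loss'.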
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self.forward(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
results = self.forward(return_loss=False, **data_batch)
outputs = dict(results=results)
return outputs
| 5,143 | 34.722222 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/localizers/bsn.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...localization import temporal_iop
from ..builder import LOCALIZERS, build_loss
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class TEM(BaseLocalizer):
"""Temporal Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
    Args:
        temporal_dim (int): Total frames selected for each video.
        boundary_ratio (float): Ratio for determining video boundaries.
        tem_feat_dim (int): Feature dimension.
tem_hidden_dim (int): Hidden layer dimension.
tem_match_threshold (float): Temporal evaluation match threshold.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BinaryLogisticRegressionLoss')``.
loss_weight (float): Weight term for action_loss. Default: 2.
output_dim (int): Output dimension. Default: 3.
conv1_ratio (float): Ratio of conv1 layer output. Default: 1.0.
conv2_ratio (float): Ratio of conv2 layer output. Default: 1.0.
conv3_ratio (float): Ratio of conv3 layer output. Default: 0.01.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
tem_feat_dim,
tem_hidden_dim,
tem_match_threshold,
loss_cls=dict(type='BinaryLogisticRegressionLoss'),
loss_weight=2,
output_dim=3,
conv1_ratio=1,
conv2_ratio=1,
conv3_ratio=0.01):
super(BaseLocalizer, self).__init__()
self.temporal_dim = temporal_dim
self.boundary_ratio = boundary_ratio
self.feat_dim = tem_feat_dim
self.c_hidden = tem_hidden_dim
self.match_threshold = tem_match_threshold
self.output_dim = output_dim
self.loss_cls = build_loss(loss_cls)
self.loss_weight = loss_weight
self.conv1_ratio = conv1_ratio
self.conv2_ratio = conv2_ratio
self.conv3_ratio = conv3_ratio
self.conv1 = nn.Conv1d(
in_channels=self.feat_dim,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv2 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv3 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.output_dim,
kernel_size=1,
stride=1,
padding=0)
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors()
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
            tmin_offset (float): Offset for the minimum value of temporal
                anchor. Default: 0.
            tmax_offset (float): Offset for the maximum value of temporal
                anchor. Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.temporal_dim
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.temporal_dim):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
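    # e.g. (hypothetical) temporal_dim=4 with the default offsets gives
    # anchors_tmins = [0.0, 0.25, 0.5, 0.75] and
    # anchors_tmaxs = [0.25, 0.5, 0.75, 1.0].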
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.conv1_ratio * self.conv1(x))
x = F.relu(self.conv2_ratio * self.conv2(x))
x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
return x
def forward_train(self, raw_feature, label_action, label_start, label_end):
"""Define the computation performed at every call when training."""
tem_output = self._forward(raw_feature)
score_action = tem_output[:, 0, :]
score_start = tem_output[:, 1, :]
score_end = tem_output[:, 2, :]
loss_action = self.loss_cls(score_action, label_action,
self.match_threshold)
loss_start_small = self.loss_cls(score_start, label_start,
self.match_threshold)
loss_end_small = self.loss_cls(score_end, label_end,
self.match_threshold)
loss_dict = {
'loss_action': loss_action * self.loss_weight,
'loss_start': loss_start_small,
'loss_end': loss_end_small
}
return loss_dict
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
tem_output = self._forward(raw_feature).cpu().numpy()
batch_action = tem_output[:, 0, :]
batch_start = tem_output[:, 1, :]
batch_end = tem_output[:, 2, :]
video_meta_list = [dict(x) for x in video_meta]
video_results = []
for batch_idx, _ in enumerate(batch_action):
video_name = video_meta_list[batch_idx]['video_name']
video_action = batch_action[batch_idx]
video_start = batch_start[batch_idx]
video_end = batch_end[batch_idx]
video_result = np.stack((video_action, video_start, video_end,
self.anchors_tmins, self.anchors_tmaxs),
axis=1)
video_results.append((video_name, video_result))
return video_results
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_action_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_tmins = every_gt_bbox[:, 0].cpu().numpy()
gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy()
gt_lens = gt_tmaxs - gt_tmins
gt_len_pad = np.maximum(1. / self.temporal_dim,
self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_action = []
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_action.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax, gt_tmins,
gt_tmaxs)))
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_action_list.append(match_score_action)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_action_list = torch.Tensor(match_score_action_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_action_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_action, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_action = label_action.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_action, label_start,
label_end)
return self.forward_test(raw_feature, video_meta)
@LOCALIZERS.register_module()
class PEM(BaseLocalizer):
"""Proposals Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
pem_feat_dim (int): Feature dimension.
pem_hidden_dim (int): Hidden layer dimension.
        pem_u_ratio_m (float): Ratio for medium score proposals to balance
            data.
        pem_u_ratio_l (float): Ratio for low score proposals to balance data.
pem_high_temporal_iou_threshold (float): High IoU threshold.
pem_low_temporal_iou_threshold (float): Low IoU threshold.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
fc1_ratio (float): Ratio for fc1 layer output. Default: 0.1.
fc2_ratio (float): Ratio for fc2 layer output. Default: 0.1.
output_dim (int): Output dimension. Default: 1.
"""
def __init__(self,
pem_feat_dim,
pem_hidden_dim,
pem_u_ratio_m,
pem_u_ratio_l,
pem_high_temporal_iou_threshold,
pem_low_temporal_iou_threshold,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
fc1_ratio=0.1,
fc2_ratio=0.1,
output_dim=1):
super(BaseLocalizer, self).__init__()
self.feat_dim = pem_feat_dim
self.hidden_dim = pem_hidden_dim
self.u_ratio_m = pem_u_ratio_m
self.u_ratio_l = pem_u_ratio_l
self.pem_high_temporal_iou_threshold = pem_high_temporal_iou_threshold
self.pem_low_temporal_iou_threshold = pem_low_temporal_iou_threshold
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.fc1_ratio = fc1_ratio
self.fc2_ratio = fc2_ratio
self.output_dim = output_dim
self.fc1 = nn.Linear(
in_features=self.feat_dim, out_features=self.hidden_dim, bias=True)
self.fc2 = nn.Linear(
in_features=self.hidden_dim,
out_features=self.output_dim,
bias=True)
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = torch.cat(list(x))
x = F.relu(self.fc1_ratio * self.fc1(x))
x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
return x
def forward_train(self, bsp_feature, reference_temporal_iou):
"""Define the computation performed at every call when training."""
pem_output = self._forward(bsp_feature)
reference_temporal_iou = torch.cat(list(reference_temporal_iou))
device = pem_output.device
reference_temporal_iou = reference_temporal_iou.to(device)
anchors_temporal_iou = pem_output.view(-1)
u_hmask = (reference_temporal_iou >
self.pem_high_temporal_iou_threshold).float()
u_mmask = (
(reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
& (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
).float()
u_lmask = (reference_temporal_iou <=
self.pem_low_temporal_iou_threshold).float()
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = self.u_ratio_m * num_h / (num_m)
r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
u_smmask = torch.rand(u_hmask.size()[0], device=device)
u_smmask = u_smmask * u_mmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = self.u_ratio_l * num_h / (num_l)
r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
u_slmask = torch.rand(u_hmask.size()[0], device=device)
u_slmask = u_slmask * u_lmask
u_slmask = (u_slmask > (1. - r_l)).float()
temporal_iou_weights = u_hmask + u_smmask + u_slmask
temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou,
reference_temporal_iou)
temporal_iou_loss = torch.sum(
temporal_iou_loss *
temporal_iou_weights) / torch.sum(temporal_iou_weights)
loss_dict = dict(temporal_iou_loss=temporal_iou_loss)
return loss_dict
def forward_test(self, bsp_feature, tmin, tmax, tmin_score, tmax_score,
video_meta):
"""Define the computation performed at every call when testing."""
pem_output = self._forward(bsp_feature).view(-1).cpu().numpy().reshape(
-1, 1)
tmin = tmin.view(-1).cpu().numpy().reshape(-1, 1)
tmax = tmax.view(-1).cpu().numpy().reshape(-1, 1)
tmin_score = tmin_score.view(-1).cpu().numpy().reshape(-1, 1)
tmax_score = tmax_score.view(-1).cpu().numpy().reshape(-1, 1)
score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
result = np.concatenate(
(tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
result = result.reshape(-1, 6)
video_info = dict(video_meta[0])
proposal_list = post_processing(result, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward(self,
bsp_feature,
reference_temporal_iou=None,
tmin=None,
tmax=None,
tmin_score=None,
tmax_score=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(bsp_feature, reference_temporal_iou)
return self.forward_test(bsp_feature, tmin, tmax, tmin_score,
tmax_score, video_meta)
| 15,855 | 39.141772 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/localizers/ssn.py | import torch
import torch.nn as nn
from .. import builder
from ..builder import LOCALIZERS
from .base import BaseLocalizer
@LOCALIZERS.register_module()
class SSN(BaseLocalizer):
"""Temporal Action Detection with Structured Segment Networks.
Args:
backbone (dict): Config for building backbone.
cls_head (dict): Config for building classification head.
in_channels (int): Number of channels for input data.
Default: 3.
spatial_type (str): Type of spatial pooling.
Default: 'avg'.
dropout_ratio (float): Ratio of dropout.
Default: 0.5.
loss_cls (dict): Config for building loss.
Default: ``dict(type='SSNLoss')``.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head,
in_channels=3,
spatial_type='avg',
dropout_ratio=0.5,
loss_cls=dict(type='SSNLoss'),
train_cfg=None,
test_cfg=None):
super().__init__(backbone, cls_head, train_cfg, test_cfg)
self.is_test_prepared = False
self.in_channels = in_channels
self.spatial_type = spatial_type
if self.spatial_type == 'avg':
self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
elif self.spatial_type == 'max':
self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
else:
self.pool = None
self.dropout_ratio = dropout_ratio
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.loss_cls = builder.build_loss(loss_cls)
def forward_train(self, imgs, proposal_scale_factor, proposal_type,
proposal_labels, reg_targets, **kwargs):
"""Define the computation performed at every call when training."""
imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
x = self.extract_feat(imgs)
if self.pool:
x = self.pool(x)
if self.dropout is not None:
x = self.dropout(x)
activity_scores, completeness_scores, bbox_preds = self.cls_head(
(x, proposal_scale_factor))
loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
proposal_type, proposal_labels, reg_targets,
self.train_cfg)
loss_dict = dict(**loss)
return loss_dict
def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
proposal_tick_list, reg_norm_consts, **kwargs):
"""Define the computation performed at every call when testing."""
num_crops = imgs.shape[0]
imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
num_ticks = imgs.shape[1]
output = []
minibatch_size = self.test_cfg.ssn.sampler.batch_size
for idx in range(0, num_ticks, minibatch_size):
chunk = imgs[:, idx:idx +
minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
x = self.extract_feat(chunk)
if self.pool:
x = self.pool(x)
# Merge crop to save memory.
x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
output.append(x)
output = torch.cat(output, dim=0)
relative_proposal_list = relative_proposal_list.squeeze(0)
proposal_tick_list = proposal_tick_list.squeeze(0)
scale_factor_list = scale_factor_list.squeeze(0)
reg_norm_consts = reg_norm_consts.squeeze(0)
if not self.is_test_prepared:
self.is_test_prepared = self.cls_head.prepare_test_fc(
self.cls_head.consensus.num_multipliers)
(output, activity_scores, completeness_scores,
bbox_preds) = self.cls_head(
(output, proposal_tick_list, scale_factor_list), test_mode=True)
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
if bbox_preds is not None:
bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
bbox_preds[:, :, 0] = (
bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
reg_norm_consts[0, 0])
bbox_preds[:, :, 1] = (
bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
reg_norm_consts[0, 1])
bbox_preds = bbox_preds.cpu().numpy()
result = [
dict(
relative_proposal_list=relative_proposal_list,
activity_scores=activity_scores,
completeness_scores=completeness_scores,
bbox_preds=bbox_preds)
]
return result
| 5,048 | 36.4 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/localizers/bmn.py | import math
import numpy as np
import torch
import torch.nn as nn
from ...localization import temporal_iop, temporal_iou
from ..builder import LOCALIZERS, build_loss
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class BMN(BaseLocalizer):
"""Boundary Matching Network for temporal action proposal generation.
    Please refer to `BMN: Boundary-Matching Network for Temporal Action
    Proposal Generation <https://arxiv.org/abs/1907.09702>`_.
Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network
Args:
temporal_dim (int): Total frames selected for each video.
boundary_ratio (float): Ratio for determining video boundaries.
num_samples (int): Number of samples for each proposal.
num_samples_per_bin (int): Number of bin samples for each sample.
feat_dim (int): Feature dimension.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BMNLoss')``.
hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256.
hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128.
hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
num_samples,
num_samples_per_bin,
feat_dim,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
loss_cls=dict(type='BMNLoss'),
hidden_dim_1d=256,
hidden_dim_2d=128,
hidden_dim_3d=512):
super(BaseLocalizer, self).__init__()
self.tscale = temporal_dim
self.boundary_ratio = boundary_ratio
self.num_samples = num_samples
self.num_samples_per_bin = num_samples_per_bin
self.feat_dim = feat_dim
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.loss_cls = build_loss(loss_cls)
self.hidden_dim_1d = hidden_dim_1d
self.hidden_dim_2d = hidden_dim_2d
self.hidden_dim_3d = hidden_dim_3d
self._get_interp1d_mask()
# Base Module
self.x_1d_b = nn.Sequential(
nn.Conv1d(
self.feat_dim,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True))
# Temporal Evaluation Module
self.x_1d_s = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
self.x_1d_e = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
# Proposal Evaluation Module
self.x_1d_p = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True))
self.x_3d_p = nn.Sequential(
nn.Conv3d(
self.hidden_dim_1d,
self.hidden_dim_3d,
kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True))
self.x_2d_p = nn.Sequential(
nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid())
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors(
-0.5, 1.5)
self.match_map = self._match_map()
self.bm_mask = self._get_bm_mask()
def _match_map(self):
"""Generate match map."""
temporal_gap = 1. / self.tscale
match_map = []
for idx in range(self.tscale):
match_window = []
tmin = temporal_gap * idx
for jdx in range(1, self.tscale + 1):
tmax = tmin + temporal_gap * jdx
match_window.append([tmin, tmax])
match_map.append(match_window)
match_map = np.array(match_map)
match_map = np.transpose(match_map, [1, 0, 2])
match_map = np.reshape(match_map, [-1, 2])
return match_map
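    # e.g. (hypothetical) tscale=2 yields rows ordered by duration, then
    # start: match_map = [[0., .5], [.5, 1.], [0., 1.], [.5, 1.5]].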
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
            tmin_offset (float): Offset for the minimum value of temporal
                anchor. Default: 0.
            tmax_offset (float): Offset for the maximum value of temporal
                anchor. Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.tscale
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.tscale):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x.shape [batch_size, self.feat_dim, self.tscale]
base_feature = self.x_1d_b(x)
# base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
start = self.x_1d_s(base_feature).squeeze(1)
# start.shape [batch_size, self.tscale]
end = self.x_1d_e(base_feature).squeeze(1)
# end.shape [batch_size, self.tscale]
confidence_map = self.x_1d_p(base_feature)
# [batch_size, self.hidden_dim_1d, self.tscale]
confidence_map = self._boundary_matching_layer(confidence_map)
        # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale] # noqa
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
# [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
confidence_map = self.x_2d_p(confidence_map)
# [batch_size, 2, self.tscale, self.tscale]
return confidence_map, start, end
def _boundary_matching_layer(self, x):
"""Generate matching layer."""
input_size = x.size()
out = torch.matmul(x,
self.sample_mask).reshape(input_size[0],
input_size[1],
self.num_samples,
self.tscale, self.tscale)
return out
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
confidence_map, start, end = self._forward(raw_feature)
start_scores = start[0].cpu().numpy()
end_scores = end[0].cpu().numpy()
cls_confidence = (confidence_map[0][1]).cpu().numpy()
reg_confidence = (confidence_map[0][0]).cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,0]
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1 # [0,0,0...,0,1]
for idx in range(1, self.tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
# iterate through all combinations of start_index and end_index
new_proposals = []
for idx in range(self.tscale):
for jdx in range(self.tscale):
start_index = jdx
end_index = start_index + idx + 1
if end_index < self.tscale and start_bins[
start_index] == 1 and end_bins[end_index] == 1:
tmin = start_index / self.tscale
tmax = end_index / self.tscale
tmin_score = start_scores[start_index]
tmax_score = end_scores[end_index]
cls_score = cls_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = tmin_score * tmax_score * cls_score * reg_score
new_proposals.append([
tmin, tmax, tmin_score, tmax_score, cls_score,
reg_score, score
])
new_proposals = np.stack(new_proposals)
video_info = dict(video_meta[0])
proposal_list = post_processing(new_proposals, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward_train(self, raw_feature, label_confidence, label_start,
label_end):
"""Define the computation performed at every call when training."""
confidence_map, start, end = self._forward(raw_feature)
loss = self.loss_cls(confidence_map, start, end, label_confidence,
label_start, label_end,
self.bm_mask.to(raw_feature.device))
loss_dict = dict(loss=loss[0])
return loss_dict
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_confidence_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_iou_map = []
for start, end in every_gt_bbox:
if isinstance(start, torch.Tensor):
start = start.numpy()
if isinstance(end, torch.Tensor):
end = end.numpy()
current_gt_iou_map = temporal_iou(self.match_map[:, 0],
self.match_map[:, 1], start,
end)
current_gt_iou_map = np.reshape(current_gt_iou_map,
[self.tscale, self.tscale])
gt_iou_map.append(current_gt_iou_map)
gt_iou_map = np.array(gt_iou_map).astype(np.float32)
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_tmins = every_gt_bbox[:, 0]
gt_tmaxs = every_gt_bbox[:, 1]
gt_len_pad = 3 * (1. / self.tscale)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_confidence_list.append(gt_iou_map)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_confidence_list = torch.Tensor(match_score_confidence_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_confidence_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_confidence, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_confidence = label_confidence.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_confidence,
label_start, label_end)
return self.forward_test(raw_feature, video_meta)
@staticmethod
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
num_samples_per_bin):
"""Generate sample mask for a boundary-matching pair."""
plen = float(seg_tmax - seg_tmin)
plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
total_samples = [
seg_tmin + plen_sample * i
for i in range(num_samples * num_samples_per_bin)
]
p_mask = []
for idx in range(num_samples):
bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
num_samples_per_bin]
bin_vector = np.zeros(tscale)
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if 0 <= int(sample_down) <= (tscale - 1):
bin_vector[int(sample_down)] += 1 - sample_decimal
if 0 <= int(sample_upper) <= (tscale - 1):
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_samples_per_bin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask
def _get_interp1d_mask(self):
"""Generate sample mask for each point in Boundary-Matching Map."""
mask_mat = []
for start_index in range(self.tscale):
mask_mat_vector = []
for duration_index in range(self.tscale):
if start_index + duration_index < self.tscale:
p_tmin = start_index
p_tmax = start_index + duration_index
center_len = float(p_tmax - p_tmin) + 1
sample_tmin = p_tmin - (center_len * self.boundary_ratio)
sample_tmax = p_tmax + (center_len * self.boundary_ratio)
p_mask = self._get_interp1d_bin_mask(
sample_tmin, sample_tmax, self.tscale,
self.num_samples, self.num_samples_per_bin)
else:
p_mask = np.zeros([self.tscale, self.num_samples])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(
torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
def _get_bm_mask(self):
"""Generate Boundary-Matching Mask."""
bm_mask = []
for idx in range(self.tscale):
mask_vector = [1] * (self.tscale - idx) + [0] * idx
bm_mask.append(mask_vector)
bm_mask = torch.tensor(bm_mask, dtype=torch.float)
return bm_mask
| 17,788 | 41.659472 | 93 | py |
STTS | STTS-main/VideoSwin/mmaction/models/recognizers/base.py | import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
"""Base class for recognizers.
All recognizers should subclass it.
    All subclasses should overwrite:
    - Methods: ``forward_train``, the forward computation used in training.
    - Methods: ``forward_test``, the forward computation used in testing.
Args:
backbone (dict): Backbone modules to extract feature.
cls_head (dict | None): Classification head to process feature.
Default: None.
neck (dict | None): Neck for feature fusion. Default: None.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head=None,
neck=None,
train_cfg=None,
test_cfg=None):
super().__init__()
# record the source of the backbone
self.backbone_from = 'mmaction2'
if backbone['type'].startswith('mmcls.'):
try:
import mmcls.models.builder as mmcls_builder
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install mmcls to use this backbone.')
backbone['type'] = backbone['type'][6:]
self.backbone = mmcls_builder.build_backbone(backbone)
self.backbone_from = 'mmcls'
elif backbone['type'].startswith('torchvision.'):
try:
import torchvision.models
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install torchvision to use this '
'backbone.')
backbone_type = backbone.pop('type')[12:]
self.backbone = torchvision.models.__dict__[backbone_type](
**backbone)
# disable the classifier
self.backbone.classifier = nn.Identity()
self.backbone.fc = nn.Identity()
self.backbone_from = 'torchvision'
elif backbone['type'].startswith('timm.'):
try:
import timm
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install timm to use this '
'backbone.')
backbone_type = backbone.pop('type')[5:]
# disable the classifier
backbone['num_classes'] = 0
self.backbone = timm.create_model(backbone_type, **backbone)
self.backbone_from = 'timm'
else:
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.cls_head = builder.build_head(cls_head) if cls_head else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
        # aux_info is the list of tensor names beyond 'imgs' and 'label' that
        # will be used in train_step and val_step; data_batch should contain
        # these tensors
self.aux_info = []
if train_cfg is not None and 'aux_info' in train_cfg:
self.aux_info = train_cfg['aux_info']
# max_testing_views should be int
self.max_testing_views = None
if test_cfg is not None and 'max_testing_views' in test_cfg:
self.max_testing_views = test_cfg['max_testing_views']
assert isinstance(self.max_testing_views, int)
if test_cfg is not None and 'feature_extraction' in test_cfg:
self.feature_extraction = test_cfg['feature_extraction']
else:
self.feature_extraction = False
# mini-batch blending, e.g. mixup, cutmix, etc.
self.blending = None
if train_cfg is not None and 'blending' in train_cfg:
from mmcv.utils import build_from_cfg
from mmaction.datasets.builder import BLENDINGS
self.blending = build_from_cfg(train_cfg['blending'], BLENDINGS)
self.init_weights()
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the recognizer has a neck"""
return hasattr(self, 'neck') and self.neck is not None
@property
def with_cls_head(self):
"""bool: whether the recognizer has a cls_head"""
return hasattr(self, 'cls_head') and self.cls_head is not None
def init_weights(self):
"""Initialize the model network weights."""
if self.backbone_from in ['mmcls', 'mmaction2']:
self.backbone.init_weights()
elif self.backbone_from in ['torchvision', 'timm']:
warnings.warn('We do not initialize weights for backbones in '
f'{self.backbone_from}, since the weights for '
f'backbones in {self.backbone_from} are initialized'
'in their __init__ functions.')
else:
raise NotImplementedError('Unsupported backbone source '
f'{self.backbone_from}!')
if self.with_cls_head:
self.cls_head.init_weights()
if self.with_neck:
self.neck.init_weights()
@auto_fp16()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
if (hasattr(self.backbone, 'features')
and self.backbone_from == 'torchvision'):
x = self.backbone.features(imgs)
elif self.backbone_from == 'timm':
x = self.backbone.forward_features(imgs)
else:
x = self.backbone(imgs)
return x
def average_clip(self, cls_score, num_segs=1):
"""Averaging class score over multiple clips.
        Uses the averaging type ('score', 'prob' or None, as defined in
        test_cfg) to compute the final averaged class score. Only called in
        test mode.
Args:
cls_score (torch.Tensor): Class score to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class score.
"""
if 'average_clips' not in self.test_cfg.keys():
raise KeyError('"average_clips" must defined in test_cfg\'s keys')
average_clips = self.test_cfg['average_clips']
if average_clips not in ['score', 'prob', None]:
raise ValueError(f'{average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", None]')
if average_clips is None:
return cls_score
batch_size = cls_score.shape[0]
cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
if average_clips == 'prob':
cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
elif average_clips == 'score':
cls_score = cls_score.mean(dim=1)
return cls_score
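    # Illustrative sketch (not part of the original class): with 'score' the
    # raw clip logits are averaged, while with 'prob' each clip is softmaxed
    # first; the two can disagree. A toy check assuming num_segs=2:
    @staticmethod
    def _average_clip_sketch():
        cls_score = torch.tensor([[2.0, 0.0, 1.0],
                                  [0.0, 3.0, 1.0]])  # [batch * num_segs, 3]
        score = cls_score.view(1, 2, 3)               # num_segs == 2
        avg_score = score.mean(dim=1)                 # 'score' averaging
        avg_prob = F.softmax(score, dim=2).mean(dim=1)  # 'prob' averaging
        return avg_score, avg_prob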
@abstractmethod
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
@abstractmethod
def forward_gradcam(self, imgs):
"""Defines the computation performed at every all when using gradcam
utils."""
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
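    # Illustrative sketch (not part of the original class): only keys that
    # contain 'loss' are summed into the total loss; other keys (e.g.
    # accuracies) are logged but not optimized.
    @staticmethod
    def _parse_losses_sketch():
        losses = dict(
            loss_cls=torch.tensor(1.5),
            loss_aux=[torch.tensor(0.2), torch.tensor(0.3)],
            top1_acc=torch.tensor(0.8))
        loss, log_vars = BaseRecognizer._parse_losses(losses)
        # loss == 1.5 + (0.2 + 0.3) == 2.0; top1_acc is only logged
        return loss, log_vars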
def forward(self, imgs, label=None, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if kwargs.get('gradcam', False):
del kwargs['gradcam']
return self.forward_gradcam(imgs, **kwargs)
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
if self.blending is not None:
imgs, label = self.blending(imgs, label)
return self.forward_train(imgs, label, **kwargs)
return self.forward_test(imgs, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
assert item in data_batch
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
| 12,392 | 36.554545 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/recognizers/recognizer3d.py | import torch
from torch import nn
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
def copyParams(module_src, module_dest):
params_src = module_src.named_parameters()
params_dest = module_dest.named_parameters()
dict_dest = dict(params_dest)
for name, param in params_src:
dict_dest[name].data.copy_(param.data)
@RECOGNIZERS.register_module()
class Recognizer3D(BaseRecognizer):
"""3D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
assert self.with_cls_head
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
losses = dict()
x = self.extract_feat(imgs)
if self.with_neck:
x, loss_aux = self.neck(x, labels.squeeze())
losses.update(loss_aux)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
num_segs = imgs.shape[1]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
if self.max_testing_views is not None:
total_views = imgs.shape[0]
assert num_segs == total_views, (
'max_testing_views is only compatible '
'with batch_size == 1')
view_ptr = 0
feats = []
while view_ptr < total_views:
batch_imgs = imgs[view_ptr:view_ptr + self.max_testing_views]
x = self.extract_feat(batch_imgs)
if self.with_neck:
x, _ = self.neck(x)
feats.append(x)
view_ptr += self.max_testing_views
# should consider the case that feat is a tuple
if isinstance(feats[0], tuple):
len_tuple = len(feats[0])
feat = [
torch.cat([x[i] for x in feats]) for i in range(len_tuple)
]
feat = tuple(feat)
else:
feat = torch.cat(feats)
else:
feat = self.extract_feat(imgs)
if self.with_neck:
feat, _ = self.neck(feat)
if self.feature_extraction:
# perform spatio-temporal pooling
avg_pool = nn.AdaptiveAvgPool3d(1)
if isinstance(feat, tuple):
feat = [avg_pool(x) for x in feat]
# concat them
feat = torch.cat(feat, axis=1)
else:
feat = avg_pool(feat)
# squeeze dimensions
feat = feat.reshape((batches, num_segs, -1))
# temporal average pooling
feat = feat.mean(axis=1)
return feat
# should have cls_head if not extracting features
assert self.with_cls_head
cls_score = self.cls_head(feat)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs, softmax=False):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
assert self.with_cls_head
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if self.with_neck:
x, _ = self.neck(x)
outs = self.cls_head(x)
if softmax:
outs = nn.functional.softmax(outs)
return (outs, )
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
assert self.with_cls_head
return self._do_test(imgs)
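# Illustrative sketch (not part of the original file): when max_testing_views
# is set and batch_size == 1, `_do_test` above walks the test views in
# fixed-size chunks and concatenates the per-chunk features. Assuming 10
# views and a chunk size of 4:
def _chunked_views_sketch(total_views=10, max_testing_views=4):
    view_ptr = 0
    chunk_sizes = []
    while view_ptr < total_views:
        chunk_sizes.append(min(max_testing_views, total_views - view_ptr))
        view_ptr += max_testing_views
    return chunk_sizes  # [4, 4, 2]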
| 4,166 | 30.330827 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/recognizers/recognizer2d.py | import torch
from torch import nn
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
assert self.with_cls_head
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if self.backbone_from in ['torchvision', 'timm']:
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
# apply adaptive avg pooling
x = nn.AdaptiveAvgPool2d(1)(x)
x = x.reshape((x.shape[0], -1))
x = x.reshape(x.shape + (1, 1))
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x, labels.squeeze())
x = x.squeeze(2)
num_segs = 1
losses.update(loss_aux)
cls_score = self.cls_head(x, num_segs)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
if self.backbone_from in ['torchvision', 'timm']:
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
# apply adaptive avg pooling
x = nn.AdaptiveAvgPool2d(1)(x)
x = x.reshape((x.shape[0], -1))
x = x.reshape(x.shape + (1, 1))
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
x = x.squeeze(2)
num_segs = 1
if self.feature_extraction:
# perform spatial pooling
avg_pool = nn.AdaptiveAvgPool2d(1)
x = avg_pool(x)
# squeeze dimensions
x = x.reshape((batches, num_segs, -1))
# temporal average pooling
x = x.mean(axis=1)
return x
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
# should have cls_head if not extracting features
cls_score = self.cls_head(x, num_segs)
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def _do_fcn_test(self, imgs):
# [N, num_crops * num_segs, C, H, W] ->
# [N * num_crops * num_segs, C, H, W]
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = self.test_cfg.get('num_segs', self.backbone.num_segments)
if self.test_cfg.get('flip', False):
imgs = torch.flip(imgs, [-1])
x = self.extract_feat(imgs)
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
else:
x = x.reshape((-1, num_segs) +
x.shape[1:]).transpose(1, 2).contiguous()
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
cls_score = self.cls_head(x, fcn_test=True)
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
if self.test_cfg.get('fcn_test', False):
# If specified, spatially fully-convolutional testing is performed
assert not self.feature_extraction
assert self.with_cls_head
return self._do_fcn_test(imgs).cpu().numpy()
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs, softmax=False):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
assert self.with_cls_head
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
x = x.squeeze(2)
num_segs = 1
outs = self.cls_head(x, num_segs)
if softmax:
outs = nn.functional.softmax(outs)
return (outs, )
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
assert self.with_cls_head
return self._do_test(imgs)
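# Illustrative sketch (not part of the original file): a 2D recognizer folds
# the segment axis into the batch axis before the backbone and recovers
# num_segs from the flat batch size afterwards, as in `forward_train` and
# `_do_test` above.
def _segment_fold_sketch():
    imgs = torch.zeros(2, 8, 3, 224, 224)         # [batch, num_segs, C, H, W]
    batches = imgs.shape[0]
    imgs = imgs.reshape((-1, ) + imgs.shape[2:])  # [batch * num_segs, C, H, W]
    num_segs = imgs.shape[0] // batches           # 8
    return imgs.shape, num_segs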
| 6,572 | 34.33871 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/recognizers/audio_recognizer.py | from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class AudioRecognizer(BaseRecognizer):
"""Audio recognizer model framework."""
def forward(self, audios, label=None, return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(audios, label)
return self.forward_test(audios)
def forward_train(self, audios, labels):
"""Defines the computation performed at every call when training."""
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss = self.cls_head.loss(cls_score, gt_labels)
return loss
def forward_test(self, audios):
"""Defines the computation performed at every call when evaluation and
testing."""
num_segs = audios.shape[1]
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score.cpu().numpy()
def forward_gradcam(self, audios):
raise NotImplementedError
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
| 3,632 | 34.617647 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/common/tam.py | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init, normal_init
class TAM(nn.Module):
"""Temporal Adaptive Module(TAM) for TANet.
This module is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
Args:
in_channels (int): Channel num of input features.
num_segments (int): Number of frame segments.
alpha (int): ```alpha``` in the paper and is the ratio of the
intermediate channel number to the initial channel number in the
global branch. Default: 2.
adaptive_kernel_size (int): ```K``` in the paper and is the size of the
adaptive kernel size in the global branch. Default: 3.
beta (int): ```beta``` in the paper and is set to control the model
complexity in the local branch. Default: 4.
conv1d_kernel_size (int): Size of the convolution kernel of Conv1d in
the local branch. Default: 3.
adaptive_convolution_stride (int): The first dimension of strides in
the adaptive convolution of ```Temporal Adaptive Aggregation```.
Default: 1.
adaptive_convolution_padding (int): The first dimension of paddings in
the adaptive convolution of ```Temporal Adaptive Aggregation```.
Default: 1.
init_std (float): Std value for initiation of `nn.Linear`. Default:
0.001.
"""
def __init__(self,
in_channels,
num_segments,
alpha=2,
adaptive_kernel_size=3,
beta=4,
conv1d_kernel_size=3,
adaptive_convolution_stride=1,
adaptive_convolution_padding=1,
init_std=0.001):
super().__init__()
assert beta > 0 and alpha > 0
self.in_channels = in_channels
self.num_segments = num_segments
self.alpha = alpha
self.adaptive_kernel_size = adaptive_kernel_size
self.beta = beta
self.conv1d_kernel_size = conv1d_kernel_size
self.adaptive_convolution_stride = adaptive_convolution_stride
self.adaptive_convolution_padding = adaptive_convolution_padding
self.init_std = init_std
self.G = nn.Sequential(
nn.Linear(num_segments, num_segments * alpha, bias=False),
nn.BatchNorm1d(num_segments * alpha), nn.ReLU(inplace=True),
nn.Linear(num_segments * alpha, adaptive_kernel_size, bias=False),
nn.Softmax(-1))
self.L = nn.Sequential(
nn.Conv1d(
in_channels,
in_channels // beta,
conv1d_kernel_size,
stride=1,
padding=conv1d_kernel_size // 2,
bias=False), nn.BatchNorm1d(in_channels // beta),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels // beta, in_channels, 1, bias=False),
nn.Sigmoid())
self.init_weights()
def init_weights(self):
"""Initiate the parameters from scratch."""
for m in self.modules():
if isinstance(m, nn.Conv1d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm1d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# [n, c, h, w]
n, c, h, w = x.size()
num_segments = self.num_segments
num_batches = n // num_segments
assert c == self.in_channels
# [num_batches, c, num_segments, h, w]
x = x.view(num_batches, num_segments, c, h, w)
x = x.permute(0, 2, 1, 3, 4).contiguous()
# [num_batches * c, num_segments, 1, 1]
theta_out = F.adaptive_avg_pool2d(
x.view(-1, num_segments, h, w), (1, 1))
# [num_batches * c, 1, adaptive_kernel_size, 1]
conv_kernel = self.G(theta_out.view(-1, num_segments)).view(
num_batches * c, 1, -1, 1)
# [num_batches, c, num_segments, 1, 1]
local_activation = self.L(theta_out.view(-1, c, num_segments)).view(
num_batches, c, num_segments, 1, 1)
# [num_batches, c, num_segments, h, w]
new_x = x * local_activation
# [1, num_batches * c, num_segments, h * w]
y = F.conv2d(
new_x.view(1, num_batches * c, num_segments, h * w),
conv_kernel,
bias=None,
stride=(self.adaptive_convolution_stride, 1),
padding=(self.adaptive_convolution_padding, 0),
groups=num_batches * c)
# [n, c, h, w]
y = y.view(num_batches, c, num_segments, h, w)
y = y.permute(0, 2, 1, 3, 4).contiguous().view(n, c, h, w)
return y
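# Illustrative sketch (not part of the original file): the adaptive
# aggregation above applies one learned temporal kernel per (batch, channel)
# pair by folding batch * c into the group axis of F.conv2d. Toy shapes:
def _adaptive_agg_sketch(num_batches=2, c=4, t=8, hw=49, k=3):
    import torch
    x = torch.randn(1, num_batches * c, t, hw)
    kernels = torch.randn(num_batches * c, 1, k, 1)  # one kernel per channel
    y = F.conv2d(x, kernels, stride=(1, 1), padding=(k // 2, 0),
                 groups=num_batches * c)
    return y.shape  # torch.Size([1, num_batches * c, t, hw])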
| 5,051 | 36.422222 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/common/conv_audio.py | import torch
import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, ConvModule, constant_init, kaiming_init
from torch.nn.modules.utils import _pair
@CONV_LAYERS.register_module()
class ConvAudio(nn.Module):
"""Conv2d module for AudioResNet backbone.
    This module is proposed in `Audiovisual SlowFast Networks for Video
    Recognition <https://arxiv.org/abs/2001.08740>`_.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int | tuple[int]): Same as nn.Conv2d.
op (string): Operation to merge the output of freq
and time feature map. Choices are 'sum' and 'concat'.
Default: 'concat'.
stride (int | tuple[int]): Same as nn.Conv2d.
padding (int | tuple[int]): Same as nn.Conv2d.
dilation (int | tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
op='concat',
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False):
super().__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
assert op in ['concat', 'sum']
self.op = op
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.output_padding = (0, 0)
self.transposed = False
self.conv_1 = ConvModule(
in_channels,
out_channels,
kernel_size=(kernel_size[0], 1),
stride=stride,
padding=(kernel_size[0] // 2, 0),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.conv_2 = ConvModule(
in_channels,
out_channels,
kernel_size=(1, kernel_size[1]),
stride=stride,
padding=(0, kernel_size[1] // 2),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x_1 = self.conv_1(x)
x_2 = self.conv_2(x)
if self.op == 'concat':
out = torch.cat([x_1, x_2], 1)
else:
out = x_1 + x_2
return out
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_1.conv)
kaiming_init(self.conv_2.conv)
constant_init(self.conv_1.bn, 1, bias=0)
constant_init(self.conv_2.bn, 1, bias=0)
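# Illustrative sketch (not part of the original file): with op='concat' the
# frequency branch (k x 1) and the time branch (1 x k) are stacked along the
# channel axis, doubling out_channels. Toy shape check:
def _conv_audio_shape_sketch():
    conv = ConvAudio(in_channels=1, out_channels=8, kernel_size=3)
    spec = torch.randn(2, 1, 64, 64)  # [N, C, freq, time]
    return conv(spec).shape           # torch.Size([2, 16, 64, 64])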
| 3,225 | 29.72381 | 77 | py |
STTS | STTS-main/VideoSwin/mmaction/models/common/lfb.py | import io
import os.path as osp
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
try:
import lmdb
lmdb_imported = True
except (ImportError, ModuleNotFoundError):
lmdb_imported = False
class LFB:
"""Long-Term Feature Bank (LFB).
LFB is proposed in `Long-Term Feature Banks for Detailed Video
Understanding <https://arxiv.org/abs/1812.05038>`_
    The ROI features of videos are stored in the feature bank, which is
    generated by running inference with an lfb infer config.
Formally, LFB is a Dict whose keys are video IDs and its values are also
Dicts whose keys are timestamps in seconds. Example of LFB:
.. code-block:: Python
{
'0f39OWEqJ24': {
901: tensor([[ 1.2760, 1.1965, ..., 0.0061, -0.0639],
[-0.6320, 0.3794, ..., -1.2768, 0.5684],
[ 0.2535, 1.0049, ..., 0.4906, 1.2555],
[-0.5838, 0.8549, ..., -2.1736, 0.4162]]),
...
1705: tensor([[-1.0169, -1.1293, ..., 0.6793, -2.0540],
[ 1.2436, -0.4555, ..., 0.2281, -0.8219],
[ 0.2815, -0.0547, ..., -0.4199, 0.5157]]),
...
},
'xmqSaQPzL1E': {
...
},
...
}
Args:
lfb_prefix_path (str): The storage path of lfb.
max_num_sampled_feat (int): The max number of sampled features.
Default: 5.
window_size (int): Window size of sampling long term feature.
Default: 60.
lfb_channels (int): Number of the channels of the features stored
in LFB. Default: 2048.
dataset_modes (tuple[str] | str): Load LFB of datasets with different
modes, such as training, validation, testing datasets. If you don't
            do cross validation during training, just load the training
            dataset, i.e. set `dataset_modes = ('train', )`.
Default: ('train', 'val').
device (str): Where to load lfb. Choices are 'gpu', 'cpu' and 'lmdb'.
A 1.65GB half-precision ava lfb (including training and validation)
occupies about 2GB GPU memory. Default: 'gpu'.
lmdb_map_size (int): Map size of lmdb. Default: 4e9.
construct_lmdb (bool): Whether to construct lmdb. If you have
constructed lmdb of lfb, you can set to False to skip the
construction. Default: True.
"""
def __init__(self,
lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=2048,
dataset_modes=('train', 'val'),
device='gpu',
lmdb_map_size=4e9,
construct_lmdb=True):
if not osp.exists(lfb_prefix_path):
raise ValueError(
f'lfb prefix path {lfb_prefix_path} does not exist!')
self.lfb_prefix_path = lfb_prefix_path
self.max_num_sampled_feat = max_num_sampled_feat
self.window_size = window_size
self.lfb_channels = lfb_channels
if not isinstance(dataset_modes, tuple):
assert isinstance(dataset_modes, str)
dataset_modes = (dataset_modes, )
self.dataset_modes = dataset_modes
self.device = device
rank, world_size = get_dist_info()
# Loading LFB
if self.device == 'gpu':
self.load_lfb(f'cuda:{rank}')
elif self.device == 'cpu':
if world_size > 1:
                warnings.warn(
                    'If distributed training is used with multiple GPUs, lfb '
                    'will be loaded multiple times on RAM. In this case, '
                    "'lmdb' is recommended.", UserWarning)
self.load_lfb('cpu')
elif self.device == 'lmdb':
assert lmdb_imported, (
'Please install `lmdb` to load lfb on lmdb!')
self.lmdb_map_size = lmdb_map_size
self.construct_lmdb = construct_lmdb
self.lfb_lmdb_path = osp.normpath(
osp.join(self.lfb_prefix_path, 'lmdb'))
if rank == 0 and self.construct_lmdb:
print('Constructing LFB lmdb...')
self.load_lfb_on_lmdb()
# Synchronizes all processes to make sure lfb lmdb exist.
if world_size > 1:
dist.barrier()
self.lmdb_env = lmdb.open(self.lfb_lmdb_path, readonly=True)
else:
raise ValueError("Device must be 'gpu', 'cpu' or 'lmdb', ",
f'but get {self.device}.')
def load_lfb(self, map_location):
self.lfb = {}
for dataset_mode in self.dataset_modes:
lfb_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))
print(f'Loading LFB from {lfb_path}...')
self.lfb.update(torch.load(lfb_path, map_location=map_location))
print(f'LFB has been loaded on {map_location}.')
def load_lfb_on_lmdb(self):
lfb = {}
for dataset_mode in self.dataset_modes:
lfb_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))
lfb.update(torch.load(lfb_path, map_location='cpu'))
lmdb_env = lmdb.open(self.lfb_lmdb_path, map_size=self.lmdb_map_size)
for key, value in lfb.items():
txn = lmdb_env.begin(write=True)
buff = io.BytesIO()
torch.save(value, buff)
buff.seek(0)
txn.put(key.encode(), buff.read())
txn.commit()
buff.close()
print(f'LFB lmdb has been constructed on {self.lfb_lmdb_path}!')
def sample_long_term_features(self, video_id, timestamp):
if self.device == 'lmdb':
with self.lmdb_env.begin(write=False) as txn:
buf = txn.get(video_id.encode())
video_features = torch.load(io.BytesIO(buf))
else:
video_features = self.lfb[video_id]
# Sample long term features.
window_size, K = self.window_size, self.max_num_sampled_feat
start = timestamp - (window_size // 2)
lt_feats = torch.zeros(window_size * K, self.lfb_channels)
for idx, sec in enumerate(range(start, start + window_size)):
if sec in video_features:
# `num_feat` is the number of roi features in this second.
num_feat = len(video_features[sec])
num_feat_sampled = min(num_feat, K)
# Sample some roi features randomly.
random_lfb_indices = np.random.choice(
range(num_feat), num_feat_sampled, replace=False)
for k, rand_idx in enumerate(random_lfb_indices):
lt_feats[idx * K + k] = video_features[sec][rand_idx]
# [window_size * max_num_sampled_feat, lfb_channels]
return lt_feats
def __getitem__(self, img_key):
"""Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb`
is a instance of class LFB."""
video_id, timestamp = img_key.split(',')
return self.sample_long_term_features(video_id, int(timestamp))
def __len__(self):
"""The number of videos whose ROI features are stored in LFB."""
return len(self.lfb)
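# Illustrative sketch (not part of the original file): the sampling loop of
# `sample_long_term_features` above, reduced to a toy bank with RoI features
# at a single second. Rows stay zero for seconds without features.
def _lfb_sample_sketch(window_size=4, max_num_sampled_feat=2, channels=8):
    video_features = {10: torch.randn(3, channels)}  # 3 RoI feats at second 10
    timestamp = 11
    start = timestamp - (window_size // 2)           # seconds 9..12
    lt_feats = torch.zeros(window_size * max_num_sampled_feat, channels)
    for idx, sec in enumerate(range(start, start + window_size)):
        if sec in video_features:
            num_feat = len(video_features[sec])
            picks = np.random.choice(
                range(num_feat), min(num_feat, max_num_sampled_feat),
                replace=False)
            for k, rand_idx in enumerate(picks):
                lt_feats[idx * max_num_sampled_feat + k] = \
                    video_features[sec][rand_idx]
    return lt_feats.shape  # torch.Size([8, 8])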
| 7,493 | 38.650794 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/common/conv2plus1d.py | import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, build_norm_layer, constant_init, kaiming_init
from torch.nn.modules.utils import _triple
@CONV_LAYERS.register_module()
class Conv2plus1d(nn.Module):
"""(2+1)d Conv module for R(2+1)d backbone.
https://arxiv.org/pdf/1711.11248.pdf.
Args:
in_channels (int): Same as nn.Conv3d.
out_channels (int): Same as nn.Conv3d.
kernel_size (int | tuple[int]): Same as nn.Conv3d.
stride (int | tuple[int]): Same as nn.Conv3d.
padding (int | tuple[int]): Same as nn.Conv3d.
dilation (int | tuple[int]): Same as nn.Conv3d.
groups (int): Same as nn.Conv3d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
norm_cfg=dict(type='BN3d')):
super().__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
assert len(kernel_size) == len(stride) == len(padding) == 3
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.norm_cfg = norm_cfg
self.output_padding = (0, 0, 0)
self.transposed = False
        # The number of intermediate channels follows the parameter-matching
        # rule of https://arxiv.org/pdf/1711.11248.pdf:
        #   M_i = floor((t * d^2 * N_{i-1} * N_i) / (d^2 * N_{i-1} + t * N_i))
        # where d and t are the spatial and temporal kernel sizes, and
        # N_{i-1}, N_i are the input and output channel numbers (inplanes and
        # planes).
mid_channels = 3 * (
in_channels * out_channels * kernel_size[1] * kernel_size[2])
mid_channels /= (
in_channels * kernel_size[1] * kernel_size[2] + 3 * out_channels)
mid_channels = int(mid_channels)
self.conv_s = nn.Conv3d(
in_channels,
mid_channels,
kernel_size=(1, kernel_size[1], kernel_size[2]),
stride=(1, stride[1], stride[2]),
padding=(0, padding[1], padding[2]),
bias=bias)
_, self.bn_s = build_norm_layer(self.norm_cfg, mid_channels)
self.relu = nn.ReLU(inplace=True)
self.conv_t = nn.Conv3d(
mid_channels,
out_channels,
kernel_size=(kernel_size[0], 1, 1),
stride=(stride[0], 1, 1),
padding=(padding[0], 0, 0),
bias=bias)
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.conv_s(x)
x = self.bn_s(x)
x = self.relu(x)
x = self.conv_t(x)
return x
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_s)
kaiming_init(self.conv_t)
constant_init(self.bn_s, 1, bias=0)
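# Illustrative sketch (not part of the original file): the mid-channel
# formula above keeps the (2+1)D factorization at roughly the same parameter
# count as a full 3D conv. For the common 3x3x3 kernel with 64 channels:
def _mid_channels_sketch(in_channels=64, out_channels=64,
                         kernel_size=(3, 3, 3)):
    t, d1, d2 = kernel_size
    mid = 3 * in_channels * out_channels * d1 * d2
    mid /= in_channels * d1 * d2 + 3 * out_channels
    return int(mid)  # 144 for the defaults above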
| 3,453 | 31.895238 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/necks/tpn.py | import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..builder import NECKS, build_loss
class Identity(nn.Module):
"""Identity mapping."""
def forward(self, x):
return x
class DownSample(nn.Module):
"""DownSample modules.
It uses convolution and maxpooling to downsample the input feature,
and specifies downsample position to determine `pool-conv` or `conv-pool`.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output feature.
kernel_size (int | tuple[int]): Same as :class:`ConvModule`.
Default: (3, 1, 1).
stride (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 1, 1).
padding (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 0, 0).
groups (int): Same as :class:`ConvModule`. Default: 1.
bias (bool | str): Same as :class:`ConvModule`. Default: False.
conv_cfg (dict | None): Same as :class:`ConvModule`.
Default: dict(type='Conv3d').
norm_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
act_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
downsample_position (str): Type of downsample position. Options are
'before' and 'after'. Default: 'after'.
downsample_scale (int | tuple[int]): downsample scale for maxpooling.
It will be used for kernel size and stride of maxpooling.
Default: (1, 2, 2).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=(3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
groups=1,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=None,
act_cfg=None,
downsample_position='after',
downsample_scale=(1, 2, 2)):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups,
bias=bias,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert downsample_position in ['before', 'after']
self.downsample_position = downsample_position
self.pool = nn.MaxPool3d(
downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)
def forward(self, x):
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
else:
x = self.conv(x)
x = self.pool(x)
return x
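# Illustrative sketch (not part of the original file): with the default
# (1, 2, 2) downsample scale, DownSample halves the spatial size and keeps
# the temporal length, regardless of downsample_position. Toy shape check:
def _downsample_shape_sketch():
    down = DownSample(16, 32)
    x = torch.randn(2, 16, 8, 14, 14)  # [N, C, T, H, W]
    return down(x).shape               # torch.Size([2, 32, 8, 7, 7])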
class LevelFusion(nn.Module):
"""Level Fusion module.
    This module is used to aggregate hierarchical features that are dynamic in
    visual tempos and consistent in spatial semantics. The top/bottom features
for top-down/bottom-up flow would be combined to achieve two additional
options, namely 'Cascade Flow' or 'Parallel Flow'. While applying a
bottom-up flow after a top-down flow will lead to the cascade flow,
applying them simultaneously will result in the parallel flow.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
mid_channels (tuple[int]): Channel numbers of middle features tuple.
out_channels (int): Channel numbers of output features.
downsample_scales (tuple[int | tuple[int]]): downsample scales for
each :class:`DownSample` module. Default: ((1, 1, 1), (1, 1, 1)).
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
downsample_scales=((1, 1, 1), (1, 1, 1))):
super().__init__()
num_stages = len(in_channels)
self.downsamples = nn.ModuleList()
for i in range(num_stages):
downsample = DownSample(
in_channels[i],
mid_channels[i],
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
bias=False,
padding=(0, 0, 0),
groups=32,
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
downsample_position='before',
downsample_scale=downsample_scales[i])
self.downsamples.append(downsample)
self.fusion_conv = ConvModule(
sum(mid_channels),
out_channels,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True))
def forward(self, x):
out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out
class SpatialModulation(nn.Module):
"""Spatial Semantic Modulation.
This module is used to align spatial semantics of features in the
multi-depth pyramid. For each but the top-level feature, a stack
of convolutions with level-specific stride are applied to it, matching
its spatial shape and receptive field with the top one.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel numbers of output features tuple.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.spatial_modulation = nn.ModuleList()
for channel in in_channels:
downsample_scale = out_channels // channel
downsample_factor = int(np.log2(downsample_scale))
op = nn.ModuleList()
if downsample_factor < 1:
op = Identity()
else:
for factor in range(downsample_factor):
in_factor = 2**factor
out_factor = 2**(factor + 1)
op.append(
ConvModule(
channel * in_factor,
channel * out_factor, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)))
self.spatial_modulation.append(op)
def forward(self, x):
out = []
for i, _ in enumerate(x):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = x[i]
for op in self.spatial_modulation[i]:
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](x[i]))
return out
class AuxHead(nn.Module):
"""Auxiliary Head.
This auxiliary head is appended to receive stronger supervision,
leading to enhanced semantics.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
loss_weight (float): weight of loss for the auxiliary head.
Default: 0.5.
        loss_cls (dict): Config for building loss.
Default: ``dict(type='CrossEntropyLoss')``.
"""
def __init__(self,
in_channels,
out_channels,
loss_weight=0.5,
loss_cls=dict(type='CrossEntropyLoss')):
super().__init__()
self.conv = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc = nn.Linear(in_channels * 2, out_channels)
self.loss_cls = build_loss(loss_cls)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=0.01)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def forward(self, x, target=None):
losses = dict()
if target is None:
return losses
x = self.conv(x)
x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(x)
x = self.fc(x)
if target.shape == torch.Size([]):
target = target.unsqueeze(0)
losses['loss_aux'] = self.loss_weight * self.loss_cls(x, target)
return losses
class TemporalModulation(nn.Module):
"""Temporal Rate Modulation.
The module is used to equip TPN with a similar flexibility for temporal
tempo modulation as in the input-level frame pyramid.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
downsample_scale (int): Downsample scale for maxpooling. Default: 8.
"""
def __init__(self, in_channels, out_channels, downsample_scale=8):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels, (3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
bias=False,
groups=32,
conv_cfg=dict(type='Conv3d'),
act_cfg=None)
self.pool = nn.MaxPool3d((downsample_scale, 1, 1),
(downsample_scale, 1, 1), (0, 0, 0),
ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
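# Illustrative sketch (not part of the original file): TemporalModulation
# keeps the spatial size and divides the temporal length by downsample_scale.
def _temporal_modulation_shape_sketch():
    tm = TemporalModulation(in_channels=32, out_channels=64,
                            downsample_scale=8)
    x = torch.randn(2, 32, 32, 7, 7)  # [N, C, T, H, W]
    return tm(x).shape                # torch.Size([2, 64, 4, 7, 7])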
@NECKS.register_module()
class TPN(nn.Module):
"""TPN neck.
This module is proposed in `Temporal Pyramid Network for Action Recognition
<https://arxiv.org/pdf/2004.03548.pdf>`_
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel number of output feature.
spatial_modulation_cfg (dict | None): Config for spatial modulation
layers. Required keys are `in_channels` and `out_channels`.
Default: None.
temporal_modulation_cfg (dict | None): Config for temporal modulation
layers. Default: None.
upsample_cfg (dict | None): Config for upsample layers. The keys are
            the same as those in :class:`nn.Upsample`. Default: None.
downsample_cfg (dict | None): Config for downsample layers.
Default: None.
level_fusion_cfg (dict | None): Config for level fusion layers.
Required keys are 'in_channels', 'mid_channels', 'out_channels'.
Default: None.
aux_head_cfg (dict | None): Config for aux head layers.
Required keys are 'out_channels'. Default: None.
flow_type (str): Flow type to combine the features. Options are
'cascade' and 'parallel'. Default: 'cascade'.
"""
def __init__(self,
in_channels,
out_channels,
spatial_modulation_cfg=None,
temporal_modulation_cfg=None,
upsample_cfg=None,
downsample_cfg=None,
level_fusion_cfg=None,
aux_head_cfg=None,
flow_type='cascade'):
super().__init__()
assert isinstance(in_channels, tuple)
assert isinstance(out_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_tpn_stages = len(in_channels)
assert spatial_modulation_cfg is None or isinstance(
spatial_modulation_cfg, dict)
assert temporal_modulation_cfg is None or isinstance(
temporal_modulation_cfg, dict)
assert upsample_cfg is None or isinstance(upsample_cfg, dict)
assert downsample_cfg is None or isinstance(downsample_cfg, dict)
assert aux_head_cfg is None or isinstance(aux_head_cfg, dict)
assert level_fusion_cfg is None or isinstance(level_fusion_cfg, dict)
if flow_type not in ['cascade', 'parallel']:
raise ValueError(
f"flow type in TPN should be 'cascade' or 'parallel', "
f'but got {flow_type} instead.')
self.flow_type = flow_type
self.temporal_modulation_ops = nn.ModuleList()
self.upsample_ops = nn.ModuleList()
self.downsample_ops = nn.ModuleList()
self.level_fusion_1 = LevelFusion(**level_fusion_cfg)
self.spatial_modulation = SpatialModulation(**spatial_modulation_cfg)
for i in range(self.num_tpn_stages):
if temporal_modulation_cfg is not None:
downsample_scale = temporal_modulation_cfg[
'downsample_scales'][i]
temporal_modulation = TemporalModulation(
in_channels[-1], out_channels, downsample_scale)
self.temporal_modulation_ops.append(temporal_modulation)
if i < self.num_tpn_stages - 1:
if upsample_cfg is not None:
upsample = nn.Upsample(**upsample_cfg)
self.upsample_ops.append(upsample)
if downsample_cfg is not None:
downsample = DownSample(out_channels, out_channels,
**downsample_cfg)
self.downsample_ops.append(downsample)
out_dims = level_fusion_cfg['out_channels']
# two pyramids
self.level_fusion_2 = LevelFusion(**level_fusion_cfg)
self.pyramid_fusion = ConvModule(
out_dims * 2,
2048,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if aux_head_cfg is not None:
self.aux_head = AuxHead(self.in_channels[-2], **aux_head_cfg)
else:
self.aux_head = None
self.init_weights()
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
if self.aux_head is not None:
self.aux_head.init_weights()
def forward(self, x, target=None):
loss_aux = dict()
# Auxiliary loss
if self.aux_head is not None:
loss_aux = self.aux_head(x[-2], target)
# Spatial Modulation
spatial_modulation_outs = self.spatial_modulation(x)
# Temporal Modulation
temporal_modulation_outs = []
for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
temporal_modulation_outs.append(
temporal_modulation(spatial_modulation_outs[i]))
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.upsample_ops) != 0:
for i in range(self.num_tpn_stages - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])
# Get top-down outs
top_down_outs = self.level_fusion_1(outs)
# Build bottom-up flow using downsample operation
if self.flow_type == 'parallel':
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.downsample_ops) != 0:
for i in range(self.num_tpn_stages - 1):
outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])
# Get bottom-up outs
botton_up_outs = self.level_fusion_2(outs)
# fuse two pyramid outs
outs = self.pyramid_fusion(
torch.cat([top_down_outs, botton_up_outs], 1))
return outs, loss_aux
| 16,411 | 35.552339 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/roi_extractors/single_straight3d.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.utils import import_module_error_class
try:
from mmcv.ops import RoIAlign, RoIPool
except (ImportError, ModuleNotFoundError):
@import_module_error_class('mmcv-full')
class RoIAlign(nn.Module):
pass
@import_module_error_class('mmcv-full')
class RoIPool(nn.Module):
pass
try:
from mmdet.models import ROI_EXTRACTORS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class SingleRoIExtractor3D(nn.Module):
"""Extract RoI features from a single level feature map.
Args:
roi_layer_type (str): Specify the RoI layer type. Default: 'RoIAlign'.
featmap_stride (int): Strides of input feature maps. Default: 16.
output_size (int | tuple): Size or (Height, Width). Default: 16.
        sampling_ratio (int): number of input samples to take for each
output sample. 0 to take samples densely for current models.
Default: 0.
pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
Default: 'avg'.
aligned (bool): if False, use the legacy implementation in
MMDetection. If True, align the results more perfectly.
Default: True.
with_temporal_pool (bool): if True, avgpool the temporal dim.
Default: True.
with_global (bool): if True, concatenate the RoI feature with global
feature. Default: False.
Note that sampling_ratio, pool_mode, aligned only apply when roi_layer_type
is set as RoIAlign.
"""
def __init__(self,
roi_layer_type='RoIAlign',
featmap_stride=16,
output_size=16,
sampling_ratio=0,
pool_mode='avg',
aligned=True,
with_temporal_pool=True,
temporal_pool_mode='avg',
with_global=False):
super().__init__()
self.roi_layer_type = roi_layer_type
assert self.roi_layer_type in ['RoIPool', 'RoIAlign']
self.featmap_stride = featmap_stride
self.spatial_scale = 1. / self.featmap_stride
self.output_size = output_size
self.sampling_ratio = sampling_ratio
self.pool_mode = pool_mode
self.aligned = aligned
self.with_temporal_pool = with_temporal_pool
self.temporal_pool_mode = temporal_pool_mode
self.with_global = with_global
if self.roi_layer_type == 'RoIPool':
self.roi_layer = RoIPool(self.output_size, self.spatial_scale)
else:
self.roi_layer = RoIAlign(
self.output_size,
self.spatial_scale,
sampling_ratio=self.sampling_ratio,
pool_mode=self.pool_mode,
aligned=self.aligned)
self.global_pool = nn.AdaptiveAvgPool2d(self.output_size)
def init_weights(self):
pass
# The shape of feat is N, C, T, H, W
def forward(self, feat, rois):
if not isinstance(feat, tuple):
feat = (feat, )
if len(feat) >= 2:
maxT = max([x.shape[2] for x in feat])
max_shape = (maxT, ) + feat[0].shape[3:]
# resize each feat to the largest shape (w. nearest)
feat = [F.interpolate(x, max_shape).contiguous() for x in feat]
if self.with_temporal_pool:
if self.temporal_pool_mode == 'avg':
feat = [torch.mean(x, 2, keepdim=True) for x in feat]
elif self.temporal_pool_mode == 'max':
feat = [torch.max(x, 2, keepdim=True)[0] for x in feat]
else:
raise NotImplementedError
feat = torch.cat(feat, axis=1).contiguous()
roi_feats = []
for t in range(feat.size(2)):
frame_feat = feat[:, :, t].contiguous()
roi_feat = self.roi_layer(frame_feat, rois)
if self.with_global:
global_feat = self.global_pool(frame_feat.contiguous())
inds = rois[:, 0].type(torch.int64)
global_feat = global_feat[inds]
roi_feat = torch.cat([roi_feat, global_feat], dim=1)
roi_feat = roi_feat.contiguous()
roi_feats.append(roi_feat)
return torch.stack(roi_feats, dim=2), feat
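# Illustrative sketch (not part of the original file, requires mmcv-full for
# RoIAlign): rois follow the mmcv convention [batch_index, x1, y1, x2, y2] in
# input-frame coordinates; spatial_scale maps them onto the feature map, and
# with_temporal_pool collapses T to 1 before RoIAlign runs per frame.
def _roi_extract_sketch():
    extractor = SingleRoIExtractor3D(featmap_stride=16, output_size=8)
    feat = torch.randn(2, 16, 4, 14, 14)  # [N, C, T, H, W]
    rois = torch.tensor([[0., 32., 32., 128., 160.],
                         [1., 0., 0., 64., 64.]])
    roi_feats, pooled_feat = extractor(feat, rois)
    return roi_feats.shape  # torch.Size([2, 16, 1, 8, 8])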
if mmdet_imported:
ROI_EXTRACTORS.register_module()(SingleRoIExtractor3D)
| 4,474 | 33.689922 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/base.py | from abc import ABCMeta, abstractmethod
import torch.nn as nn
class BaseWeightedLoss(nn.Module, metaclass=ABCMeta):
"""Base class for loss.
    All subclasses should overwrite the ``_forward()`` method, which returns
    the normal loss without loss weights.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
@abstractmethod
def _forward(self, *args, **kwargs):
pass
def forward(self, *args, **kwargs):
"""Defines the computation performed at every call.
Args:
*args: The positional arguments for the corresponding
loss.
**kwargs: The keyword arguments for the corresponding
loss.
Returns:
torch.Tensor: The calculated loss.
"""
ret = self._forward(*args, **kwargs)
if isinstance(ret, dict):
for k in ret:
if 'loss' in k:
ret[k] *= self.loss_weight
else:
ret *= self.loss_weight
return ret
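# Illustrative sketch (not part of the original file): a minimal subclass
# only implements `_forward`; the loss weighting is applied by the base
# class. Assuming a hypothetical L1 criterion:
class _L1LossSketch(BaseWeightedLoss):
    def _forward(self, pred, target):
        return (pred - target).abs().mean()
# _L1LossSketch(loss_weight=0.5)(pred, target) == 0.5 * mean(|pred - target|)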
| 1,181 | 25.266667 | 77 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/margin_loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
def batched_index_select(input, dim, index):
for i in range(1, len(input.shape)):
if i != dim:
index = index.unsqueeze(i)
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
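# Illustrative sketch (not part of the original file): gathering one
# probability per sample from a [N, num_classes] tensor with a [N, 1] index,
# as done in MarginLoss.forward below.
def _batched_index_select_sketch():
    probs = torch.tensor([[0.1, 0.7, 0.2],
                          [0.5, 0.3, 0.2]])
    labels = torch.tensor([1, 0]).unsqueeze(1)  # [N, 1]
    return batched_index_select(probs, dim=1, index=labels)  # [[0.7], [0.5]]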
@LOSSES.register_module()
class MarginLoss(nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, margin=0.5, alpha1=2, alpha2=0.5, loss_weight=1.):
super().__init__()
self.alpha1 = alpha1
self.alpha2 = alpha2
self.margin = margin
def forward(self, cls_score, labels, bottom_outputs):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
base_loss = F.cross_entropy(cls_score, labels)
if bottom_outputs is None:
loss = base_loss
else:
labels = labels.unsqueeze(1)
outputs = F.softmax(cls_score, dim=-1)
bottom_outputs = F.softmax(bottom_outputs, dim=-1)
topk_prob = batched_index_select(outputs, dim=1, index=labels)
bottom_prob = batched_index_select(bottom_outputs, dim=1, index=labels)
margin_loss = bottom_prob - topk_prob + self.margin
margin_loss = F.relu(margin_loss).mean()
loss = base_loss * self.alpha1 + margin_loss * self.alpha2
return loss | 2,004 | 34.175439 | 91 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/distill_loss.py | import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
@LOSSES.register_module()
class DistillationLoss(nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, distill_type='hard', alpha=0.5, tau=1.0, loss_weight=0.):
super().__init__()
assert distill_type in ['none', 'soft', 'hard']
self.distillation_type = distill_type
self.alpha = alpha
self.tau = tau
def forward(self, cls_score, labels, teacher_outputs):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
if cls_score.size() == labels.size():
# calculate loss for soft label
lsm = F.log_softmax(cls_score, 1)
loss_cls = -(labels * lsm).sum(1)
base_loss = loss_cls.mean()
else:
# calculate loss for hard label
base_loss = F.cross_entropy(cls_score, labels)
# base_loss = F.cross_entropy(cls_score, labels)
if self.distillation_type == 'none':
return base_loss
if self.distillation_type == 'soft':
T = self.tau
distillation_loss = F.kl_div(
F.log_softmax(cls_score / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / cls_score.numel()
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(cls_score, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
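# Illustrative sketch (not part of the original file): the 'soft' branch is a
# temperature-scaled KL divergence between student and teacher distributions,
# written out here for a single toy batch.
def _soft_distill_sketch(tau=2.0):
    import torch
    student = torch.tensor([[1.0, 0.0, -1.0]])
    teacher = torch.tensor([[2.0, 0.0, -2.0]])
    return F.kl_div(
        F.log_softmax(student / tau, dim=1),
        F.log_softmax(teacher / tau, dim=1),
        reduction='sum',
        log_target=True) * (tau * tau) / student.numel()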
| 2,144 | 34.75 | 91 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/ohem_hinge_loss.py | import torch
class OHEMHingeLoss(torch.autograd.Function):
"""This class is the core implementation for the completeness loss in
paper.
It compute class-wise hinge loss and performs online hard example mining
(OHEM).
"""
@staticmethod
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size):
"""Calculate OHEM hinge loss.
Args:
pred (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
is_positive (int): Set to 1 when proposals are positive and
set to -1 when proposals are incomplete.
ohem_ratio (float): Ratio of hard examples.
group_size (int): Number of proposals sampled per video.
Returns:
torch.Tensor: Returned class-wise hinge loss.
"""
num_samples = pred.size(0)
if num_samples != len(labels):
raise ValueError(f'Number of samples should be equal to that '
f'of labels, but got {num_samples} samples and '
f'{len(labels)} labels.')
losses = torch.zeros(num_samples, device=pred.device)
slopes = torch.zeros(num_samples, device=pred.device)
for i in range(num_samples):
losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1])
slopes[i] = -is_positive if losses[i] != 0 else 0
losses = losses.view(-1, group_size).contiguous()
sorted_losses, indices = torch.sort(losses, dim=1, descending=True)
keep_length = int(group_size * ohem_ratio)
loss = torch.zeros(1, device=pred.device)
for i in range(losses.size(0)):
loss += sorted_losses[i, :keep_length].sum()
ctx.loss_index = indices[:, :keep_length]
ctx.labels = labels
ctx.slopes = slopes
ctx.shape = pred.size()
ctx.group_size = group_size
ctx.num_groups = losses.size(0)
return loss
@staticmethod
def backward(ctx, grad_output):
labels = ctx.labels
slopes = ctx.slopes
grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device)
for group in range(ctx.num_groups):
for idx in ctx.loss_index[group]:
loc = idx + group * ctx.group_size
grad_in[loc, labels[loc] - 1] = (
slopes[loc] * grad_output.data[0])
return torch.autograd.Variable(grad_in), None, None, None, None
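# Illustrative sketch (not part of the original file): custom autograd
# Functions are invoked through `.apply()`. Labels are 1-indexed here, as the
# `labels[i] - 1` indexing above implies.
def _ohem_hinge_sketch():
    pred = torch.randn(4, 3, requires_grad=True)  # one group of 4 proposals
    labels = torch.tensor([1, 2, 3, 1])           # 1-indexed class labels
    loss = OHEMHingeLoss.apply(pred, labels, 1, 0.5, 4)
    loss.backward()                               # uses the custom backward
    return loss, pred.grad.shape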
| 2,497 | 37.430769 | 77 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/bmn_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .binary_logistic_regression_loss import binary_logistic_regression_loss
@LOSSES.register_module()
class BMNLoss(nn.Module):
"""BMN Loss.
From paper https://arxiv.org/abs/1907.09702,
code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
It will calculate loss for BMN Model. This loss is a weighted sum of
1) temporal evaluation loss based on confidence score of start and
end positions.
2) proposal evaluation regression loss based on confidence scores of
candidate proposals.
3) proposal evaluation classification loss based on classification
results of candidate proposals.
"""
@staticmethod
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
This function calculate the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
@staticmethod
def pem_reg_loss(pred_score,
gt_iou_map,
mask,
high_temporal_iou_threshold=0.7,
low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
temporal_iou. Default: 0.3.
Returns:
            torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) &
(gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) &
(gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(
loss * torch.ones_like(weights)) / torch.sum(weights)
return loss
@staticmethod
def pem_cls_loss(pred_score,
gt_iou_map,
mask,
threshold=0.9,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
            torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
def forward(self,
pred_bm,
pred_start,
pred_end,
gt_iou_map,
gt_start,
gt_end,
bm_mask,
weight_tem=1.0,
weight_pem_reg=10.0,
weight_pem_cls=1.0):
"""Calculate Boundary Matching Network Loss.
Args:
pred_bm (torch.Tensor): Predicted confidence score for boundary
matching map.
pred_start (torch.Tensor): Predicted confidence score for start.
pred_end (torch.Tensor): Predicted confidence score for end.
gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
map.
gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
bm_mask (torch.Tensor): Boundary-Matching mask.
weight_tem (float): Weight for tem loss. Default: 1.0.
weight_pem_reg (float): Weight for pem regression loss.
Default: 10.0.
weight_pem_cls (float): Weight for pem classification loss.
Default: 1.0.
Returns:
tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
            (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the BMN
loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
the proposal evaluation regression loss, pem_cls_loss is the
proposal evaluation classification loss.
"""
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask
pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
loss = (
weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
weight_pem_cls * pem_cls_loss)
return loss, tem_loss, pem_reg_loss, pem_cls_loss
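# A hedged shape sketch for BMNLoss: batch of 2, temporal dim T=100, and an
# all-ones Boundary-Matching mask standing in for the mask a real BMN model
# would provide; all tensors are random placeholders.
if __name__ == '__main__':
    T = 100
    loss_fn = BMNLoss()
    pred_bm = torch.rand(2, 2, T, T)
    pred_start, pred_end = torch.rand(2, T), torch.rand(2, T)
    gt_iou_map = torch.rand(2, T, T)
    gt_start, gt_end = torch.rand(2, T), torch.rand(2, T)
    bm_mask = torch.ones(T, T)
    loss, tem, pem_reg, pem_cls = loss_fn(pred_bm, pred_start, pred_end,
                                          gt_iou_map, gt_start, gt_end,
                                          bm_mask)
    print(loss.item())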
| 7,173 | 38.635359 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/nll_loss.py | import torch.nn.functional as F
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class NLLLoss(BaseWeightedLoss):
"""NLL Loss.
It will calculate NLL loss given cls_score and label.
"""
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate nll loss.
Returns:
torch.Tensor: The returned nll loss.
"""
loss_cls = F.nll_loss(cls_score, label, **kwargs)
return loss_cls
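# A hedged usage sketch: NLLLoss expects log-probabilities, so pair it with
# log_softmax upstream; the logits and labels below are placeholders.
if __name__ == '__main__':
    import torch
    loss_fn = NLLLoss()
    log_prob = F.log_softmax(torch.randn(4, 10), dim=1)
    print(loss_fn(log_prob, torch.randint(0, 10, (4,))))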
| 688 | 24.518519 | 74 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/hvu_loss.py | import torch
import torch.nn.functional as F
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class HVULoss(BaseWeightedLoss):
"""Calculate the BCELoss for HVU.
Args:
categories (tuple[str]): Names of tag categories, tags are organized in
this order. Default: ['action', 'attribute', 'concept', 'event',
'object', 'scene'].
category_nums (tuple[int]): Number of tags for each category. Default:
(739, 117, 291, 69, 1678, 248).
category_loss_weights (tuple[float]): Loss weights of categories, it
applies only if `loss_type == 'individual'`. The loss weights will
be normalized so that the sum equals to 1, so that you can give any
positive number as loss weight. Default: (1, 1, 1, 1, 1, 1).
loss_type (str): The loss type we calculate, we can either calculate
the BCELoss for all tags, or calculate the BCELoss for tags in each
category. Choices are 'individual' or 'all'. Default: 'all'.
with_mask (bool): Since some tag categories are missing for some video
clips. If `with_mask == True`, we will not calculate loss for these
missing categories. Otherwise, these missing categories are treated
as negative samples.
reduction (str): Reduction way. Choices are 'mean' or 'sum'. Default:
'mean'.
loss_weight (float): The loss weight. Default: 1.0.
"""
def __init__(self,
categories=('action', 'attribute', 'concept', 'event',
'object', 'scene'),
category_nums=(739, 117, 291, 69, 1678, 248),
category_loss_weights=(1, 1, 1, 1, 1, 1),
loss_type='all',
with_mask=False,
reduction='mean',
loss_weight=1.0):
super().__init__(loss_weight)
self.categories = categories
self.category_nums = category_nums
self.category_loss_weights = category_loss_weights
assert len(self.category_nums) == len(self.category_loss_weights)
for category_loss_weight in self.category_loss_weights:
assert category_loss_weight >= 0
self.loss_type = loss_type
self.with_mask = with_mask
self.reduction = reduction
self.category_startidx = [0]
for i in range(len(self.category_nums) - 1):
self.category_startidx.append(self.category_startidx[-1] +
self.category_nums[i])
assert self.loss_type in ['individual', 'all']
assert self.reduction in ['mean', 'sum']
def _forward(self, cls_score, label, mask, category_mask):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
mask (torch.Tensor): The mask of tags. 0 indicates that the
category of this tag is missing in the label of the video.
category_mask (torch.Tensor): The category mask. For each sample,
it's a tensor with length `len(self.categories)`, denotes that
if the category is labeled for this video.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if self.loss_type == 'all':
loss_cls = F.binary_cross_entropy_with_logits(
cls_score, label, reduction='none')
if self.with_mask:
w_loss_cls = mask * loss_cls
w_loss_cls = torch.sum(w_loss_cls, dim=1)
if self.reduction == 'mean':
w_loss_cls = w_loss_cls / torch.sum(mask, dim=1)
w_loss_cls = torch.mean(w_loss_cls)
return dict(loss_cls=w_loss_cls)
if self.reduction == 'sum':
loss_cls = torch.sum(loss_cls, dim=-1)
return dict(loss_cls=torch.mean(loss_cls))
if self.loss_type == 'individual':
losses = {}
loss_weights = {}
for name, num, start_idx in zip(self.categories,
self.category_nums,
self.category_startidx):
category_score = cls_score[:, start_idx:start_idx + num]
category_label = label[:, start_idx:start_idx + num]
category_loss = F.binary_cross_entropy_with_logits(
category_score, category_label, reduction='none')
if self.reduction == 'mean':
category_loss = torch.mean(category_loss, dim=1)
elif self.reduction == 'sum':
category_loss = torch.sum(category_loss, dim=1)
idx = self.categories.index(name)
if self.with_mask:
category_mask_i = category_mask[:, idx].reshape(-1)
                    # there should be at least one sample that contains tags
                    # in this category
if torch.sum(category_mask_i) < 0.5:
losses[f'{name}_LOSS'] = torch.tensor(.0).cuda()
loss_weights[f'{name}_LOSS'] = .0
continue
category_loss = torch.sum(category_loss * category_mask_i)
category_loss = category_loss / torch.sum(category_mask_i)
else:
category_loss = torch.mean(category_loss)
                # We name the loss of each category as 'LOSS', since we only
                # want to monitor it, not backpropagate through it. The loss
                # actually used for backpropagation is provided under
                # 'loss_cls' in the losses dictionary below
losses[f'{name}_LOSS'] = category_loss
loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx]
loss_weight_sum = sum(loss_weights.values())
loss_weights = {
k: v / loss_weight_sum
for k, v in loss_weights.items()
}
loss_cls = sum([losses[k] * loss_weights[k] for k in losses])
losses['loss_cls'] = loss_cls
# We also trace the loss weights
losses.update({
k + '_weight': torch.tensor(v).to(losses[k].device)
for k, v in loss_weights.items()
})
# Note that the loss weights are just for reference.
return losses
else:
raise ValueError("loss_type should be 'all' or 'individual', "
f'but got {self.loss_type}')
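# A hedged sketch of the 'all' path: 3 samples over the 3142 concatenated
# default tags, with with_mask=False so missing categories are treated as
# negatives; every tensor here is a random placeholder.
if __name__ == '__main__':
    num_tags = sum((739, 117, 291, 69, 1678, 248))  # 3142
    loss_fn = HVULoss(loss_type='all', with_mask=False)
    score = torch.randn(3, num_tags)
    label = torch.randint(0, 2, (3, num_tags)).float()
    mask = torch.ones(3, num_tags)
    category_mask = torch.ones(3, 6)
    print(loss_fn(score, label, mask, category_mask))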
| 6,676 | 46.021127 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/binary_logistic_regression_loss.py | import torch
import torch.nn as nn
from ..builder import LOSSES
def binary_logistic_regression_loss(reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Binary Logistic Regression Loss."""
label = label.view(-1).to(reg_score.device)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float().to(reg_score.device)
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
# clip ratio value between ratio_range
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (
1.0 - pmask) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
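# A hedged usage sketch of the ratio-reweighted loss above: with 1 positive in
# 8 entries, the raw positive ratio (8.0) already lies inside ratio_range, so
# positives are up-weighted by coef_1 = 4.0; scores are placeholders.
if __name__ == '__main__':
    reg_score = torch.rand(8)
    label = torch.tensor([1., 0., 0., 0., 0., 0., 0., 0.])
    print(binary_logistic_regression_loss(reg_score, label))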
| 2,061 | 32.258065 | 75 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/ssn_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .ohem_hinge_loss import OHEMHingeLoss
@LOSSES.register_module()
class SSNLoss(nn.Module):
@staticmethod
def activity_loss(activity_score, labels, activity_indexer):
"""Activity Loss.
It will calculate activity loss given activity_score and label.
Args:
activity_score (torch.Tensor): Predicted activity score.
labels (torch.Tensor): Groundtruth class label.
activity_indexer (torch.Tensor): Index slices of proposals.
Returns:
torch.Tensor: Returned cross entropy loss.
"""
pred = activity_score[activity_indexer, :]
gt = labels[activity_indexer]
return F.cross_entropy(pred, gt)
@staticmethod
def completeness_loss(completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=0.17):
"""Completeness Loss.
It will calculate completeness loss given completeness_score and label.
Args:
completeness_score (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
completeness_indexer (torch.Tensor): Index slices of positive and
incomplete proposals.
positive_per_video (int): Number of positive proposals sampled
per video.
incomplete_per_video (int): Number of incomplete proposals sampled
                per video.
ohem_ratio (float): Ratio of online hard example mining.
Default: 0.17.
Returns:
torch.Tensor: Returned class-wise completeness loss.
"""
pred = completeness_score[completeness_indexer, :]
gt = labels[completeness_indexer]
pred_dim = pred.size(1)
pred = pred.view(-1, positive_per_video + incomplete_per_video,
pred_dim)
gt = gt.view(-1, positive_per_video + incomplete_per_video)
# yapf:disable
positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501
incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501
# yapf:enable
positive_loss = OHEMHingeLoss.apply(
positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
1.0, positive_per_video)
incomplete_loss = OHEMHingeLoss.apply(
incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1),
-1, ohem_ratio, incomplete_per_video)
num_positives = positive_pred.size(0)
num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
return ((positive_loss + incomplete_loss) /
float(num_positives + num_incompletes))
@staticmethod
def classwise_regression_loss(bbox_pred, labels, bbox_targets,
regression_indexer):
"""Classwise Regression Loss.
It will calculate classwise_regression loss given
class_reg_pred and targets.
Args:
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
regression_indexer (torch.Tensor): Index slices of
positive proposals.
Returns:
torch.Tensor: Returned class-wise regression loss.
"""
pred = bbox_pred[regression_indexer, :, :]
gt = labels[regression_indexer]
reg_target = bbox_targets[regression_indexer, :]
class_idx = gt.data - 1
classwise_pred = pred[:, class_idx, :]
classwise_reg_pred = torch.cat(
(torch.diag(classwise_pred[:, :, 0]).view(
-1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
dim=1)
loss = F.smooth_l1_loss(
classwise_reg_pred.view(-1), reg_target.view(-1)) * 2
return loss
def forward(self, activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg):
"""Calculate Boundary Matching Network Loss.
Args:
activity_score (torch.Tensor): Predicted activity score.
completeness_score (torch.Tensor): Predicted completeness score.
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
proposal_type (torch.Tensor): Type index slices of proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
train_cfg (dict): Config for training.
Returns:
dict([torch.Tensor, torch.Tensor, torch.Tensor]):
(loss_activity, loss_completeness, loss_reg).
Loss_activity is the activity loss, loss_completeness is
the class-wise completeness loss,
loss_reg is the class-wise regression loss.
"""
self.sampler = train_cfg.ssn.sampler
self.loss_weight = train_cfg.ssn.loss_weight
losses = dict()
proposal_type = proposal_type.view(-1)
labels = labels.view(-1)
activity_indexer = ((proposal_type == 0) +
(proposal_type == 2)).nonzero().squeeze(1)
completeness_indexer = ((proposal_type == 0) +
(proposal_type == 1)).nonzero().squeeze(1)
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
positive_per_video = int(self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
incomplete_per_video = (
self.sampler.num_per_video - positive_per_video -
background_per_video)
losses['loss_activity'] = self.activity_loss(activity_score, labels,
activity_indexer)
losses['loss_completeness'] = self.completeness_loss(
completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=positive_per_video / incomplete_per_video)
losses['loss_completeness'] *= self.loss_weight.comp_loss_weight
if bbox_pred is not None:
regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
bbox_targets = bbox_targets.view(-1, 2)
losses['loss_reg'] = self.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
losses['loss_reg'] *= self.loss_weight.reg_loss_weight
return losses
| 7,274 | 39.416667 | 102 | py |
STTS | STTS-main/VideoSwin/mmaction/models/losses/cross_entropy_loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class CrossEntropyLoss(BaseWeightedLoss):
"""Cross Entropy Loss.
Support two kinds of labels and their corresponding loss type. It's worth
mentioning that loss type will be detected by the shape of ``cls_score``
and ``label``.
1) Hard label: This label is an integer array and all of the elements are
in the range [0, num_classes - 1]. This label's shape should be
``cls_score``'s shape with the `num_classes` dimension removed.
    2) Soft label (probability distribution over classes): This label is a
probability distribution and all of the elements are in the range
[0, 1]. This label's shape must be the same as ``cls_score``. For now,
only 2-dim soft label is supported.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
class_weight (list[float] | None): Loss weight for each class. If set
as None, use the same weight 1 for all classes. Only applies
to CrossEntropyLoss and BCELossWithLogits (should not be set when
using other losses). Default: None.
"""
def __init__(self, loss_weight=1.0, class_weight=None):
super().__init__(loss_weight=loss_weight)
self.class_weight = None
if class_weight is not None:
self.class_weight = torch.Tensor(class_weight)
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if cls_score.size() == label.size():
# calculate loss for soft label
assert cls_score.dim() == 2, 'Only support 2-dim soft label'
assert len(kwargs) == 0, \
('For now, no extra args are supported for soft label, '
f'but get {kwargs}')
lsm = F.log_softmax(cls_score, 1)
if self.class_weight is not None:
lsm = lsm * self.class_weight.unsqueeze(0)
loss_cls = -(label * lsm).sum(1)
# default reduction 'mean'
if self.class_weight is not None:
# Use weighted average as pytorch CrossEntropyLoss does.
# For more information, please visit https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html # noqa
loss_cls = loss_cls.sum() / torch.sum(
self.class_weight.unsqueeze(0) * label)
else:
loss_cls = loss_cls.mean()
else:
# calculate loss for hard label
if self.class_weight is not None:
assert 'weight' not in kwargs, \
"The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.cross_entropy(cls_score, label, **kwargs)
return loss_cls
@LOSSES.register_module()
class BCELossWithLogits(BaseWeightedLoss):
"""Binary Cross Entropy Loss with logits.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
class_weight (list[float] | None): Loss weight for each class. If set
as None, use the same weight 1 for all classes. Only applies
to CrossEntropyLoss and BCELossWithLogits (should not be set when
using other losses). Default: None.
"""
def __init__(self, loss_weight=1.0, class_weight=None):
super().__init__(loss_weight=loss_weight)
self.class_weight = None
if class_weight is not None:
self.class_weight = torch.Tensor(class_weight)
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
"""
if self.class_weight is not None:
assert 'weight' not in kwargs, "The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.binary_cross_entropy_with_logits(cls_score, label,
**kwargs)
return loss_cls
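# A hedged sketch of the shape-based dispatch described in the docstring:
# matching sizes select the soft-label branch, while an index tensor selects
# plain cross-entropy; inputs are random placeholders.
if __name__ == '__main__':
    loss_fn = CrossEntropyLoss()
    score = torch.randn(4, 5)
    hard_label = torch.randint(0, 5, (4,))
    soft_label = torch.softmax(torch.randn(4, 5), dim=1)
    print(loss_fn(score, hard_label), loss_fn(score, soft_label))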
| 4,774 | 39.12605 | 132 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/mobilenet_v2.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
    This function rounds the channel number to the nearest value divisible by
    the divisor, bumping the result up when rounding would drop it below
    ``min_ratio`` of the original value.
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int, optional): The minimum value of the output channel.
Default: None, means that the minimum value equal to the divisor.
min_ratio (float, optional): The minimum ratio of the rounded channel
number to the original channel number. Default: 0.9.
Returns:
int: The modified output channel number
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value
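# Worked examples for reference: make_divisible(32 * 0.75, 8) -> 24 (already a
# multiple of 8), and make_divisible(17, 8) -> 16, since 16 >= 0.9 * 17; only
# when rounding loses more than (1 - min_ratio) of the value is the result
# bumped up by one divisor.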
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Expand the number of channels of the hidden layer
            in InvertedResidual by this ratio.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(nn.Module):
"""MobileNetV2 backbone.
Args:
pretrained (str | None): Name of pretrained model. Default: None.
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
pretrained=None,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU6', inplace=True),
norm_eval=False,
with_cp=False):
super().__init__()
self.pretrained = pretrained
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def init_weights(self):
if isinstance(self.pretrained, str):
logger = get_root_logger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
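# A hedged smoke test: build the backbone directly (bypassing the registry)
# and run a dummy 224x224 batch; with the default out_indices=(7,), forward
# returns the single conv2 feature map.
if __name__ == '__main__':
    import torch
    model = MobileNetV2()
    model.init_weights()
    out = model(torch.rand(1, 3, 224, 224))
    print(out.shape)  # expected torch.Size([1, 1280, 7, 7])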
| 10,933 | 35.691275 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/c3d.py | import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init, normal_init
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
@BACKBONES.register_module()
class C3D(nn.Module):
"""C3D backbone.
Args:
pretrained (str | None): Name of pretrained model.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
conv_cfg (dict | None): Config dict for convolution layer.
If set to None, it uses ``dict(type='Conv3d')`` to construct
layers. Default: None.
norm_cfg (dict | None): Config for norm layers. required keys are
``type``, Default: None.
act_cfg (dict | None): Config dict for activation layer. If set to
None, it uses ``dict(type='ReLU')`` to construct layers.
Default: None.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization of fc layers.
            Default: 0.005.
"""
def __init__(self,
pretrained=None,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
dropout_ratio=0.5,
init_std=0.005):
super().__init__()
if conv_cfg is None:
conv_cfg = dict(type='Conv3d')
if act_cfg is None:
act_cfg = dict(type='ReLU')
self.pretrained = pretrained
self.style = style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.dropout_ratio = dropout_ratio
self.init_std = init_std
c3d_conv_param = dict(
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv1a = ConvModule(3, 64, **c3d_conv_param)
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2a = ConvModule(64, 128, **c3d_conv_param)
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = ConvModule(128, 256, **c3d_conv_param)
self.conv3b = ConvModule(256, 256, **c3d_conv_param)
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = ConvModule(256, 512, **c3d_conv_param)
self.conv4b = ConvModule(512, 512, **c3d_conv_param)
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = ConvModule(512, 512, **c3d_conv_param)
self.conv5b = ConvModule(512, 512, **c3d_conv_param)
self.pool5 = nn.MaxPool3d(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=self.dropout_ratio)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
the size of x is (num_batches, 3, 16, 112, 112).
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1a(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.pool2(x)
x = self.conv3a(x)
x = self.conv3b(x)
x = self.pool3(x)
x = self.conv4a(x)
x = self.conv4b(x)
x = self.pool4(x)
x = self.conv5a(x)
x = self.conv5b(x)
x = self.pool5(x)
x = x.flatten(start_dim=1)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
return x
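# A hedged smoke test mirroring the docstring contract: a clip batch of shape
# (num_batches, 3, 16, 112, 112) maps to 4096-d fc7 features.
if __name__ == '__main__':
    import torch
    model = C3D()
    model.init_weights()
    feat = model(torch.rand(1, 3, 16, 112, 112))
    print(feat.shape)  # expected torch.Size([1, 4096])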
| 4,771 | 33.085714 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/checkpoint.py | # Copyright (c) OpenMMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import re
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
import mmcv
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.utils import mkdir_or_exist
from mmcv.runner.dist_utils import get_dist_info
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
class CheckpointLoader:
"""A general checkpoint loader to manage all schemes."""
_schemes = {}
@classmethod
def _register_scheme(cls, prefixes, loader, force=False):
if isinstance(prefixes, str):
prefixes = [prefixes]
else:
assert isinstance(prefixes, (list, tuple))
for prefix in prefixes:
if (prefix not in cls._schemes) or force:
cls._schemes[prefix] = loader
else:
raise KeyError(
f'{prefix} is already registered as a loader backend, '
'add "force=True" if you want to override it')
# sort, longer prefixes take priority
cls._schemes = OrderedDict(
sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))
@classmethod
def register_scheme(cls, prefixes, loader=None, force=False):
"""Register a loader to CheckpointLoader.
This method can be used as a normal class method or a decorator.
Args:
prefixes (str or list[str] or tuple[str]):
The prefix of the registered loader.
loader (function, optional): The loader function to be registered.
When this method is used as a decorator, loader is None.
Defaults to None.
force (bool, optional): Whether to override the loader
if the prefix has already been registered. Defaults to False.
"""
if loader is not None:
cls._register_scheme(prefixes, loader, force=force)
return
def _register(loader_cls):
cls._register_scheme(prefixes, loader_cls, force=force)
return loader_cls
return _register
@classmethod
def _get_checkpoint_loader(cls, path):
"""Finds a loader that supports the given path. Falls back to the local
loader if no other loader is found.
Args:
path (str): checkpoint path
Returns:
loader (function): checkpoint loader
"""
for p in cls._schemes:
if path.startswith(p):
return cls._schemes[p]
@classmethod
def load_checkpoint(cls, filename, map_location=None, logger=None):
"""load checkpoint through URL scheme path.
Args:
filename (str): checkpoint file name with given prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
logger (:mod:`logging.Logger`, optional): The logger for message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint_loader = cls._get_checkpoint_loader(filename)
class_name = checkpoint_loader.__name__
mmcv.print_log(f'Use {class_name} loader', logger)
return checkpoint_loader(filename, map_location)
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
"""load checkpoint by local file path.
Args:
filename (str): local checkpoint file path
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
"""load checkpoint through HTTP or HTTPS scheme path. In distributed
setting, this function only download checkpoint at local rank 0.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
model_dir (string, optional): directory in which to save the object,
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
"""load checkpoint through the file path prefixed with pavi. In distributed
setting, this function download ckpt at all ranks to different temporary
directories.
Args:
filename (str): checkpoint file path with pavi prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
assert filename.startswith('pavi://'), \
f'Expected filename startswith `pavi://`, but get {filename}'
model_path = filename[7:]
try:
from pavi import modelcloud
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='ceph'):
"""load checkpoint through the file path prefixed with s3. In distributed
setting, this function download ckpt at all ranks to different temporary
directories.
Args:
filename (str): checkpoint file path with s3 prefix
map_location (str, optional): Same as :func:`torch.load`.
backend (str): The storage backend type. Options are "disk", "ceph",
"memcached" and "lmdb". Default: 'ceph'
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
"""load checkpoint through the file path prefixed with modelzoo or
torchvision.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_torchvision_models()
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_name = filename[11:]
else:
model_name = filename[14:]
return load_from_http(model_urls[model_name], map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
"""load checkpoint through the file path prefixed with open-mmlab or
openmmlab.
Args:
filename (str): checkpoint file path with open-mmlab or
openmmlab prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_external_models()
prefix_str = 'open-mmlab://'
if filename.startswith(prefix_str):
model_name = filename[13:]
else:
model_name = filename[12:]
prefix_str = 'openmmlab://'
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
f'of {prefix_str}{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_from_http(model_url, map_location=map_location)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
"""load checkpoint through the file path prefixed with mmcls.
Args:
filename (str): checkpoint file path with mmcls prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_from_http(
model_urls[model_name], map_location=map_location)
checkpoint = _process_mmcls_checkpoint(checkpoint)
return checkpoint
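# Sketch of how a new URI scheme could be hooked in (the 'dummy://' prefix and
# loader below are hypothetical, shown only to illustrate the decorator form
# of ``CheckpointLoader.register_scheme``):
#
# @CheckpointLoader.register_scheme(prefixes='dummy://')
# def load_from_dummy(filename, map_location=None):
#     # strip the scheme and defer to the local loader
#     return load_from_local(filename[len('dummy://'):], map_location)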
def _load_checkpoint(filename, map_location=None, logger=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str, optional): Same as :func:`torch.load`.
Default: None.
logger (:mod:`logging.Logger`, optional): The logger for error message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
return CheckpointLoader.load_checkpoint(filename, map_location, logger)
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
"""Load partial pretrained model with specific prefix.
Args:
prefix (str): The prefix of sub-module.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location=map_location)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if not prefix.endswith('.'):
prefix += '.'
prefix_len = len(prefix)
state_dict = {
k[prefix_len:]: v
for k, v in state_dict.items() if k.startswith(prefix)
}
assert state_dict, f'{prefix} is not in the pretrained model'
return state_dict
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
revise_keys=[(r'^module\.', '')]):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
revise_keys (list): A list of customized keywords to modify the
state_dict in checkpoint. Each item is a (pattern, replacement)
pair of the regular expression operations. Default: strip
the prefix 'module.' by [(r'^module\\.', '')].
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location, logger)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
    # strip the 'backbone.' prefix used by recognizer-style checkpoints so the
    # weights can be loaded into a bare backbone; OrderedDict is already
    # imported at module level
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k.replace('backbone.', '')] = v
    state_dict = new_state_dict
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
state_dict = OrderedDict(
{re.sub(p, r, k): v
for k, v in state_dict.items()})
# Keep metadata in state_dict
state_dict._metadata = metadata
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
# Keep metadata in state_dict
state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
prefix (str): The prefix for parameters and buffers used in this
module.
"""
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi import exception
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except exception.NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
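# A hedged round-trip sketch: save a toy module plus optimizer state, then
# restore it with the ``load_checkpoint`` defined above; the path is
# illustrative only.
if __name__ == '__main__':
    net = torch.nn.Linear(4, 2)
    optim = torch.optim.SGD(net.parameters(), lr=0.1)
    save_checkpoint(net, '/tmp/toy_ckpt.pth', optimizer=optim)
    ckpt = load_checkpoint(net, '/tmp/toy_ckpt.pth', map_location='cpu')
    print(sorted(ckpt.keys()))  # ['meta', 'optimizer', 'state_dict']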
| 24,129 | 34.021771 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet3d_csn.py | import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.utils import _BatchNorm
from ..builder import BACKBONES
from .resnet3d import Bottleneck3d, ResNet3d
class CSNBottleneck3d(Bottleneck3d):
"""Channel-Separated Bottleneck Block.
This module is proposed in
"Video Classification with Channel-Separated Convolutional Networks"
    Link: https://arxiv.org/abs/1904.02811
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
3x3x3 depthwise convolution, which is derived from preserved
bottleneck block by removing the extra 1x1x1 convolution,
i.e., Interaction-reduced channel-separated bottleneck block.
Default: 'ir'.
args (position arguments): Position arguments for Bottleneck.
kwargs (dict, optional): Keyword arguments for Bottleneck.
"""
def __init__(self,
inplanes,
planes,
*args,
bottleneck_mode='ir',
**kwargs):
super(CSNBottleneck3d, self).__init__(inplanes, planes, *args,
**kwargs)
self.bottleneck_mode = bottleneck_mode
conv2 = []
if self.bottleneck_mode == 'ip':
conv2.append(
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False))
conv2_kernel_size = self.conv2.conv.kernel_size
conv2_stride = self.conv2.conv.stride
conv2_padding = self.conv2.conv.padding
conv2_dilation = self.conv2.conv.dilation
conv2_bias = bool(self.conv2.conv.bias)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=conv2_stride,
padding=conv2_padding,
dilation=conv2_dilation,
bias=conv2_bias,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
groups=planes)
conv2.append(self.conv2)
self.conv2 = nn.Sequential(*conv2)
@BACKBONES.register_module()
class ResNet3dCSN(ResNet3d):
"""ResNet backbone for CSN.
Args:
depth (int): Depth of ResNetCSN, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
temporal_strides (tuple[int]):
Temporal strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
conv1_kernel (tuple[int]): Kernel size of the first conv layer.
Default: (3, 7, 7).
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
norm_cfg (dict): Config for norm layers. required keys are `type` and
`requires_grad`.
Default: dict(type='BN3d', requires_grad=True, eps=1e-3).
inflate_style (str): `3x1x1` or `3x3x3`. which determines the kernel
sizes and padding strides for conv1 and conv2 in each block.
Default: '3x3x3'.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
3x3x3 depthwise convolution, which is derived from preserved
bottleneck block by removing the extra 1x1x1 convolution,
i.e., Interaction-reduced channel-separated bottleneck block.
Default: 'ip'.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
depth,
pretrained,
temporal_strides=(1, 2, 2, 2),
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
norm_cfg=dict(type='BN3d', requires_grad=True, eps=1e-3),
inflate_style='3x3x3',
bottleneck_mode='ir',
bn_frozen=False,
**kwargs):
self.arch_settings = {
# 18: (BasicBlock3d, (2, 2, 2, 2)),
# 34: (BasicBlock3d, (3, 4, 6, 3)),
50: (CSNBottleneck3d, (3, 4, 6, 3)),
101: (CSNBottleneck3d, (3, 4, 23, 3)),
152: (CSNBottleneck3d, (3, 8, 36, 3))
}
self.bn_frozen = bn_frozen
if bottleneck_mode not in ['ip', 'ir']:
            raise ValueError(f'Bottleneck mode must be "ip" or "ir", '
                             f'but got {bottleneck_mode}.')
super(ResNet3dCSN, self).__init__(
depth,
pretrained,
temporal_strides=temporal_strides,
conv1_kernel=conv1_kernel,
conv1_stride_t=conv1_stride_t,
pool1_stride_t=pool1_stride_t,
norm_cfg=norm_cfg,
inflate_style=inflate_style,
bottleneck_mode=bottleneck_mode,
**kwargs)
def train(self, mode=True):
super(ResNet3d, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if self.bn_frozen:
for param in m.parameters():
param.requires_grad = False
| 6,234 | 40.845638 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/swin_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import DropPath, trunc_normal_
# from mmcv.runner import load_checkpoint
from .checkpoint import load_checkpoint
from mmaction.utils import get_root_logger
from ..builder import BACKBONES
from functools import reduce, lru_cache
from operator import mul
from einops import rearrange
from mmaction.models.backbones.topk import PatchNet
import math
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, D, H, W, C)
window_size (tuple[int]): window size
Returns:
        windows: (B*num_windows, window_size[0]*window_size[1]*window_size[2], C)
"""
B, D, H, W, C = x.shape
x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)
windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)
return windows
def window_reverse(windows, window_size, B, D, H, W):
"""
Args:
        windows: (B*num_windows, window_size[0]*window_size[1]*window_size[2], C)
        window_size (tuple[int]): Window size
        B (int): Batch size
        D (int): Temporal length of the feature map
        H (int): Height of image
        W (int): Width of image
Returns:
x: (B, D, H, W, C)
"""
x = windows.view(B, D // window_size[0], H // window_size[1], W // window_size[2], window_size[0], window_size[1], window_size[2], -1)
x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
return x
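# Round-trip sketch (illustrative, not part of the original file): partitioning
# into windows and reversing the partition recovers the input whenever D, H, W
# are divisible by the window size.
#
#   x = torch.rand(2, 4, 14, 14, 96)               # B, D, H, W, C
#   windows = window_partition(x, (2, 7, 7))       # (2*2*2*2, 2*7*7, 96) = (16, 98, 96)
#   y = window_reverse(windows, (2, 7, 7), 2, 4, 14, 14)
#   assert torch.equal(x, y)                       # exact inverse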
def get_window_size(x_size, window_size, shift_size=None):
use_window_size = list(window_size)
if shift_size is not None:
use_shift_size = list(shift_size)
for i in range(len(x_size)):
if x_size[i] <= window_size[i]:
use_window_size[i] = x_size[i]
if shift_size is not None:
use_shift_size[i] = 0
if shift_size is None:
return tuple(use_window_size)
else:
return tuple(use_window_size), tuple(use_shift_size)
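# Behavior sketch (assumed sizes): any dimension not larger than its window is
# clamped to the input size and its shift is disabled, e.g. for short clips.
#
#   get_window_size((2, 14, 14), (8, 7, 7), (4, 3, 3))
#   # -> window (2, 7, 7), shift (0, 3, 3)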
class WindowAttention3D(nn.Module):
""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The temporal length, height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wd, Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1), num_heads)) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_d = torch.arange(self.window_size[0])
coords_h = torch.arange(self.window_size[1])
coords_w = torch.arange(self.window_size[2])
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w)) # 3, Wd, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)
relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
""" Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, N, N) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index[:N, :N].reshape(-1)].reshape(
N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
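# Usage sketch (hypothetical sizes, not part of the original file): the module
# attends within a single window of Wd*Wh*Ww tokens; the batch axis carries
# B * num_windows entries.
#
#   attn = WindowAttention3D(dim=96, window_size=(2, 7, 7), num_heads=3)
#   tokens = torch.rand(16, 2 * 7 * 7, 96)   # (B*nW, N, C), N = 98
#   out = attn(tokens)                       # (16, 98, 96)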
class SwinTransformerBlock3D(nn.Module):
""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): Window size.
shift_size (tuple[int]): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, num_heads, window_size=(2,7,7), shift_size=(0,0,0),
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_checkpoint=False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.use_checkpoint=use_checkpoint
        assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must be in range [0, window_size)"
        assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must be in range [0, window_size)"
        assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must be in range [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention3D(
dim, window_size=self.window_size, num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward_part1(self, x, mask_matrix):
B, D, H, W, C = x.shape
window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)
x = self.norm1(x)
# pad feature maps to multiples of window size
pad_l = pad_t = pad_d0 = 0
pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]
pad_b = (window_size[1] - H % window_size[1]) % window_size[1]
pad_r = (window_size[2] - W % window_size[2]) % window_size[2]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
_, Dp, Hp, Wp, _ = x.shape
# cyclic shift
if any(i > 0 for i in shift_size):
shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(shifted_x, window_size) # B*nW, Wd*Wh*Ww, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # B*nW, Wd*Wh*Ww, C
# merge windows
attn_windows = attn_windows.view(-1, *(window_size+(C,)))
shifted_x = window_reverse(attn_windows, window_size, B, Dp, Hp, Wp) # B D' H' W' C
# reverse cyclic shift
if any(i > 0 for i in shift_size):
x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
else:
x = shifted_x
if pad_d1 >0 or pad_r > 0 or pad_b > 0:
x = x[:, :D, :H, :W, :].contiguous()
return x
def forward_part2(self, x):
return self.drop_path(self.mlp(self.norm2(x)))
def forward(self, x, mask_matrix):
""" Forward function.
Args:
x: Input feature, tensor size (B, D, H, W, C).
mask_matrix: Attention mask for cyclic shift.
"""
shortcut = x
if self.use_checkpoint:
x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
else:
x = self.forward_part1(x, mask_matrix)
x = shortcut + self.drop_path(x)
if self.use_checkpoint:
x = x + checkpoint.checkpoint(self.forward_part2, x)
else:
x = x + self.forward_part2(x)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, D, H, W, C).
"""
B, D, H, W, C = x.shape
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, :, 0::2, 0::2, :] # B D H/2 W/2 C
x1 = x[:, :, 1::2, 0::2, :] # B D H/2 W/2 C
x2 = x[:, :, 0::2, 1::2, :] # B D H/2 W/2 C
x3 = x[:, :, 1::2, 1::2, :] # B D H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B D H/2 W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
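# Shape sketch (illustrative): merging halves H and W and doubles C, since the
# 4C concatenation is reduced back to 2C by the linear layer.
#
#   merge = PatchMerging(dim=96)
#   x = torch.rand(2, 4, 14, 14, 96)   # B, D, H, W, C
#   y = merge(x)                       # (2, 4, 7, 7, 192)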
# cache each stage results
@lru_cache()
def compute_mask(D, H, W, window_size, shift_size, device):
img_mask = torch.zeros((1, D, H, W, 1), device=device) # 1 Dp Hp Wp 1
cnt = 0
for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0],None):
for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1],None):
for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2],None):
img_mask[:, d, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, window_size) # nW, ws[0]*ws[1]*ws[2], 1
mask_windows = mask_windows.squeeze(-1) # nW, ws[0]*ws[1]*ws[2]
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
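# Mask sketch (assumed sizes): for a padded 8x14x14 volume with a (2, 7, 7)
# window and (1, 3, 3) shift there are (8//2)*(14//7)*(14//7) = 16 windows,
# each carrying a (98, 98) block of 0 / -100 entries.
#
#   mask = compute_mask(8, 14, 14, (2, 7, 7), (1, 3, 3), torch.device('cpu'))
#   # mask.shape == (16, 98, 98)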
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
num_heads (int): Number of attention head.
window_size (tuple[int]): Local window size. Default: (1,7,7).
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
"""
def __init__(self,
dim,
depth,
num_heads,
window_size=(2,7,7),
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False):
super().__init__()
self.window_size = window_size
self.shift_size = tuple(i // 2 for i in window_size)
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock3D(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=(0,0,0) if (i % 2 == 0) else self.shift_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
)
for i in range(depth)])
self.downsample = downsample
if self.downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, C, D, H, W).
"""
# calculate attention mask for SW-MSA
B, C, D, H, W = x.shape
window_size, shift_size = get_window_size((D,H,W), self.window_size, self.shift_size)
x = rearrange(x, 'b c d h w -> b d h w c')
Dp = int(np.ceil(D / window_size[0])) * window_size[0]
Hp = int(np.ceil(H / window_size[1])) * window_size[1]
Wp = int(np.ceil(W / window_size[2])) * window_size[2]
attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
for blk in self.blocks:
x = blk(x, attn_mask)
x = x.view(B, D, H, W, -1)
if self.downsample is not None:
x = self.downsample(x)
x = rearrange(x, 'b d h w c -> b c d h w')
return x
class PatchEmbed3D(nn.Module):
""" Video to Patch Embedding.
Args:
        patch_size (tuple[int]): Patch token size. Default: (2,4,4).
in_chans (int): Number of input video channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, num_frames=32, image_size=224, patch_size=(2,4,4), in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.num_patches = (num_frames // patch_size[0], image_size // patch_size[1], image_size // patch_size[2])
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, D, H, W = x.size()
if W % self.patch_size[2] != 0:
x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))
if H % self.patch_size[1] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))
if D % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0]))
x = self.proj(x) # B C D Wh Ww
if self.norm is not None:
D, Wh, Ww = x.size(2), x.size(3), x.size(4)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww)
return x
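# Shape sketch (illustrative): with the default (2, 4, 4) patches, a 32-frame
# 224x224 clip embeds into a (B, 96, 16, 56, 56) feature volume.
#
#   embed = PatchEmbed3D(num_frames=32, image_size=224)
#   clip = torch.rand(2, 3, 32, 224, 224)   # B, C, D, H, W
#   feat = embed(clip)                       # (2, 96, 16, 56, 56)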
@BACKBONES.register_module()
class SwinTransformer3D(nn.Module):
""" Swin Transformer backbone.
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
patch_size (int | tuple(int)): Patch size. Default: (4,4,4).
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
num_heads (tuple[int]): Number of attention head of each stage.
        window_size (tuple[int]): Window size. Default: (2,7,7).
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer: Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
"""
def __init__(self,
pretrained=None,
pretrained2d=True,
num_frames=32,
image_size=224,
patch_size=(4,4,4),
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=(2,7,7),
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
patch_norm=False,
frozen_stages=-1,
use_checkpoint=False,
time_pruning_loc=None,
time_left_ratio=[0.5],
time_score='tconv',
space_pruning_loc=None,
space_left_ratio=[0.5],
space_score='spool',
sigma=0.05):
super().__init__()
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.frozen_stages = frozen_stages
self.window_size = window_size
self.patch_size = patch_size
# split image into non-overlapping patches
self.patch_embed = PatchEmbed3D(
num_frames=num_frames, image_size=image_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
temporal_size = num_patches[0]
spatial_size = num_patches[1] * num_patches[2]
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
self.time_pruning_loc = time_pruning_loc
time_left_ratio = time_left_ratio
time_score = time_score
self.space_pruning_loc = space_pruning_loc
space_left_ratio = space_left_ratio
space_score = space_score
self.sigma_max = sigma
self.sigma = sigma
# build layers
out_dims = []
self.layers = nn.ModuleList()
embedding_temporal_size = temporal_size
embedding_spatial_size = spatial_size
time_score_predictor = nn.ModuleList()
space_score_predictor = nn.ModuleList()
s_count = 0
t_count = 0
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2**i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if i_layer<self.num_layers-1 else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
out_dims.append(int(embed_dim * 2**i_layer))
if self.time_pruning_loc is not None and i_layer in self.time_pruning_loc:
left_frames = int(embedding_temporal_size * time_left_ratio[t_count])
t_count += 1
patchnet = PatchNet(score=time_score, k=left_frames, in_channels = int(embed_dim * 2**i_layer))
time_score_predictor.append(patchnet)
embedding_temporal_size = left_frames
if self.space_pruning_loc is not None and i_layer in self.space_pruning_loc:
left_patches = int(embedding_spatial_size * space_left_ratio[s_count])
s_count += 1
patchnet = PatchNet(score=space_score, k=left_patches, in_channels = int(embed_dim * 2**i_layer))
space_score_predictor.append(patchnet)
embedding_spatial_size = left_patches
embedding_spatial_size = embedding_spatial_size // 4
if len(time_score_predictor) > 0:
self.time_score_predictor = time_score_predictor
if len(space_score_predictor) > 0:
self.space_score_predictor = space_score_predictor
self.num_features = int(embed_dim * 2**(self.num_layers-1))
# add a norm layer for each output
self.norm = norm_layer(self.num_features)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1:
self.pos_drop.eval()
for i in range(0, self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def inflate_weights(self, logger):
"""Inflate the swin2d parameters to swin3d.
The differences between swin3d and swin2d mainly lie in an extra
axis. To utilize the pretrained parameters in 2d model,
the weight of swin2d models should be inflated to fit in the shapes of
the 3d counterpart.
Args:
logger (logging.Logger): The logger used to print
                debugging information.
"""
checkpoint = torch.load(self.pretrained, map_location='cpu')
state_dict = checkpoint['model']
# delete relative_position_index since we always re-init it
relative_position_index_keys = [k for k in state_dict.keys() if "relative_position_index" in k]
for k in relative_position_index_keys:
del state_dict[k]
# delete attn_mask since we always re-init it
attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]
for k in attn_mask_keys:
del state_dict[k]
state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).repeat(1,1,self.patch_size[0],1,1) / self.patch_size[0]
# bicubic interpolate relative_position_bias_table if not match
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for k in relative_position_bias_table_keys:
relative_position_bias_table_pretrained = state_dict[k]
relative_position_bias_table_current = self.state_dict()[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
L2 = (2*self.window_size[1]-1) * (2*self.window_size[2]-1)
wd = self.window_size[0]
if nH1 != nH2:
logger.warning(f"Error in loading {k}, passing")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(2*self.window_size[1]-1, 2*self.window_size[2]-1),
mode='bicubic')
relative_position_bias_table_pretrained = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
state_dict[k] = relative_position_bias_table_pretrained.repeat(2*wd-1,1)
msg = self.load_state_dict(state_dict, strict=False)
logger.info(msg)
logger.info(f"=> loaded successfully '{self.pretrained}'")
del checkpoint
torch.cuda.empty_cache()
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
if self.pretrained2d:
# Inflate 2D model into 3D model.
self.inflate_weights(logger)
else:
# Directly load 3D model.
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def update_sigma(self, cur_step, total_steps):
process = cur_step / total_steps
sigma_multiplier = 1 - process
self.sigma = self.sigma_max * sigma_multiplier
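    # Annealing sketch (illustrative values): sigma decays linearly from
    # sigma_max to 0 over training, so the perturbed top-k selection gradually
    # hardens toward the deterministic top-k used at inference.
    #
    #   with sigma_max = 0.05:
    #     step 0%   -> sigma = 0.050
    #     step 50%  -> sigma = 0.025
    #     step 100% -> sigma = 0.000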
def forward(self, x):
"""Forward function."""
x = self.patch_embed(x)
T = x.size(2)
N = x.size(3) * x.size(4)
# B C T H W
x = self.pos_drop(x)
t_count = 0
s_count = 0
for i, layer in enumerate(self.layers):
if hasattr(self, 'time_score_predictor') and i in self.time_pruning_loc:
x = self.time_score_predictor[t_count](x, 'time', N, T, self.sigma)
T = x.size(2)
t_count += 1
if hasattr(self, 'space_score_predictor') and i in self.space_pruning_loc:
x = self.space_score_predictor[s_count](x, 'space', N, T, self.sigma)
N = x.size(3) * x.size(4)
s_count += 1
x = layer(x.contiguous())
T = x.size(2)
N = x.size(3) * x.size(4)
x = rearrange(x, 'n c d h w -> n d h w c')
x = self.norm(x)
x = rearrange(x, 'n d h w c -> n c d h w')
return x
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer3D, self).train(mode)
self._freeze_stages() | 30,486 | 40.032301 | 156 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet.py | import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.utils import checkpoint as cp
from ...utils import get_root_logger
from ..builder import BACKBONES
class BasicBlock(nn.Module):
"""Basic block for ResNet.
Args:
inplanes (int): Number of channels for the input in first conv2d layer.
planes (int): Number of channels produced by some norm/conv2d layers.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. required keys are `type` and
            `requires_grad`. Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.style = style
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
assert not with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
Args:
inplanes (int):
Number of channels for the input feature in first conv layer.
planes (int):
Number of channels produced by some norm layes and conv layers
stride (int): Spatial stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. required keys are `type` and
            `requires_grad`. Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.inplanes = inplanes
self.planes = planes
if style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
self.with_cp = with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for ResNet.
Args:
block: (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature in each block.
planes (int): Number of channels for the output feature in each block.
blocks (int): Number of residual blocks.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
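# Usage sketch (hypothetical values, not part of the original file): build the
# second stage of a ResNet-50, i.e. 4 Bottleneck blocks with stride 2 and
# 128 * expansion = 512 output channels.
#
#   layer2 = make_res_layer(Bottleneck, inplanes=256, planes=128, blocks=4,
#                           stride=2,
#                           norm_cfg=dict(type='BN', requires_grad=True),
#                           act_cfg=dict(type='ReLU', inplace=True))
#   feat = layer2(torch.rand(2, 256, 56, 56))   # -> (2, 512, 28, 28)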
@BACKBONES.register_module()
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
dilations (Sequence[int]): Dilation of each stage.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: ``pytorch``.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
partial_bn (bool): Whether to use partial bn. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained=None,
torchvision_pretrain=True,
in_channels=3,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
style='pytorch',
frozen_stages=-1,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
partial_bn=False,
with_cp=False):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.in_channels = in_channels
self.pretrained = pretrained
self.torchvision_pretrain = torchvision_pretrain
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.style = style
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.partial_bn = partial_bn
self.with_cp = with_cp
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@staticmethod
def _load_conv_params(conv, state_dict_tv, module_name_tv,
loaded_param_names):
"""Load the conv parameters of resnet from torchvision.
Args:
conv (nn.Module): The destination conv module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding conv module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
weight_tv_name = module_name_tv + '.weight'
if conv.weight.data.shape == state_dict_tv[weight_tv_name].shape:
conv.weight.data.copy_(state_dict_tv[weight_tv_name])
loaded_param_names.append(weight_tv_name)
if getattr(conv, 'bias') is not None:
bias_tv_name = module_name_tv + '.bias'
if conv.bias.data.shape == state_dict_tv[bias_tv_name].shape:
conv.bias.data.copy_(state_dict_tv[bias_tv_name])
loaded_param_names.append(bias_tv_name)
@staticmethod
def _load_bn_params(bn, state_dict_tv, module_name_tv, loaded_param_names):
"""Load the bn parameters of resnet from torchvision.
Args:
bn (nn.Module): The destination bn module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding bn module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
for param_name, param in bn.named_parameters():
param_tv_name = f'{module_name_tv}.{param_name}'
param_tv = state_dict_tv[param_tv_name]
if param.data.shape == param_tv.shape:
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
for param_name, param in bn.named_buffers():
param_tv_name = f'{module_name_tv}.{param_name}'
# some buffers like num_batches_tracked may not exist
if param_tv_name in state_dict_tv:
param_tv = state_dict_tv[param_tv_name]
if param.data.shape == param_tv.shape:
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
def _load_torchvision_checkpoint(self, logger=None):
"""Initiate the parameters from torchvision pretrained checkpoint."""
state_dict_torchvision = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_torchvision:
state_dict_torchvision = state_dict_torchvision['state_dict']
loaded_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
self._load_conv_params(module.conv, state_dict_torchvision,
original_conv_name, loaded_param_names)
self._load_bn_params(module.bn, state_dict_torchvision,
original_bn_name, loaded_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_torchvision.keys()) - set(loaded_param_names)
if remaining_names:
logger.info(
f'These parameters in pretrained checkpoint are not loaded'
f': {remaining_names}')
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
if self.torchvision_pretrain:
# torchvision's
self._load_torchvision_checkpoint(logger)
else:
# ours
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in self.conv1.modules():
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def _partial_bn(self):
logger = get_root_logger()
logger.info('Freezing BatchNorm2D except the first one.')
count_bn = 0
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if mode and self.partial_bn:
self._partial_bn()
| 21,448 | 35.292724 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet_audio.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.utils import _ntuple
from ...utils import get_root_logger
from ..builder import BACKBONES
class Bottleneck2dAudio(nn.Module):
"""Bottleneck2D block for ResNet2D.
Args:
        inplanes (int): Number of channels for the input in first conv2d layer.
        planes (int): Number of channels produced by some norm/conv2d layers.
stride (int | tuple[int]): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module): Downsample layer. Default: None.
factorize (bool): Whether to factorize kernel. Default: True.
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=2,
dilation=1,
downsample=None,
factorize=True,
norm_cfg=None,
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.factorize = factorize
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.conv1_stride = 1
self.conv2_stride = stride
conv1_kernel_size = (1, 1)
conv1_padding = 0
conv2_kernel_size = (3, 3)
conv2_padding = (dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=conv1_kernel_size,
padding=conv1_padding,
dilation=dilation,
norm_cfg=self.norm_cfg,
bias=False)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=conv2_kernel_size,
stride=stride,
padding=conv2_padding,
dilation=dilation,
bias=False,
conv_cfg=dict(type='ConvAudio') if factorize else dict(
type='Conv'),
norm_cfg=None,
act_cfg=None)
self.conv3 = ConvModule(
2 * planes if factorize else planes,
planes * self.expansion,
kernel_size=1,
bias=False,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNetAudio(nn.Module):
"""ResNet 2d audio backbone. Reference:
<https://arxiv.org/abs/2001.08740>`_.
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
pretrained (str | None): Name of pretrained model.
in_channels (int): Channel num of input features. Default: 1.
base_channels (int): Channel num of stem output features. Default: 32.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
dilations (Sequence[int]): Dilation of each stage.
Default: (1, 1, 1, 1).
conv1_kernel (int): Kernel size of the first conv layer. Default: 9.
conv1_stride (int | tuple[int]): Stride of the first conv layer.
Default: 1.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
        factorize (Sequence[int]): Determine whether to factorize the
            convolution in residual blocks of each stage.
            Default: (1, 1, 0, 0).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
"""
arch_settings = {
# 18: (BasicBlock2dAudio, (2, 2, 2, 2)),
# 34: (BasicBlock2dAudio, (3, 4, 6, 3)),
50: (Bottleneck2dAudio, (3, 4, 6, 3)),
101: (Bottleneck2dAudio, (3, 4, 23, 3)),
152: (Bottleneck2dAudio, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
in_channels=1,
num_stages=4,
base_channels=32,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
conv1_kernel=9,
conv1_stride=1,
frozen_stages=-1,
factorize=(1, 1, 0, 0),
norm_eval=False,
with_cp=False,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
zero_init_residual=True):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.dilations = dilations
self.conv1_kernel = conv1_kernel
self.conv1_stride = conv1_stride
self.frozen_stages = frozen_stages
self.stage_factorization = _ntuple(num_stages)(factorize)
self.norm_eval = norm_eval
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
factorize=self.stage_factorization[i],
norm_cfg=self.norm_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
factorize=1,
norm_cfg=None,
with_cp=False):
"""Build residual layer for ResNetAudio.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
stride (Sequence[int]): Strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
dilation (int): Spacing between kernel elements. Default: 1.
factorize (int | Sequence[int]): Determine whether to factorize
for each block. Default: 1.
norm_cfg (dict):
Config for norm layers. required keys are `type` and
`requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
A residual layer for the given config.
"""
factorize = factorize if not isinstance(
factorize, int) else (factorize, ) * blocks
assert len(factorize) == blocks
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
factorize=(factorize[0] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
factorize=(factorize[i] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=self.conv1_stride,
bias=False,
conv_cfg=dict(type='ConvAudio', op='sum'),
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in [self.conv1.conv, self.conv1.bn]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck2dAudio):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| 13,252 | 34.435829 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/topk.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import einops
from einops import rearrange
from math import sqrt
class PredictorLG(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, embed_dim=384):
super().__init__()
self.in_conv = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, embed_dim),
nn.GELU()
)
self.out_conv = nn.Sequential(
nn.Linear(embed_dim, embed_dim // 2),
nn.GELU(),
nn.Linear(embed_dim // 2, embed_dim // 4),
nn.GELU(),
nn.Linear(embed_dim // 4, 1)
)
def forward(self, x):
x = self.in_conv(x)
B, N, C = x.size()
local_x = x[:,:, :C//2]
global_x = torch.mean(x[:,:, C//2:], dim=1, keepdim=True)
x = torch.cat([local_x, global_x.expand(B, N, C//2)], dim=-1)
return self.out_conv(x)
def HardTopK(k, x):
topk_results = torch.topk(x, k=k, dim=-1, sorted=False)
indices = topk_results.indices # b, k
indices = torch.sort(indices, dim=-1).values
return indices
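# Selection sketch (illustrative): HardTopK returns the indices of the k
# largest scores, re-sorted ascending so the kept tokens preserve their
# original temporal/spatial order.
#
#   scores = torch.tensor([[0.1, 0.9, 0.3, 0.7]])
#   HardTopK(2, scores)   # -> tensor([[1, 3]])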
class PerturbedTopK(nn.Module):
def __init__(self, k: int, num_samples: int = 1000):
super(PerturbedTopK, self).__init__()
self.num_samples = num_samples
self.k = k
def __call__(self, x, sigma):
return PerturbedTopKFunction.apply(x, self.k, self.num_samples, sigma)
class PerturbedTopKFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, k: int, num_samples: int = 1000, sigma: float = 0.05):
b, d = x.shape
# for Gaussian: noise and gradient are the same.
noise = torch.normal(mean=0.0, std=1.0, size=(b, num_samples, d)).to(x.device)
perturbed_x = x[:, None, :] + noise * sigma # b, nS, d
topk_results = torch.topk(perturbed_x, k=k, dim=-1, sorted=False)
indices = topk_results.indices # b, nS, k
indices = torch.sort(indices, dim=-1).values # b, nS, k
# b, nS, k, d
perturbed_output = torch.nn.functional.one_hot(indices, num_classes=d).float()
indicators = perturbed_output.mean(dim=1) # b, k, d
# constants for backward
ctx.k = k
ctx.num_samples = num_samples
ctx.sigma = sigma
# tensors for backward
ctx.perturbed_output = perturbed_output
ctx.noise = noise
return indicators
@staticmethod
def backward(ctx, grad_output):
if grad_output is None:
return tuple([None] * 5)
noise_gradient = ctx.noise
if ctx.sigma <= 1e-20:
b, _, k, d = ctx.perturbed_output.size()
expected_gradient = torch.zeros(b, k, d).to(grad_output.device)
else:
expected_gradient = (
torch.einsum("bnkd,bnd->bkd", ctx.perturbed_output, noise_gradient)
/ ctx.num_samples
/ (ctx.sigma)
)
grad_input = torch.einsum("bkd,bkd->bd", grad_output, expected_gradient)
return (grad_input,) + tuple([None] * 5)
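# Forward/backward sketch (illustrative shapes): each output row is a soft
# indicator over the d candidates -- the Monte-Carlo expectation of the one-hot
# top-k under Gaussian score perturbations -- so every row sums to 1 and
# gradients flow back to the scores.
#
#   scores = torch.rand(2, 8, requires_grad=True)
#   ind = PerturbedTopKFunction.apply(scores, 3, 500, 0.05)   # (2, 3, 8)
#   ind.sum().backward()   # populates scores.grad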
def batched_index_select(input, dim, index):
for i in range(1, len(input.shape)):
if i != dim:
index = index.unsqueeze(i)
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
def extract_patches_from_indices(x, indices):
batch_size, _, channels = x.shape
k = indices.shape[-1]
patches = x
patches = batched_index_select(patches, 1, indices)
patches = patches.contiguous().view(batch_size, k, channels)
return patches
def extract_patches_from_indicators(x, indicators):
indicators = rearrange(indicators, "b d k -> b k d")
patches = torch.bmm(indicators, x)
return patches
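# Consistency sketch (illustrative, not part of the original file): with a hard
# one-hot indicator, the soft bmm-based extraction reduces exactly to the
# index-based gather used at inference time.
#
#   x = torch.rand(2, 8, 96)
#   idx = HardTopK(3, torch.rand(2, 8))                 # (2, 3)
#   hard = extract_patches_from_indices(x, idx)         # (2, 3, 96)
#   onehot = F.one_hot(idx, num_classes=8).float()      # (2, 3, 8)
#   soft = extract_patches_from_indicators(
#       x, rearrange(onehot, 'b k d -> b d k'))         # equals `hard`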
def min_max_norm(x):
flatten_score_min = x.min(axis=-1, keepdim=True).values
flatten_score_max = x.max(axis=-1, keepdim=True).values
norm_flatten_score = (x - flatten_score_min) / (flatten_score_max - flatten_score_min + 1e-5)
return norm_flatten_score
class PatchNet(nn.Module):
def __init__(self, score, k, in_channels, stride=None, num_samples=500):
super(PatchNet, self).__init__()
self.k = k
self.anchor_size = int(sqrt(k))
self.stride = stride
self.score = score
self.in_channels = in_channels
self.num_samples = num_samples
if score == 'tpool':
self.score_network = PredictorLG(embed_dim=2*in_channels)
elif score == 'spatch':
self.score_network = PredictorLG(embed_dim=in_channels)
self.init = torch.eye(self.k).unsqueeze(0).unsqueeze(-1).cuda()
def get_indicator(self, scores, k, sigma):
indicator = PerturbedTopKFunction.apply(scores, k, self.num_samples, sigma)
indicator = einops.rearrange(indicator, "b k d -> b d k")
return indicator
def get_indices(self, scores, k):
indices = HardTopK(k, scores)
return indices
def generate_random_indices(self, b, n, k):
indices = []
for _ in range(b):
indice = np.sort(np.random.choice(n, k, replace=False))
indices.append(indice)
indices = np.vstack(indices)
indices = torch.Tensor(indices).long().cuda()
return indices
def generate_uniform_indices(self, b, n, k):
indices = torch.linspace(0, n-1, steps=k).long()
indices = indices.unsqueeze(0).cuda()
indices = indices.repeat(b, 1)
return indices
def forward(self, x, type, N, T, sigma):
B = x.size(0)
H = W = int(sqrt(N))
indicator = None
indices = None
if type == 'time':
if self.score == 'tpool':
x = rearrange(x, 'b c t h w -> b t (h w) c')
avg = torch.mean(x, dim=2, keepdim=False)
max_ = torch.max(x, dim=2).values
x_ = torch.cat((avg, max_), dim=2)
scores = self.score_network(x_).squeeze(-1)
scores = min_max_norm(scores)
if self.training:
indicator = self.get_indicator(scores, self.k, sigma)
else:
indices = self.get_indices(scores, self.k)
x = rearrange(x, 'b t n c -> b t (n c)')
else:
s = self.stride if self.stride is not None else int(max((H - self.anchor_size) // 2, 1))
if self.score == 'spatch':
x = rearrange(x, 'b c t h w -> (b t) (h w) c')
scores = self.score_network(x)
scores = rearrange(scores, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
scores = F.unfold(scores, kernel_size=self.anchor_size, stride=s)
scores = scores.mean(dim=1)
scores = min_max_norm(scores)
x = rearrange(x, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
x = F.unfold(x, kernel_size=self.anchor_size, stride=s).permute(0, 2, 1).contiguous()
if self.training:
indicator = self.get_indicator(scores, 1, sigma)
else:
indices = self.get_indices(scores, 1)
if self.training:
if indicator is not None:
patches = extract_patches_from_indicators(x, indicator)
elif indices is not None:
patches = extract_patches_from_indices(x, indices)
if type == 'time':
patches = rearrange(patches, 'b k (h w c) -> b c k h w', h=H, w=W)
elif self.score == 'spatch':
patches = patches.squeeze(1)
patches = rearrange(patches, '(b t) (c kh kw) -> b c t kh kw', b=B, c=self.in_channels, kh=self.anchor_size)
return patches
else:
patches = extract_patches_from_indices(x, indices)
if type == 'time':
patches = rearrange(patches, 'b k (h w c) -> b c k h w', h=H, w=W)
elif self.score == 'spatch':
patches = patches.squeeze(1)
patches = rearrange(patches, '(b t) (c kh kw) -> b c t kh kw', b=B, c=self.in_channels, kh=self.anchor_size)
return patches
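# Usage sketch (illustrative, not from the original repo): temporal frame
# selection with the 'tpool' scorer. Assumes a CUDA device (``__init__``
# allocates a buffer with ``.cuda()``) and assumes ``PredictorLG`` maps
# (b, t, 2*c) features to one score per frame.
def _demo_patchnet_time():
    net = PatchNet(score='tpool', k=8, in_channels=96).cuda().eval()
    x = torch.rand(2, 96, 16, 7, 7).cuda()        # b, c, t, h, w
    out = net(x, type='time', N=49, T=16, sigma=0.05)
    assert out.shape == (2, 96, 8, 7, 7)          # k=8 frames kept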
| 8,524 | 33.375 | 124 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet3d.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, NonLocal3d, build_activation_layer,
constant_init, kaiming_init)
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.nn.modules.utils import _ntuple, _triple
from ...utils import get_root_logger
from ..builder import BACKBONES
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
from mmdet.models import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class BasicBlock3d(nn.Module):
"""BasicBlock 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False,
**kwargs):
super().__init__()
assert style in ['pytorch', 'caffe']
# make sure that only ``inflate_style`` is passed into kwargs
assert set(kwargs).issubset(['inflate_style'])
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
if self.inflate:
conv1_kernel_size = (3, 3, 3)
conv1_padding = (1, dilation, dilation)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, 1, 1)
else:
conv1_kernel_size = (1, 3, 3)
conv1_padding = (0, dilation, dilation)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, 1, 1)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes * self.expansion,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv2.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
class Bottleneck3d(nn.Module):
"""Bottleneck 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines the
kernel sizes and padding strides for conv1 and conv2 in each block.
Default: '3x1x1'.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
inflate_style='3x1x1',
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
assert inflate_style in ['3x1x1', '3x3x3']
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.inflate_style = inflate_style
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
if self.style == 'pytorch':
self.conv1_stride_s = 1
self.conv2_stride_s = spatial_stride
self.conv1_stride_t = 1
self.conv2_stride_t = temporal_stride
else:
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
if self.inflate:
if inflate_style == '3x1x1':
conv1_kernel_size = (3, 1, 1)
conv1_padding = (1, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
1,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
# No activation in the third ConvModule for bottleneck
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv3.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
@BACKBONES.register_module()
class ResNet3d(nn.Module):
"""ResNet 3d backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
stage_blocks (tuple | None): Set number of stages for each res layer.
Default: None.
pretrained2d (bool): Whether to load pretrained 2D model.
Default: True.
in_channels (int): Channel num of input features. Default: 3.
base_channels (int): Channel num of stem output features. Default: 64.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
Default: ``(1, 2, 2, 2)``.
temporal_strides (Sequence[int]):
Temporal strides of residual blocks of each stage.
Default: ``(1, 1, 1, 1)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
conv1_kernel (Sequence[int]): Kernel size of the first conv layer.
Default: ``(3, 7, 7)``.
conv1_stride_s (int): Spatial stride of the first conv layer.
Default: 2.
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_s (int): Spatial stride of the first pooling layer.
Default: 2.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
with_pool2 (bool): Whether to use pool2. Default: True.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
inflate (Sequence[int]): Inflate Dims of each block.
Default: (1, 1, 1, 1).
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines the
kernel sizes and padding strides for conv1 and conv2 in each block.
Default: '3x1x1'.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
non_local (Sequence[int]): Determine whether to apply non-local module
in the corresponding block of each stages. Default: (0, 0, 0, 0).
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
arch_settings = {
18: (BasicBlock3d, (2, 2, 2, 2)),
34: (BasicBlock3d, (3, 4, 6, 3)),
50: (Bottleneck3d, (3, 4, 6, 3)),
101: (Bottleneck3d, (3, 4, 23, 3)),
152: (Bottleneck3d, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
stage_blocks=None,
pretrained2d=True,
in_channels=3,
num_stages=4,
base_channels=64,
out_indices=(3, ),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 1, 1, 1),
dilations=(1, 1, 1, 1),
conv1_kernel=(3, 7, 7),
conv1_stride_s=2,
conv1_stride_t=1,
pool1_stride_s=2,
pool1_stride_t=1,
with_pool2=True,
style='pytorch',
frozen_stages=-1,
inflate=(1, 1, 1, 1),
inflate_style='3x1x1',
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
zero_init_residual=True,
**kwargs):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.stage_blocks = stage_blocks
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.spatial_strides = spatial_strides
self.temporal_strides = temporal_strides
self.dilations = dilations
assert len(spatial_strides) == len(temporal_strides) == len(
dilations) == num_stages
if self.stage_blocks is not None:
assert len(self.stage_blocks) == num_stages
self.conv1_kernel = conv1_kernel
self.conv1_stride_s = conv1_stride_s
self.conv1_stride_t = conv1_stride_t
self.pool1_stride_s = pool1_stride_s
self.pool1_stride_t = pool1_stride_t
self.with_pool2 = with_pool2
self.style = style
self.frozen_stages = frozen_stages
self.stage_inflations = _ntuple(num_stages)(inflate)
self.non_local_stages = _ntuple(num_stages)(non_local)
self.inflate_style = inflate_style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
if self.stage_blocks is None:
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self.non_local_cfg = non_local_cfg
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
temporal_stride = temporal_strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
non_local=self.non_local_stages[i],
non_local_cfg=self.non_local_cfg,
inflate=self.stage_inflations[i],
inflate_style=self.inflate_style,
with_cp=with_cp,
**kwargs)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides in
residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer, otherwise
the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines
the kernel sizes and padding strides for conv1 and conv2
in each block. Default: '3x1x1'.
non_local (int | Sequence[int]): Determine whether to apply
non-local module in the corresponding block of each stages.
Default: 0.
non_local_cfg (dict): Config for non-local module.
Default: ``dict()``.
conv_cfg (dict | None): Config for norm layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
downsample = None
if spatial_stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
downsample=downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
@staticmethod
def _inflate_conv_params(conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
kernel_t = conv3d.weight.data.shape[2]
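        # Replicate the 2d kernel along the new temporal axis and divide by
        # the temporal kernel size, so the inflated conv reproduces the 2d
        # response on a temporally constant input.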
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
if getattr(conv3d, 'bias') is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
@staticmethod
def _inflate_bn_params(bn3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a norm module from 2d to 3d.
Args:
bn3d (nn.Module): The destination bn3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding bn module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
for param_name, param in bn3d.named_parameters():
param_2d_name = f'{module_name_2d}.{param_name}'
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
for param_name, param in bn3d.named_buffers():
param_2d_name = f'{module_name_2d}.{param_name}'
# some buffers like num_batches_tracked may not exist in old
# checkpoints
if param_2d_name in state_dict_2d:
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
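    # ``_inflate_weights`` and ``_init_weights`` are staticmethods that take
    # ``self`` explicitly so that ``ResNet3dLayer`` below can reuse them as
    # unbound functions.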
@staticmethod
def _inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d.
The differences between resnet3d and resnet2d mainly lie in an extra
axis of conv kernel. To utilize the pretrained parameters in 2d model,
the weight of conv2d models should be inflated to fit in the shapes of
the 3d counterpart.
Args:
logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
if original_conv_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_conv_name}')
else:
shape_2d = state_dict_r2d[original_conv_name +
'.weight'].shape
shape_3d = module.conv.weight.data.shape
if shape_2d != shape_3d[:2] + shape_3d[3:]:
logger.warning(f'Weight shape mismatch for '
f': {original_conv_name} : '
f'3d weight shape: {shape_3d}; '
f'2d weight shape: {shape_2d}. ')
else:
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
if original_bn_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_bn_name}')
else:
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name,
inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def inflate_weights(self, logger):
self._inflate_weights(self, logger)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=tuple([(k - 1) // 2 for k in _triple(self.conv1_kernel)]),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool3d(
kernel_size=(1, 3, 3),
stride=(self.pool1_stride_t, self.pool1_stride_s,
self.pool1_stride_s),
padding=(0, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
@staticmethod
def _init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch.
Args:
pretrained (str | None): The path of the pretrained weight. Will
override the original `pretrained` if set. The arg is added to
be compatible with mmdet. Default: None.
"""
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
if self.pretrained2d:
# Inflate 2D model into 3D model.
self.inflate_weights(logger)
else:
# Directly load 3D model.
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck3d):
constant_init(m.conv3.bn, 0)
elif isinstance(m, BasicBlock3d):
constant_init(m.conv2.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def init_weights(self, pretrained=None):
self._init_weights(self, pretrained)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i == 0 and self.with_pool2:
x = self.pool2(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNet3dLayer(nn.Module):
"""ResNet 3d Layer.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
pretrained2d (bool): Whether to load pretrained 2D model.
Default: True.
stage (int): The index of Resnet stage. Default: 3.
base_channels (int): Channel num of stem output features. Default: 64.
spatial_stride (int): The 1st res block's spatial stride. Default 2.
temporal_stride (int): The 1st res block's temporal stride. Default 1.
dilation (int): The dilation. Default: 1.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
all_frozen (bool): Frozen all modules in the layer. Default: False.
inflate (int): Inflate Dims of each block. Default: 1.
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines the
kernel sizes and padding strides for conv1 and conv2 in each block.
Default: '3x1x1'.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
depth,
pretrained,
pretrained2d=True,
stage=3,
base_channels=64,
spatial_stride=2,
temporal_stride=1,
dilation=1,
style='pytorch',
all_frozen=False,
inflate=1,
inflate_style='3x1x1',
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.arch_settings = ResNet3d.arch_settings
assert depth in self.arch_settings
self.make_res_layer = ResNet3d.make_res_layer
self._inflate_conv_params = ResNet3d._inflate_conv_params
self._inflate_bn_params = ResNet3d._inflate_bn_params
self._inflate_weights = ResNet3d._inflate_weights
self._init_weights = ResNet3d._init_weights
self.depth = depth
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.stage = stage
# stage index is 0 based
assert 0 <= stage <= 3
self.base_channels = base_channels
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.all_frozen = all_frozen
self.stage_inflation = inflate
self.inflate_style = inflate_style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
block, stage_blocks = self.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = self.make_res_layer(
block,
inplanes,
planes,
stage_block,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
inflate=self.stage_inflation,
inflate_style=self.inflate_style,
with_cp=with_cp,
**kwargs)
self.layer_name = f'layer{stage + 1}'
self.add_module(self.layer_name, res_layer)
def inflate_weights(self, logger):
self._inflate_weights(self, logger)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.all_frozen:
layer = getattr(self, self.layer_name)
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
self._init_weights(self, pretrained)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
res_layer = getattr(self, self.layer_name)
out = res_layer(x)
return out
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(ResNet3dLayer)
MMDET_BACKBONES.register_module()(ResNet3d)
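# Usage sketch (illustrative, not part of the original file): build a
# ResNet3d-50 from scratch and run a dummy clip through it.
def _demo_resnet3d():
    import torch
    model = ResNet3d(depth=50, pretrained=None)
    model.init_weights()
    model.eval()
    clip = torch.rand(1, 3, 8, 64, 64)       # N, C, T, H, W
    feat = model(clip)                        # stage-4 feature map
    assert feat.shape[1] == model.feat_dim    # 2048 for depth=50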
| 40,219 | 38.277344 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet3d_slowfast.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import print_log
from ...utils import get_root_logger
from ..builder import BACKBONES
from .resnet3d import ResNet3d
try:
from mmdet.models import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class ResNet3dPathway(ResNet3d):
"""A pathway of Slowfast based on ResNet3d.
Args:
*args (arguments): Arguments same as :class:``ResNet3d``.
lateral (bool): Determines whether to enable the lateral connection
from another pathway. Default: False.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
``alpha`` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to ``beta`` in the paper.
Default: 8.
fusion_kernel (int): The kernel size of lateral fusion.
Default: 5.
**kwargs (keyword arguments): Keywords arguments for ResNet3d.
"""
def __init__(self,
*args,
lateral=False,
speed_ratio=8,
channel_ratio=8,
fusion_kernel=5,
**kwargs):
self.lateral = lateral
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
self.fusion_kernel = fusion_kernel
super().__init__(*args, **kwargs)
self.inplanes = self.base_channels
if self.lateral:
self.conv1_lateral = ConvModule(
self.inplanes // self.channel_ratio,
# https://arxiv.org/abs/1812.03982, the
# third type of lateral connection has out_channel:
# 2 * \beta * C
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.lateral_connections = []
for i in range(len(self.stage_blocks)):
planes = self.base_channels * 2**i
self.inplanes = planes * self.block.expansion
if lateral and i != self.num_stages - 1:
# no lateral connection needed in final stage
lateral_name = f'layer{(i + 1)}_lateral'
setattr(
self, lateral_name,
ConvModule(
self.inplanes // self.channel_ratio,
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None))
self.lateral_connections.append(lateral_name)
def make_res_layer(self,
block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for Slowfast.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input
feature in each block.
planes (int): Number of channels for the output
feature in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides
in residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer,
otherwise the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines
the kernel sizes and padding strides for conv1 and
conv2 in each block. Default: ``3x1x1``.
non_local (int | Sequence[int]): Determine whether to apply
non-local module in the corresponding block of each stages.
Default: 0.
non_local_cfg (dict): Config for non-local module.
Default: ``dict()``.
conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
if self.lateral:
lateral_inplanes = inplanes * 2 // self.channel_ratio
else:
lateral_inplanes = 0
if (spatial_stride != 1
or (inplanes + lateral_inplanes) != planes * block.expansion):
downsample = ConvModule(
inplanes + lateral_inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
else:
downsample = None
layers = []
layers.append(
block(
inplanes + lateral_inplanes,
planes,
spatial_stride,
temporal_stride,
dilation,
downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
1,
dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d pathway.
The differences between resnet3d and resnet2d mainly lie in an extra
axis of conv kernel. To utilize the pretrained parameters in 2d model,
the weight of conv2d models should be inflated to fit in the shapes of
the 3d counterpart. For pathway the ``lateral_connection`` part should
not be inflated from 2d weights.
Args:
logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if 'lateral' in name:
continue
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
if original_conv_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_conv_name}')
else:
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
if original_bn_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_bn_name}')
else:
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name,
inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
        The differences of conv modules between 2d and 3d in Pathway
        mainly lie in the inplanes due to lateral connections. To fit the
        shapes of the lateral connection counterpart, it will expand
        parameters by concatenating conv2d parameters and extra zero paddings.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
old_shape = conv2d_weight.shape
new_shape = conv3d.weight.data.shape
kernel_t = new_shape[2]
if new_shape[1] != old_shape[1]:
# Inplanes may be different due to lateral connections
new_channels = new_shape[1] - old_shape[1]
pad_shape = old_shape
pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]
# Expand parameters by concat extra channels
conv2d_weight = torch.cat(
(conv2d_weight,
torch.zeros(pad_shape).type_as(conv2d_weight).to(
conv2d_weight.device)),
dim=1)
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
if getattr(conv3d, 'bias') is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
`self.frozen_stages`."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
if i != len(self.res_layers) and self.lateral:
# No fusion needed in the final stage
lateral_name = self.lateral_connections[i - 1]
conv_lateral = getattr(self, lateral_name)
conv_lateral.eval()
for param in conv_lateral.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if pretrained:
self.pretrained = pretrained
# Override the init_weights of i3d
super().init_weights()
for module_name in self.lateral_connections:
layer = getattr(self, module_name)
for m in layer.modules():
if isinstance(m, (nn.Conv3d, nn.Conv2d)):
kaiming_init(m)
pathway_cfg = {
'resnet3d': ResNet3dPathway,
# TODO: BNInceptionPathway
}
def build_pathway(cfg, *args, **kwargs):
"""Build pathway.
Args:
cfg (None or dict): cfg should contain:
- type (str): identify conv layer type.
Returns:
nn.Module: Created pathway.
"""
if not (isinstance(cfg, dict) and 'type' in cfg):
raise TypeError('cfg must be a dict containing the key "type"')
cfg_ = cfg.copy()
pathway_type = cfg_.pop('type')
if pathway_type not in pathway_cfg:
raise KeyError(f'Unrecognized pathway type {pathway_type}')
pathway_cls = pathway_cfg[pathway_type]
pathway = pathway_cls(*args, **kwargs, **cfg_)
return pathway
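# Usage sketch (illustrative): build a fast pathway from a config dict; the
# ``type`` key picks the pathway class and the remaining keys are forwarded
# to its constructor.
def _demo_build_pathway():
    cfg = dict(
        type='resnet3d',
        depth=50,
        pretrained=None,
        lateral=False,
        base_channels=8,
        conv1_kernel=(5, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1)
    return build_pathway(cfg)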
@BACKBONES.register_module()
class ResNet3dSlowFast(nn.Module):
"""Slowfast backbone.
This module is proposed in `SlowFast Networks for Video Recognition
<https://arxiv.org/abs/1812.03982>`_
Args:
pretrained (str): The file path to a pretrained model.
resample_rate (int): A large temporal stride ``resample_rate``
on input frames. The actual resample rate is calculated by
            multiplying the ``interval`` in ``SampleFrames`` in the
pipeline with ``resample_rate``, equivalent to the :math:`\\tau`
in the paper, i.e. it processes only one out of
``resample_rate * interval`` frames. Default: 8.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
:math:`\\alpha` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to :math:`\\beta` in the paper.
Default: 8.
slow_pathway (dict): Configuration of slow branch, should contain
necessary arguments for building the specific type of pathway
and:
type (str): type of backbone the pathway bases on.
lateral (bool): determine whether to build lateral connection
                for the pathway. Default:
.. code-block:: Python
dict(type='ResNetPathway',
lateral=True, depth=50, pretrained=None,
conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1),
conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))
fast_pathway (dict): Configuration of fast branch, similar to
`slow_pathway`. Default:
.. code-block:: Python
dict(type='ResNetPathway',
lateral=False, depth=50, pretrained=None, base_channels=8,
conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1)
"""
def __init__(self,
pretrained,
resample_rate=8,
speed_ratio=8,
channel_ratio=8,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)),
fast_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
base_channels=8,
conv1_kernel=(5, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1)):
super().__init__()
self.pretrained = pretrained
self.resample_rate = resample_rate
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
if slow_pathway['lateral']:
slow_pathway['speed_ratio'] = speed_ratio
slow_pathway['channel_ratio'] = channel_ratio
self.slow_path = build_pathway(slow_pathway)
self.fast_path = build_pathway(fast_pathway)
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
logger = get_root_logger()
msg = f'load model from: {self.pretrained}'
print_log(msg, logger=logger)
# Directly load 3D model.
load_checkpoint(self, self.pretrained, strict=True, logger=logger)
elif self.pretrained is None:
            # Initialize the two branches separately.
self.fast_path.init_weights()
self.slow_path.init_weights()
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
tuple[torch.Tensor]: The feature of the input samples extracted
by the backbone.
"""
x_slow = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / self.resample_rate, 1.0, 1.0))
x_slow = self.slow_path.conv1(x_slow)
x_slow = self.slow_path.maxpool(x_slow)
x_fast = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0,
1.0))
x_fast = self.fast_path.conv1(x_fast)
x_fast = self.fast_path.maxpool(x_fast)
if self.slow_path.lateral:
x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
for i, layer_name in enumerate(self.slow_path.res_layers):
res_layer = getattr(self.slow_path, layer_name)
x_slow = res_layer(x_slow)
res_layer_fast = getattr(self.fast_path, layer_name)
x_fast = res_layer_fast(x_fast)
if (i != len(self.slow_path.res_layers) - 1
and self.slow_path.lateral):
# No fusion needed in the final stage
lateral_name = self.slow_path.lateral_connections[i]
conv_lateral = getattr(self.slow_path, lateral_name)
x_fast_lateral = conv_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
out = (x_slow, x_fast)
return out
if mmdet_imported:
MMDET_BACKBONES.register_module()(ResNet3dSlowFast)
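# Usage sketch (illustrative, not part of the original file): with the default
# resample_rate=8 and speed_ratio=8, the slow path sees 4 of the 32 input
# frames while the fast path sees all 32.
def _demo_slowfast():
    model = ResNet3dSlowFast(pretrained=None)
    model.init_weights()
    model.eval()
    clip = torch.rand(1, 3, 32, 64, 64)       # N, C, T, H, W
    x_slow, x_fast = model(clip)
    assert x_slow.shape == (1, 2048, 4, 2, 2)
    assert x_fast.shape == (1, 256, 32, 2, 2)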
| 21,060 | 39.424184 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet2plus1d.py | from ..builder import BACKBONES
from .resnet3d import ResNet3d
@BACKBONES.register_module()
class ResNet2Plus1d(ResNet3d):
"""ResNet (2+1)d backbone.
This model is proposed in `A Closer Look at Spatiotemporal Convolutions for
Action Recognition <https://arxiv.org/abs/1711.11248>`_
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.pretrained2d is False
assert self.conv_cfg['type'] == 'Conv2plus1d'
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
# no pool2 in R(2+1)d
x = res_layer(x)
return x
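# Config sketch (an assumption mirroring the standard mmaction r2plus1d
# recipe; the asserts in ``__init__`` above only enforce ``Conv2plus1d``
# conv_cfg and ``pretrained2d=False``).
def _demo_r2plus1d_cfg():
    return dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='BN3d', requires_grad=True, eps=1e-3),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2))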
| 1,482 | 28.66 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet_tsm.py | import torch
import torch.nn as nn
from mmcv.cnn import NonLocal3d
from torch.nn.modules.utils import _ntuple
from ..builder import BACKBONES
from .resnet import ResNet
class NL3DWrapper(nn.Module):
"""3D Non-local wrapper for ResNet50.
Wrap ResNet layers with 3D NonLocal modules.
Args:
block (nn.Module): Residual blocks to be built.
num_segments (int): Number of frame segments.
non_local_cfg (dict): Config for non-local layers. Default: ``dict()``.
"""
def __init__(self, block, num_segments, non_local_cfg=dict()):
super(NL3DWrapper, self).__init__()
self.block = block
self.non_local_cfg = non_local_cfg
self.non_local_block = NonLocal3d(self.block.conv3.norm.num_features,
**self.non_local_cfg)
self.num_segments = num_segments
def forward(self, x):
x = self.block(x)
n, c, h, w = x.size()
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2).contiguous()
x = self.non_local_block(x)
x = x.transpose(1, 2).contiguous().view(n, c, h, w)
return x
class TemporalShift(nn.Module):
"""Temporal shift module.
This module is proposed in
`TSM: Temporal Shift Module for Efficient Video Understanding
<https://arxiv.org/abs/1811.08383>`_
Args:
net (nn.module): Module to make temporal shift.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of divisions for shift. Default: 8.
"""
def __init__(self, net, num_segments=3, shift_div=8):
super().__init__()
self.net = net
self.num_segments = num_segments
self.shift_div = shift_div
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.shift(x, self.num_segments, shift_div=self.shift_div)
return self.net(x)
@staticmethod
def shift(x, num_segments, shift_div=3):
"""Perform temporal shift operation on the feature.
Args:
x (torch.Tensor): The input feature to be shifted.
num_segments (int): Number of frame segments.
shift_div (int): Number of divisions for shift. Default: 3.
Returns:
torch.Tensor: The shifted feature.
"""
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, num_segments, C, H*W]
# can't use 5 dimensional array on PPL2D backend for caffe
x = x.view(-1, num_segments, c, h * w)
# get shift fold
fold = c // shift_div
# split c channel into three parts:
# left_split, mid_split, right_split
left_split = x[:, :, :fold, :]
mid_split = x[:, :, fold:2 * fold, :]
right_split = x[:, :, 2 * fold:, :]
        # cannot use torch.zeros(*A.shape) or torch.zeros_like(A) because
        # arrays in caffe inference must be produced by computation
        # shift left on the num_segments dimension in `left_split`
zeros = left_split - left_split
blank = zeros[:, :1, :, :]
left_split = left_split[:, 1:, :, :]
left_split = torch.cat((left_split, blank), 1)
# shift right on num_segments channel in `mid_split`
zeros = mid_split - mid_split
blank = zeros[:, :1, :, :]
mid_split = mid_split[:, :-1, :, :]
mid_split = torch.cat((blank, mid_split), 1)
# right_split: no shift
# concatenate
out = torch.cat((left_split, mid_split, right_split), 2)
# [N, C, H, W]
# restore the original dimension
return out.view(n, c, h, w)
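# Behaviour sketch (illustrative, not part of the original file): with c=4 and
# shift_div=4 the first channel moves one segment back in time, the second one
# segment forward, and the remaining channels stay put.
def _demo_temporal_shift():
    # one clip of num_segments=4 frames, 4 channels, 1x1 spatial
    x = torch.arange(16, dtype=torch.float32).view(4, 4, 1, 1)
    out = TemporalShift.shift(x, num_segments=4, shift_div=4)
    assert out[:, 0, 0, 0].tolist() == [4.0, 8.0, 12.0, 0.0]   # shifted left
    assert out[:, 1, 0, 0].tolist() == [0.0, 1.0, 5.0, 9.0]    # shifted right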
@BACKBONES.register_module()
class ResNetTSM(ResNet):
"""ResNet backbone for TSM.
Args:
num_segments (int): Number of frame segments. Default: 8.
is_shift (bool): Whether to make temporal shift in reset layers.
Default: True.
non_local (Sequence[int]): Determine whether to apply non-local module
in the corresponding block of each stages. Default: (0, 0, 0, 0).
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
shift_div (int): Number of div for shift. Default: 8.
shift_place (str): Places in resnet layers for shift, which is chosen
from ['block', 'blockres'].
If set to 'block', it will apply temporal shift to all child blocks
in each resnet layer.
If set to 'blockres', it will apply temporal shift to each `conv1`
layer of all child blocks in each resnet layer.
Default: 'blockres'.
temporal_pool (bool): Whether to add temporal pooling. Default: False.
**kwargs (keyword arguments, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_shift=True,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
shift_div=8,
shift_place='blockres',
temporal_pool=False,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_shift = is_shift
self.shift_div = shift_div
self.shift_place = shift_place
self.temporal_pool = temporal_pool
self.non_local = non_local
self.non_local_stages = _ntuple(self.num_stages)(non_local)
self.non_local_cfg = non_local_cfg
def make_temporal_shift(self):
"""Make temporal shift for some layers."""
if self.temporal_pool:
num_segment_list = [
self.num_segments, self.num_segments // 2,
self.num_segments // 2, self.num_segments // 2
]
else:
num_segment_list = [self.num_segments] * 4
if num_segment_list[-1] <= 0:
raise ValueError('num_segment_list[-1] must be positive')
if self.shift_place == 'block':
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
blocks[i] = TemporalShift(
b, num_segments=num_segments, shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
elif 'blockres' in self.shift_place:
n_round = 1
if len(list(self.layer3.children())) >= 23:
n_round = 2
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
blocks[i].conv1.conv = TemporalShift(
b.conv1.conv,
num_segments=num_segments,
shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
else:
raise NotImplementedError
def make_temporal_pool(self):
"""Make temporal pooling between layer1 and layer2, using a 3D max
pooling layer."""
class TemporalPool(nn.Module):
"""Temporal pool module.
Wrap layer2 in ResNet50 with a 3D max pooling layer.
Args:
net (nn.Module): Module to make temporal pool.
num_segments (int): Number of frame segments.
"""
def __init__(self, net, num_segments):
super().__init__()
self.net = net
self.num_segments = num_segments
self.max_pool3d = nn.MaxPool3d(
kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
def forward(self, x):
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, C, num_segments, H, W]
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2)
                # [N // num_segments, C, num_segments // 2, H, W]
x = self.max_pool3d(x)
# [N // 2, C, H, W]
x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
return self.net(x)
self.layer2 = TemporalPool(self.layer2, self.num_segments)
def make_non_local(self):
# This part is for ResNet50
for i in range(self.num_stages):
non_local_stage = self.non_local_stages[i]
if sum(non_local_stage) == 0:
continue
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
for idx, non_local in enumerate(non_local_stage):
if non_local:
res_layer[idx] = NL3DWrapper(res_layer[idx],
self.num_segments,
self.non_local_cfg)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super().init_weights()
if self.is_shift:
self.make_temporal_shift()
if len(self.non_local_cfg) != 0:
self.make_non_local()
if self.temporal_pool:
self.make_temporal_pool()
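# Usage sketch (illustrative, not part of the original file): the shift (and
# optional non-local / temporal-pool) modules are only inserted inside
# ``init_weights``, so call it before the forward pass; frames enter as a 2d
# batch of shape (N * num_segments, C, H, W).
def _demo_resnet_tsm():
    model = ResNetTSM(depth=50, num_segments=8, pretrained=None)
    model.init_weights()
    model.eval()
    x = torch.rand(8, 3, 64, 64)        # one clip of 8 segments
    feat = model(x)
    assert feat.shape[:2] == (8, 2048)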
| 10,742 | 35.416949 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/resnet_tin.py | import torch
import torch.nn as nn
from mmaction.utils import import_module_error_func
from ..builder import BACKBONES
from .resnet_tsm import ResNetTSM
try:
from mmcv.ops import tin_shift
except (ImportError, ModuleNotFoundError):
@import_module_error_func('mmcv-full')
def tin_shift(*args, **kwargs):
pass
def linear_sampler(data, offset):
"""Differentiable Temporal-wise Frame Sampling, which is essentially a
linear interpolation process.
It gets the feature map which has been split into several groups
and shift them by different offsets according to their groups.
Then compute the weighted sum along with the temporal dimension.
Args:
data (torch.Tensor): Split data for certain group in shape
[N, num_segments, C, H, W].
offset (torch.Tensor): Data offsets for this group data in shape
[N, num_segments].
"""
# [N, num_segments, C, H, W]
n, t, c, h, w = data.shape
# offset0, offset1: [N, num_segments]
offset0 = torch.floor(offset).int()
offset1 = offset0 + 1
# data, data0, data1: [N, num_segments, C, H * W]
data = data.view(n, t, c, h * w).contiguous()
data0 = tin_shift(data, offset0)
data1 = tin_shift(data, offset1)
# weight0, weight1: [N, num_segments]
weight0 = 1 - (offset - offset0.float())
weight1 = 1 - weight0
# weight0, weight1:
# [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C]
group_size = offset.shape[1]
weight0 = weight0[:, :, None].repeat(1, 1, c // group_size)
weight0 = weight0.view(weight0.size(0), -1)
weight1 = weight1[:, :, None].repeat(1, 1, c // group_size)
weight1 = weight1.view(weight1.size(0), -1)
# weight0, weight1: [N, C] -> [N, 1, C, 1]
weight0 = weight0[:, None, :, None]
weight1 = weight1[:, None, :, None]
# output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W]
output = weight0 * data0 + weight1 * data1
output = output.view(n, t, c, h, w)
return output
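# Illustrative sketch (an addition, not part of the original file): the
# weighting above is plain linear interpolation between the two integer
# offsets. This toy check reproduces that weighting for a scalar offset
# without the CUDA `tin_shift` op, assuming only that torch is available.
def _demo_linear_weights(offset=0.3):
    offset = torch.tensor([[offset]])
    offset0 = torch.floor(offset).int()
    weight0 = 1 - (offset - offset0.float())  # favors the floor offset
    weight1 = 1 - weight0                     # favors the ceil offset
    # For offset=0.3: weight0=0.7 and weight1=0.3; they always sum to 1.
    assert torch.allclose(weight0 + weight1, torch.ones_like(weight0))
    return weight0, weight1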
class CombineNet(nn.Module):
"""Combine Net.
It combines Temporal interlace module with some part of ResNet layer.
Args:
net1 (nn.module): Temporal interlace module.
net2 (nn.module): Some part of ResNet layer.
"""
def __init__(self, net1, net2):
super().__init__()
self.net1 = net1
self.net2 = net2
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# input shape: [num_batches * num_segments, C, H, W]
# output x shape: [num_batches * num_segments, C, H, W]
x = self.net1(x)
# [num_batches * num_segments, C, H, W]
x = self.net2(x)
return x
class WeightNet(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# we set the initial bias of the convolution
# layer to 0, and the final initial output will be 1.0
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate weight
# [N, C, T]
n, _, t = x.shape
# [N, groups, T]
x = self.conv(x)
x = x.view(n, self.groups, t)
# [N, T, groups]
x = x.permute(0, 2, 1)
# scale the output to range (0, 2)
x = 2 * self.sigmoid(x)
# [N, T, groups]
return x
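# Usage sketch (an illustrative addition, assuming only torch): WeightNet
# maps temporally pooled features [N, C, T] to per-segment, per-group
# weights bounded in (0, 2) by the `2 * sigmoid` rescaling.
def _demo_weight_net():
    net = WeightNet(in_channels=16, groups=2)
    x = torch.randn(4, 16, 8)  # [N, C, T]
    w = net(x)                 # [N, T, groups]
    assert w.shape == (4, 8, 2) and w.min() > 0 and w.max() < 2
    return w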
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
The OffsetNet consists of one convolution layer followed by two fc
layers with a ReLU activation in between. A sigmoid function is applied
to the final output, which is then shifted by -0.5 and scaled by 4 to
transform the offsets into the range (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
# hard code ``kernel_size`` and ``padding`` according to original repo.
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# The bias of the last fc layer is initialized so that the initial
# offset is about 0.5: sigmoid(0.5108) ~= 0.625, and 4 * (0.625 - 0.5) = 0.5
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate offset
# [N, C, T]
n, _, t = x.shape
# [N, 1, T]
x = self.conv(x)
# [N, T]
x = x.view(n, t)
# [N, T]
x = self.relu(self.fc1(x))
# [N, groups]
x = self.fc2(x)
# [N, 1, groups]
x = x.view(n, 1, -1)
# bound the output offsets to (-2, 2), i.e. (-t/4, t/4)
# for t = num_segments = 8
x = 4 * (self.sigmoid(x) - 0.5)
# [N, 1, groups]
return x
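# Usage sketch (an illustrative addition, assuming only torch): OffsetNet
# maps pooled features [N, C, T] to one offset per group, bounded in
# (-2, 2) by the final `4 * (sigmoid(x) - 0.5)` rescaling.
def _demo_offset_net():
    net = OffsetNet(in_channels=16, groups=2, num_segments=8)
    x = torch.randn(4, 16, 8)  # [N, C, T]
    offset = net(x)            # [N, 1, groups]
    assert offset.shape == (4, 1, 2)
    assert offset.min() > -2 and offset.max() < 2
    return offset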
class TemporalInterlace(nn.Module):
"""Temporal interlace module.
This module is proposed in `Temporal Interlacing Network
<https://arxiv.org/abs/2001.06499>`_
Args:
in_channels (int): Channel num of input features.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of division parts for shift. Default: 1.
"""
def __init__(self, in_channels, num_segments=3, shift_div=1):
super().__init__()
self.num_segments = num_segments
self.shift_div = shift_div
self.in_channels = in_channels
# hard code ``deform_groups`` according to original repo.
self.deform_groups = 2
self.offset_net = OffsetNet(in_channels // shift_div,
self.deform_groups, num_segments)
self.weight_net = WeightNet(in_channels // shift_div,
self.deform_groups)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x: [N, C, H, W],
# where N = num_batches x num_segments, C = shift_div * num_folds
n, c, h, w = x.size()
num_batches = n // self.num_segments
num_folds = c // self.shift_div
# x_out: [num_batches x num_segments, C, H, W]
x_out = torch.zeros((n, c, h, w), device=x.device)
# x_descriptor: [num_batches, num_segments, num_folds, H, W]
x_descriptor = x[:, :num_folds, :, :].view(num_batches,
self.num_segments,
num_folds, h, w)
# x should only obtain information on temporal and channel dimensions
# x_pooled: [num_batches, num_segments, num_folds, W]
x_pooled = torch.mean(x_descriptor, 3)
# x_pooled: [num_batches, num_segments, num_folds]
x_pooled = torch.mean(x_pooled, 3)
# x_pooled: [num_batches, num_folds, num_segments]
x_pooled = x_pooled.permute(0, 2, 1).contiguous()
# Calculate weight and bias, here groups = 2
# x_offset: [num_batches, groups]
x_offset = self.offset_net(x_pooled).view(num_batches, -1)
# x_weight: [num_batches, num_segments, groups]
x_weight = self.weight_net(x_pooled)
# x_offset: [num_batches, 2 * groups]
x_offset = torch.cat([x_offset, -x_offset], 1)
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = linear_sampler(x_descriptor, x_offset)
# x_weight: [num_batches, num_segments, groups, 1]
x_weight = x_weight[:, :, :, None]
# x_weight:
# [num_batches, num_segments, groups * 2, c // self.shift_div // 4]
x_weight = x_weight.repeat(1, 1, 2, num_folds // 2 // 2)
# x_weight:
# [num_batches, num_segments, c // self.shift_div = num_folds]
x_weight = x_weight.view(x_weight.size(0), x_weight.size(1), -1)
# x_weight: [num_batches, num_segments, num_folds, 1, 1]
x_weight = x_weight[:, :, :, None, None]
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift * x_weight
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift.contiguous().view(n, num_folds, h, w)
# x_out: [num_batches x num_segments, C, H, W]
x_out[:, :num_folds, :] = x_shift
x_out[:, num_folds:, :] = x[:, num_folds:, :]
return x_out
@BACKBONES.register_module()
class ResNetTIN(ResNetTSM):
"""ResNet backbone for TIN.
Args:
depth (int): Depth of ResNet, from {18, 34, 50, 101, 152}.
num_segments (int): Number of frame segments. Default: 8.
is_tin (bool): Whether to apply temporal interlace. Default: True.
shift_div (int): Number of division parts for shift. Default: 4.
kwargs (dict, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_tin=True,
shift_div=4,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_tin = is_tin
self.shift_div = shift_div
def make_temporal_interlace(self):
"""Make temporal interlace for some layers."""
num_segment_list = [self.num_segments] * 4
assert num_segment_list[-1] > 0
n_round = 1
if len(list(self.layer3.children())) >= 23:
print(f'=> Using n_round {n_round} to insert temporal shift.')
def make_block_interlace(stage, num_segments, shift_div):
"""Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
nn.Sequential: A Sequential container consisting of the
deformed Interlace blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
tds = TemporalInterlace(
b.conv1.in_channels,
num_segments=num_segments,
shift_div=shift_div)
blocks[i].conv1.conv = CombineNet(tds,
blocks[i].conv1.conv)
return nn.Sequential(*blocks)
self.layer1 = make_block_interlace(self.layer1, num_segment_list[0],
self.shift_div)
self.layer2 = make_block_interlace(self.layer2, num_segment_list[1],
self.shift_div)
self.layer3 = make_block_interlace(self.layer3, num_segment_list[2],
self.shift_div)
self.layer4 = make_block_interlace(self.layer4, num_segment_list[3],
self.shift_div)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super(ResNetTSM, self).init_weights()
if self.is_tin:
self.make_temporal_interlace()
if len(self.non_local_cfg) != 0:
self.make_non_local()
| 13,132 | 33.651715 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/tanet.py | from copy import deepcopy
import torch.nn as nn
from torch.utils import checkpoint as cp
from ..builder import BACKBONES
from ..common import TAM
from .resnet import Bottleneck, ResNet
class TABlock(nn.Module):
"""Temporal Adaptive Block (TA-Block) for TANet.
This block is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
The temporal adaptive module (TAM) is embedded into ResNet-Block
after the first Conv2D, which turns the vanilla ResNet-Block
into TA-Block.
Args:
block (nn.Module): Residual blocks to be substituted.
num_segments (int): Number of frame segments.
tam_cfg (dict): Config for temporal adaptive module (TAM).
Default: dict().
"""
def __init__(self, block, num_segments, tam_cfg=dict()):
super().__init__()
self.tam_cfg = deepcopy(tam_cfg)
self.block = block
self.num_segments = num_segments
self.tam = TAM(
in_channels=block.conv1.out_channels,
num_segments=num_segments,
**self.tam_cfg)
if not isinstance(self.block, Bottleneck):
raise NotImplementedError(
    'TA-Blocks are only implemented for the Bottleneck block pattern.')
def forward(self, x):
assert isinstance(self.block, Bottleneck)
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.block.conv1(x)
out = self.tam(out)
out = self.block.conv2(out)
out = self.block.conv3(out)
if self.block.downsample is not None:
identity = self.block.downsample(x)
out = out + identity
return out
if self.block.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.block.relu(out)
return out
@BACKBONES.register_module()
class TANet(ResNet):
"""Temporal Adaptive Network (TANet) backbone.
This backbone is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
Embedding the temporal adaptive module (TAM) into ResNet to
instantiate TANet.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_segments (int): Number of frame segments.
tam_cfg (dict | None): Config for temporal adaptive module (TAM).
Default: dict().
**kwargs (keyword arguments, optional): Arguments for ResNet except
```depth```.
"""
def __init__(self, depth, num_segments, tam_cfg=dict(), **kwargs):
super().__init__(depth, **kwargs)
assert num_segments >= 3
self.num_segments = num_segments
self.tam_cfg = deepcopy(tam_cfg)
def init_weights(self):
super().init_weights()
self.make_tam_modeling()
def make_tam_modeling(self):
"""Replace ResNet-Block with TA-Block."""
def make_tam_block(stage, num_segments, tam_cfg=dict()):
blocks = list(stage.children())
for i, block in enumerate(blocks):
blocks[i] = TABlock(block, num_segments, deepcopy(tam_cfg))
return nn.Sequential(*blocks)
for i in range(self.num_stages):
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
setattr(self, layer_name,
make_tam_block(res_layer, self.num_segments, self.tam_cfg))
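# Usage sketch (an illustrative addition, not part of the original file;
# it assumes the surrounding mmaction package is importable): TAM insertion
# happens inside `init_weights`, so the backbone must be initialized before
# TA-Blocks appear in the layers.
def _demo_tanet():
    model = TANet(depth=50, num_segments=8)
    model.init_weights()  # replaces every Bottleneck with a TABlock
    assert isinstance(model.layer1[0], TABlock)
    return model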
| 3,690 | 31.095652 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/backbones/x3d.py | import math
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, Swish, build_activation_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(
channels, self.bottleneck, kernel_size=1, padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(
self.bottleneck, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width,
int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
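# Usage sketch (an illustrative addition, assuming only torch): the SE unit
# squeezes [N, C, T, H, W] features to channel weights and rescales the
# input; `_round_width` keeps the bottleneck width divisible by 8, e.g.
# 48 * 1/16 = 3 rounds up to 8.
def _demo_se_module():
    import torch  # `torch` itself is not imported at module level here
    se = SEModule(channels=48, reduction=1 / 16)
    assert se.bottleneck == 8
    x = torch.randn(2, 48, 4, 7, 7)
    out = se(x)
    assert out.shape == x.shape
    return out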
class BlockX3D(nn.Module):
"""BlockX3D 3d building block for X3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
outplanes (int): Number of channels produced by the final conv3d layer.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
inplanes,
planes,
outplanes,
spatial_stride=1,
downsample=None,
se_ratio=None,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.outplanes = outplanes
self.spatial_stride = spatial_stride
self.downsample = downsample
self.se_ratio = se_ratio
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.act_cfg_swish = dict(type='Swish')
self.with_cp = with_cp
self.conv1 = ConvModule(
in_channels=inplanes,
out_channels=planes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# Here we use the channel-wise conv
self.conv2 = ConvModule(
in_channels=planes,
out_channels=planes,
kernel_size=3,
stride=(1, self.spatial_stride, self.spatial_stride),
padding=1,
groups=planes,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.swish = Swish()
self.conv3 = ConvModule(
in_channels=planes,
out_channels=outplanes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
if self.se_ratio is not None:
self.se_module = SEModule(planes, self.se_ratio)
self.relu = build_activation_layer(self.act_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.se_ratio is not None:
out = self.se_module(out)
out = self.swish(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# We do not support initialization with 2D pretrained weights for X3D
@BACKBONES.register_module()
class X3D(nn.Module):
"""X3D backbone. https://arxiv.org/pdf/2004.04730.pdf.
Args:
gamma_w (float): Global channel width expansion factor. Default: 1.
gamma_b (float): Bottleneck channel width expansion factor. Default: 1.
gamma_d (float): Network depth expansion factor. Default: 1.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
Default: ``(1, 2, 2, 2)``.
frozen_stages (int): Stages to be frozen (all param fixed). If set to
-1, it means not freezing any parameters. Default: -1.
se_style (str): The style of inserting SE modules into BlockX3D, 'half'
denotes insert into half of the blocks, while 'all' denotes insert
into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: 1 / 16.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
gamma_w=1.0,
gamma_b=1.0,
gamma_d=1.0,
pretrained=None,
in_channels=3,
num_stages=4,
spatial_strides=(2, 2, 2, 2),
frozen_stages=-1,
se_style='half',
se_ratio=1 / 16,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.gamma_w = gamma_w
self.gamma_b = gamma_b
self.gamma_d = gamma_d
self.pretrained = pretrained
self.in_channels = in_channels
# Hard coded, can be changed by gamma_w
self.base_channels = 24
self.stage_blocks = [1, 2, 5, 3]
# apply parameters gamma_w and gamma_d
self.base_channels = self._round_width(self.base_channels,
self.gamma_w)
self.stage_blocks = [
self._round_repeats(x, self.gamma_d) for x in self.stage_blocks
]
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.spatial_strides = spatial_strides
assert len(spatial_strides) == num_stages
self.frozen_stages = frozen_stages
self.se_style = se_style
assert self.se_style in ['all', 'half']
self.se_ratio = se_ratio
assert (self.se_ratio is None) or (self.se_ratio > 0)
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block = BlockX3D
self.stage_blocks = self.stage_blocks[:num_stages]
self.layer_inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
inplanes = self.base_channels * 2**i
planes = int(inplanes * self.gamma_b)
res_layer = self.make_res_layer(
self.block,
self.layer_inplanes,
inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
se_style=self.se_style,
se_ratio=self.se_ratio,
use_swish=self.use_swish,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
with_cp=with_cp,
**kwargs)
self.layer_inplanes = inplanes
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.base_channels * 2**(len(self.stage_blocks) - 1)
self.conv5 = ConvModule(
self.feat_dim,
int(self.feat_dim * self.gamma_b),
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.feat_dim = int(self.feat_dim * self.gamma_b)
@staticmethod
def _round_width(width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters)
@staticmethod
def _round_repeats(repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
# the module is parameterized with gamma_b
# no temporal_stride
def make_res_layer(self,
block,
layer_inplanes,
inplanes,
planes,
blocks,
spatial_stride=1,
se_style='half',
se_ratio=None,
use_swish=True,
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict | None): Config for norm layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if spatial_stride != 1 or layer_inplanes != inplanes:
downsample = ConvModule(
layer_inplanes,
inplanes,
kernel_size=1,
stride=(1, spatial_stride, spatial_stride),
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
use_se = [False] * blocks
if self.se_style == 'all':
use_se = [True] * blocks
elif self.se_style == 'half':
use_se = [i % 2 == 0 for i in range(blocks)]
else:
raise NotImplementedError
layers = []
layers.append(
block(
layer_inplanes,
planes,
inplanes,
spatial_stride=spatial_stride,
downsample=downsample,
se_ratio=se_ratio if use_se[0] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
inplanes,
spatial_stride=1,
se_ratio=se_ratio if use_se[i] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1_s = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.conv1_t = ConvModule(
self.base_channels,
self.base_channels,
kernel_size=(5, 1, 1),
stride=(1, 1, 1),
padding=(2, 0, 0),
groups=self.base_channels,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1_s.eval()
self.conv1_t.eval()
for param in self.conv1_s.parameters():
param.requires_grad = False
for param in self.conv1_t.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, BlockX3D):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1_s(x)
x = self.conv1_t(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
x = self.conv5(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
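# Usage sketch (an illustrative addition; the gamma values below roughly
# follow the X3D-M setting from the paper and are an assumption, not taken
# from this file). The backbone maps [N, 3, T, H, W] clips to 5D features.
def _demo_x3d():
    import torch  # `torch` itself is not imported at module level here
    model = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2)
    model.init_weights()
    x = torch.randn(1, 3, 16, 112, 112)
    feat = model(x)  # [1, model.feat_dim, 16, 4, 4]
    assert feat.shape[1] == model.feat_dim
    return feat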
| 19,116 | 35.482824 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/base.py | from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from ...core import top_k_accuracy
from ..builder import build_loss
class AvgConsensus(nn.Module):
"""Average consensus module.
Args:
dim (int): Decide which dim consensus function to apply.
Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
"""Defines the computation performed at every call."""
return x.mean(dim=self.dim, keepdim=True)
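# Usage sketch (an illustrative addition, assuming only torch): AvgConsensus
# averages per-segment class scores along `dim`, keeping that dim so the
# result still broadcasts against the input.
def _demo_avg_consensus():
    consensus = AvgConsensus(dim=1)
    x = torch.randn(2, 8, 400)  # [N, num_segs, num_classes]
    out = consensus(x)          # [N, 1, num_classes]
    assert out.shape == (2, 1, 400)
    return out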
class BaseHead(nn.Module, metaclass=ABCMeta):
"""Base class for head.
All Head should subclass it.
All subclass should overwrite:
- Methods:``init_weights``, initializing weights in some modules.
- Methods:``forward``, supporting to forward both for training and testing.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss', loss_weight=1.0).
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0),
multi_class=False,
label_smooth_eps=0.0):
super().__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.loss_type = loss_cls['type']
self.loss_cls = build_loss(loss_cls)
self.multi_class = multi_class
self.label_smooth_eps = label_smooth_eps
@abstractmethod
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
@abstractmethod
def forward(self, x):
"""Defines the computation performed at every call."""
def loss(self, cls_score, labels, **kwargs):
"""Calculate the loss given output ``cls_score``, target ``labels``.
Args:
cls_score (torch.Tensor): The output of the model.
labels (torch.Tensor): The target output of the model.
Returns:
dict: A dict containing field 'loss_cls'(mandatory)
and 'top1_acc', 'top5_acc'(optional).
"""
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_score.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
# When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if not self.multi_class and cls_score.size() != labels.size():
top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses['top1_acc'] = torch.tensor(
top_k_acc[0], device=cls_score.device)
losses['top5_acc'] = torch.tensor(
top_k_acc[1], device=cls_score.device)
elif self.multi_class and self.label_smooth_eps != 0:
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_score, labels, **kwargs)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses
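# Worked example (an illustrative addition, assuming only torch): the
# label-smoothing step in `BaseHead.loss` blends one-hot targets toward
# the uniform distribution while keeping each row a valid distribution.
def _demo_label_smooth(eps=0.1, num_classes=4):
    labels = torch.tensor([[0., 0., 1., 0.]])
    smoothed = (1 - eps) * labels + eps / num_classes
    # -> tensor([[0.0250, 0.0250, 0.9250, 0.0250]]); rows still sum to 1.
    assert torch.allclose(smoothed.sum(dim=1), torch.ones(1))
    return smoothed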
| 3,854 | 34.045455 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/fbo_head.py | import copy
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from mmaction.models.common import LFB
from mmaction.utils import get_root_logger
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class NonLocalLayer(nn.Module):
"""Non-local layer used in `FBONonLocal` is a variation of the vanilla non-
local block.
Args:
st_feat_channels (int): Channels of short-term features.
lt_feat_channels (int): Channels of long-term features.
latent_channels (int): Channels of latent features.
use_scale (bool): Whether to scale pairwise_weight by
`1/sqrt(latent_channels)`. Default: True.
pre_activate (bool): Whether to use the activation function before
upsampling. Default: True.
conv_cfg (Dict | None): The config dict for convolution layers. If
not specified, it will use `nn.Conv3d` for convolution layers.
Default: None.
norm_cfg (Dict | None): The config dict for normalization layers.
Default: None.
dropout_ratio (float, optional): Probability of dropout layer.
Default: 0.2.
zero_init_out_conv (bool): Whether to use zero initialization for
out_conv. Default: False.
"""
def __init__(self,
st_feat_channels,
lt_feat_channels,
latent_channels,
num_st_feat,
num_lt_feat,
use_scale=True,
pre_activate=True,
pre_activate_with_ln=True,
conv_cfg=None,
norm_cfg=None,
dropout_ratio=0.2,
zero_init_out_conv=False):
super().__init__()
if conv_cfg is None:
conv_cfg = dict(type='Conv3d')
self.st_feat_channels = st_feat_channels
self.lt_feat_channels = lt_feat_channels
self.latent_channels = latent_channels
self.num_st_feat = num_st_feat
self.num_lt_feat = num_lt_feat
self.use_scale = use_scale
self.pre_activate = pre_activate
self.pre_activate_with_ln = pre_activate_with_ln
self.dropout_ratio = dropout_ratio
self.zero_init_out_conv = zero_init_out_conv
self.st_feat_conv = ConvModule(
self.st_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.lt_feat_conv = ConvModule(
self.lt_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.global_conv = ConvModule(
self.lt_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
if pre_activate:
self.ln = nn.LayerNorm([latent_channels, num_st_feat, 1, 1])
else:
self.ln = nn.LayerNorm([st_feat_channels, num_st_feat, 1, 1])
self.relu = nn.ReLU()
self.out_conv = ConvModule(
self.latent_channels,
self.st_feat_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
if self.dropout_ratio > 0:
self.dropout = nn.Dropout(self.dropout_ratio)
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {pretrained}')
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_out_conv:
constant_init(self.out_conv, 0, bias=0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, st_feat, lt_feat):
n, c = st_feat.size(0), self.latent_channels
num_st_feat, num_lt_feat = self.num_st_feat, self.num_lt_feat
theta = self.st_feat_conv(st_feat)
theta = theta.view(n, c, num_st_feat)
phi = self.lt_feat_conv(lt_feat)
phi = phi.view(n, c, num_lt_feat)
g = self.global_conv(lt_feat)
g = g.view(n, c, num_lt_feat)
# (n, num_st_feat, c), (n, c, num_lt_feat)
# -> (n, num_st_feat, num_lt_feat)
theta_phi = torch.matmul(theta.permute(0, 2, 1), phi)
if self.use_scale:
theta_phi /= c**0.5
p = theta_phi.softmax(dim=-1)
# (n, c, num_lt_feat), (n, num_lt_feat, num_st_feat)
# -> (n, c, num_st_feat, 1, 1)
out = torch.matmul(g, p.permute(0, 2, 1)).view(n, c, num_st_feat, 1, 1)
# If pre-activation is enabled, apply (optionally) layer norm and relu
# before out_conv; otherwise relu is applied outside this layer.
if self.pre_activate:
if self.pre_activate_with_ln:
out = self.ln(out)
out = self.relu(out)
out = self.out_conv(out)
if not self.pre_activate:
out = self.ln(out)
if self.dropout_ratio > 0:
out = self.dropout(out)
return out
class FBONonLocal(nn.Module):
"""Non local feature bank operator.
Args:
st_feat_channels (int): Channels of short-term features.
lt_feat_channels (int): Channels of long-term features.
latent_channels (int): Channels of latent features.
num_st_feat (int): Number of short-term roi features.
num_lt_feat (int): Number of long-term roi features.
num_non_local_layers (int): Number of non-local layers, which is
at least 1. Default: 2.
st_feat_dropout_ratio (float): Probability of dropout layer for
short-term features. Default: 0.2.
lt_feat_dropout_ratio (float): Probability of dropout layer for
long-term features. Default: 0.2.
pre_activate (bool): Whether to use the activation function before
upsampling in non local layers. Default: True.
zero_init_out_conv (bool): Whether to use zero initialization for
out_conv in NonLocalLayer. Default: False.
"""
def __init__(self,
st_feat_channels,
lt_feat_channels,
latent_channels,
num_st_feat,
num_lt_feat,
num_non_local_layers=2,
st_feat_dropout_ratio=0.2,
lt_feat_dropout_ratio=0.2,
pre_activate=True,
zero_init_out_conv=False):
super().__init__()
assert num_non_local_layers >= 1, (
'At least one non_local_layer is needed.')
self.st_feat_channels = st_feat_channels
self.lt_feat_channels = lt_feat_channels
self.latent_channels = latent_channels
self.num_st_feat = num_st_feat
self.num_lt_feat = num_lt_feat
self.num_non_local_layers = num_non_local_layers
self.st_feat_dropout_ratio = st_feat_dropout_ratio
self.lt_feat_dropout_ratio = lt_feat_dropout_ratio
self.pre_activate = pre_activate
self.zero_init_out_conv = zero_init_out_conv
self.st_feat_conv = nn.Conv3d(
st_feat_channels, latent_channels, kernel_size=1)
self.lt_feat_conv = nn.Conv3d(
lt_feat_channels, latent_channels, kernel_size=1)
if self.st_feat_dropout_ratio > 0:
self.st_feat_dropout = nn.Dropout(self.st_feat_dropout_ratio)
if self.lt_feat_dropout_ratio > 0:
self.lt_feat_dropout = nn.Dropout(self.lt_feat_dropout_ratio)
if not self.pre_activate:
self.relu = nn.ReLU()
self.non_local_layers = []
for idx in range(self.num_non_local_layers):
layer_name = f'non_local_layer_{idx + 1}'
self.add_module(
layer_name,
NonLocalLayer(
latent_channels,
latent_channels,
latent_channels,
num_st_feat,
num_lt_feat,
pre_activate=self.pre_activate,
zero_init_out_conv=self.zero_init_out_conv))
self.non_local_layers.append(layer_name)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
kaiming_init(self.st_feat_conv)
kaiming_init(self.lt_feat_conv)
for layer_name in self.non_local_layers:
non_local_layer = getattr(self, layer_name)
non_local_layer.init_weights(pretrained=pretrained)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, st_feat, lt_feat):
# prepare st_feat
st_feat = self.st_feat_conv(st_feat)
if self.st_feat_dropout_ratio > 0:
st_feat = self.st_feat_dropout(st_feat)
# prepare lt_feat
lt_feat = self.lt_feat_conv(lt_feat)
if self.lt_feat_dropout_ratio > 0:
lt_feat = self.lt_feat_dropout(lt_feat)
# fuse short-term and long-term features in NonLocal Layer
for layer_name in self.non_local_layers:
identity = st_feat
non_local_layer = getattr(self, layer_name)
nl_out = non_local_layer(st_feat, lt_feat)
nl_out = identity + nl_out
if not self.pre_activate:
nl_out = self.relu(nl_out)
st_feat = nl_out
return nl_out
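# Usage sketch (an illustrative addition, assuming mmcv is available): the
# short-term ROI feature attends over the sampled long-term bank features;
# both inputs are 5D with spatial dims already pooled to 1 x 1.
def _demo_fbo_non_local():
    fbo = FBONonLocal(
        st_feat_channels=512,
        lt_feat_channels=512,
        latent_channels=128,
        num_st_feat=1,
        num_lt_feat=20)
    st_feat = torch.randn(4, 512, 1, 1, 1)   # pooled short-term ROI feats
    lt_feat = torch.randn(4, 512, 20, 1, 1)  # sampled long-term bank feats
    out = fbo(st_feat, lt_feat)
    assert out.shape == (4, 128, 1, 1, 1)
    return out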
class FBOAvg(nn.Module):
"""Avg pool feature bank operator."""
def __init__(self):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, None, None))
def init_weights(self, pretrained=None):
# FBOAvg has no parameters to be initialized.
pass
def forward(self, st_feat, lt_feat):
out = self.avg_pool(lt_feat)
return out
class FBOMax(nn.Module):
"""Max pool feature bank operator."""
def __init__(self):
super().__init__()
self.max_pool = nn.AdaptiveMaxPool3d((1, None, None))
def init_weights(self, pretrained=None):
# FBOMax has no parameters to be initialized.
pass
def forward(self, st_feat, lt_feat):
out = self.max_pool(lt_feat)
return out
class FBOHead(nn.Module):
"""Feature Bank Operator Head.
Add feature bank operator for the spatiotemporal detection model to fuse
short-term features and long-term features.
Args:
lfb_cfg (Dict): The config dict for LFB which is used to sample
long-term features.
fbo_cfg (Dict): The config dict for feature bank operator (FBO). The
type of fbo is also in the config dict and supported fbo type is
`fbo_dict`.
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
"""
fbo_dict = {'non_local': FBONonLocal, 'avg': FBOAvg, 'max': FBOMax}
def __init__(self,
lfb_cfg,
fbo_cfg,
temporal_pool_type='avg',
spatial_pool_type='max'):
super().__init__()
fbo_type = fbo_cfg.pop('type', 'non_local')
assert fbo_type in FBOHead.fbo_dict
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.lfb_cfg = copy.deepcopy(lfb_cfg)
self.fbo_cfg = copy.deepcopy(fbo_cfg)
self.lfb = LFB(**self.lfb_cfg)
self.fbo = self.fbo_dict[fbo_type](**self.fbo_cfg)
# Pool by default
if temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
def init_weights(self, pretrained=None):
"""Initialize the weights in the module.
Args:
pretrained (str, optional): Path to pre-trained weights.
Default: None.
"""
self.fbo.init_weights(pretrained=pretrained)
def sample_lfb(self, rois, img_metas):
"""Sample long-term features for each ROI feature."""
inds = rois[:, 0].type(torch.int64)
lt_feat_list = []
for ind in inds:
lt_feat_list.append(self.lfb[img_metas[ind]['img_key']].to())
lt_feat = torch.stack(lt_feat_list, dim=0)
# [N, lfb_channels, window_size * max_num_feat_per_step]
lt_feat = lt_feat.permute(0, 2, 1).contiguous()
return lt_feat.unsqueeze(-1).unsqueeze(-1)
def forward(self, x, rois, img_metas, **kwargs):
# [N, C, 1, 1, 1]
st_feat = self.temporal_pool(x)
st_feat = self.spatial_pool(st_feat)
identity = st_feat
# [N, C, window_size * num_feat_per_step, 1, 1]
lt_feat = self.sample_lfb(rois, img_metas).to(st_feat.device)
fbo_feat = self.fbo(st_feat, lt_feat)
out = torch.cat([identity, fbo_feat], dim=1)
return out
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(FBOHead)
| 14,120 | 34.390977 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/ssn_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
def parse_stage_config(stage_cfg):
"""Parse config of STPP for three stages.
Args:
stage_cfg (int | tuple[int]):
Config of structured temporal pyramid pooling.
Returns:
tuple[tuple[int], int]:
Config of structured temporal pyramid pooling and
total number of parts(number of multipliers).
"""
if isinstance(stage_cfg, int):
return (stage_cfg, ), stage_cfg
if isinstance(stage_cfg, tuple):
return stage_cfg, sum(stage_cfg)
raise ValueError(f'Incorrect STPP config {stage_cfg}')
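# Worked examples (an illustrative addition): an int config denotes a single
# pyramid level, while a tuple denotes several levels whose part counts are
# summed into the total multiplier count.
def _demo_parse_stage_config():
    assert parse_stage_config(1) == ((1,), 1)
    assert parse_stage_config((1, 2)) == ((1, 2), 3)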
class STPPTrain(nn.Module):
"""Structured temporal pyramid pooling for SSN at training.
Args:
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
num_segments_list (tuple): Number of segments to be sampled
in three stages. Default: (2, 5, 2).
"""
def __init__(self, stpp_stage=(1, (1, 2), 1), num_segments_list=(2, 5, 2)):
super().__init__()
starting_part, starting_multiplier = parse_stage_config(stpp_stage[0])
course_part, course_multiplier = parse_stage_config(stpp_stage[1])
ending_part, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
self.stpp_stages = (starting_part, course_part, ending_part)
self.multiplier_list = (starting_multiplier, course_multiplier,
ending_multiplier)
self.num_segments_list = num_segments_list
@staticmethod
def _extract_stage_feature(stage_feat, stage_parts, num_multipliers,
scale_factors, num_samples):
"""Extract stage feature based on structured temporal pyramid pooling.
Args:
stage_feat (torch.Tensor): Stage features to be STPP.
stage_parts (tuple): Config of STPP.
num_multipliers (int): Total number of parts in the stage.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
num_samples (int): Number of samples.
Returns:
torch.Tensor: Features of the stage.
"""
stage_stpp_feat = []
stage_len = stage_feat.size(1)
for stage_part in stage_parts:
ticks = torch.arange(0, stage_len + 1e-5,
stage_len / stage_part).int()
for i in range(stage_part):
part_feat = stage_feat[:, ticks[i]:ticks[i + 1], :].mean(
dim=1) / num_multipliers
if scale_factors is not None:
part_feat = (
part_feat * scale_factors.view(num_samples, 1))
stage_stpp_feat.append(part_feat)
return stage_stpp_feat
def forward(self, x, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor]:
Features for predicting activity scores and
completeness scores.
"""
x0 = self.num_segments_list[0]
x1 = x0 + self.num_segments_list[1]
num_segments = x1 + self.num_segments_list[2]
feat_dim = x.size(1)
x = x.view(-1, num_segments, feat_dim)
num_samples = x.size(0)
scale_factors = scale_factors.view(-1, 2)
stage_stpp_feats = []
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0],
self.multiplier_list[0],
scale_factors[:, 0], num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1],
self.multiplier_list[1], None,
num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2],
self.multiplier_list[2],
scale_factors[:, 1], num_samples))
stpp_feat = torch.cat(stage_stpp_feats, dim=1)
course_feat = x[:, x0:x1, :].mean(dim=1)
return course_feat, stpp_feat
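# Usage sketch (an illustrative addition, assuming only torch): with the
# defaults, every proposal is sampled as 2 + 5 + 2 = 9 segments, and STPP
# concatenates 1 + (1 + 2) + 1 = 5 pooled parts per proposal.
def _demo_stpp_train():
    stpp = STPPTrain()  # stpp_stage=(1, (1, 2), 1)
    feat_dim = 32
    x = torch.randn(4 * 9, feat_dim)  # 4 proposals x 9 segments each
    scale_factors = torch.ones(4, 2)
    course_feat, stpp_feat = stpp(x, scale_factors)
    assert course_feat.shape == (4, feat_dim)
    assert stpp_feat.shape == (4, 5 * feat_dim)
    return course_feat, stpp_feat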
class STPPTest(nn.Module):
"""Structured temporal pyramid pooling for SSN at testing.
Args:
num_classes (int): Number of classes to be classified.
use_regression (bool): Whether to perform regression or not.
Default: True.
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
"""
def __init__(self,
num_classes,
use_regression=True,
stpp_stage=(1, (1, 2), 1)):
super().__init__()
self.activity_score_len = num_classes + 1
self.complete_score_len = num_classes
self.reg_score_len = num_classes * 2
self.use_regression = use_regression
starting_parts, starting_multiplier = parse_stage_config(stpp_stage[0])
course_parts, course_multiplier = parse_stage_config(stpp_stage[1])
ending_parts, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
if self.use_regression:
self.feat_dim = (
self.activity_score_len + self.num_multipliers *
(self.complete_score_len + self.reg_score_len))
else:
self.feat_dim = (
self.activity_score_len +
self.num_multipliers * self.complete_score_len)
self.stpp_stage = (starting_parts, course_parts, ending_parts)
self.activity_slice = slice(0, self.activity_score_len)
self.complete_slice = slice(
self.activity_slice.stop, self.activity_slice.stop +
self.complete_score_len * self.num_multipliers)
self.reg_slice = slice(
self.complete_slice.stop, self.complete_slice.stop +
self.reg_score_len * self.num_multipliers)
@staticmethod
def _pyramids_pooling(out_scores, index, raw_scores, ticks, scale_factors,
score_len, stpp_stage):
"""Perform pyramids pooling.
Args:
out_scores (torch.Tensor): Scores to be returned.
index (int): Index of output scores.
raw_scores (torch.Tensor): Raw scores before STPP.
ticks (list): Ticks of raw scores.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
score_len (int): Length of the score.
stpp_stage (tuple): Config of STPP.
"""
offset = 0
for stage_idx, stage_cfg in enumerate(stpp_stage):
if stage_idx == 0:
scale_factor = scale_factors[0]
elif stage_idx == len(stpp_stage) - 1:
scale_factor = scale_factors[1]
else:
scale_factor = 1.0
sum_parts = sum(stage_cfg)
tick_left = ticks[stage_idx]
tick_right = float(max(ticks[stage_idx] + 1, ticks[stage_idx + 1]))
if tick_right <= 0 or tick_left >= raw_scores.size(0):
offset += sum_parts
continue
for num_parts in stage_cfg:
part_ticks = torch.arange(tick_left, tick_right + 1e-5,
(tick_right - tick_left) /
num_parts).int()
for i in range(num_parts):
part_tick_left = part_ticks[i]
part_tick_right = part_ticks[i + 1]
if part_tick_right - part_tick_left >= 1:
raw_score = raw_scores[part_tick_left:part_tick_right,
offset *
score_len:(offset + 1) *
score_len]
raw_scale_score = raw_score.mean(dim=0) * scale_factor
out_scores[index, :] += raw_scale_score.detach().cpu()
offset += 1
return out_scores
def forward(self, x, proposal_ticks, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
proposal_ticks (list): Ticks of proposals to be STPP.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
out_activity_scores (torch.Tensor): Activity scores
out_complete_scores (torch.Tensor): Completeness scores.
out_reg_scores (torch.Tensor): Regression scores.
"""
assert x.size(1) == self.feat_dim
num_ticks = proposal_ticks.size(0)
out_activity_scores = torch.zeros((num_ticks, self.activity_score_len),
dtype=x.dtype)
raw_activity_scores = x[:, self.activity_slice]
out_complete_scores = torch.zeros((num_ticks, self.complete_score_len),
dtype=x.dtype)
raw_complete_scores = x[:, self.complete_slice]
if self.use_regression:
out_reg_scores = torch.zeros((num_ticks, self.reg_score_len),
dtype=x.dtype)
raw_reg_scores = x[:, self.reg_slice]
else:
out_reg_scores = None
raw_reg_scores = None
for i in range(num_ticks):
ticks = proposal_ticks[i]
out_activity_scores[i, :] = raw_activity_scores[
ticks[1]:max(ticks[1] + 1, ticks[2]), :].mean(dim=0)
out_complete_scores = self._pyramids_pooling(
out_complete_scores, i, raw_complete_scores, ticks,
scale_factors[i], self.complete_score_len, self.stpp_stage)
if self.use_regression:
out_reg_scores = self._pyramids_pooling(
out_reg_scores, i, raw_reg_scores, ticks, scale_factors[i],
self.reg_score_len, self.stpp_stage)
return out_activity_scores, out_complete_scores, out_reg_scores
@HEADS.register_module()
class SSNHead(nn.Module):
"""The classification head for SSN.
Args:
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
in_channels (int): Number of channels for input data. Default: 1024.
num_classes (int): Number of classes to be classified. Default: 20.
consensus (dict): Config of segmental consensus.
use_regression (bool): Whether to perform regression or not.
Default: True.
init_std (float): Std value for Initiation. Default: 0.001.
"""
def __init__(self,
dropout_ratio=0.8,
in_channels=1024,
num_classes=20,
consensus=dict(
type='STPPTrain',
standalong_classifier=True,
stpp_cfg=(1, 1, 1),
num_seg=(2, 5, 2)),
use_regression=True,
init_std=0.001):
super().__init__()
self.dropout_ratio = dropout_ratio
self.num_classes = num_classes
self.use_regression = use_regression
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# The config is copied so that the model can instantiate different
# structured temporal pyramid pooling modules at training and testing.
# Warning: this copy must not be removed.
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'STPPTrain':
self.consensus = STPPTrain(**consensus_)
elif consensus_type == 'STPPTest':
consensus_['num_classes'] = self.num_classes
self.consensus = STPPTest(**consensus_)
self.in_channels_activity = in_channels
self.in_channels_complete = (
self.consensus.num_multipliers * in_channels)
self.activity_fc = nn.Linear(in_channels, num_classes + 1)
self.completeness_fc = nn.Linear(self.in_channels_complete,
num_classes)
if self.use_regression:
self.regressor_fc = nn.Linear(self.in_channels_complete,
num_classes * 2)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.activity_fc, std=self.init_std)
normal_init(self.completeness_fc, std=self.init_std)
if self.use_regression:
normal_init(self.regressor_fc, std=self.init_std)
def prepare_test_fc(self, stpp_feat_multiplier):
"""Reorganize the shape of fully connected layer at testing, in order
to improve testing efficiency.
Args:
stpp_feat_multiplier (int): Total number of parts.
Returns:
bool: Whether the shape transformation is ready for testing.
"""
in_features = self.activity_fc.in_features
out_features = (
self.activity_fc.out_features +
self.completeness_fc.out_features * stpp_feat_multiplier)
if self.use_regression:
out_features += (
self.regressor_fc.out_features * stpp_feat_multiplier)
self.test_fc = nn.Linear(in_features, out_features)
# Fetch weight and bias of the reorganized fc.
complete_weight = self.completeness_fc.weight.data.view(
self.completeness_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0, 1).contiguous().view(-1, in_features)
complete_bias = self.completeness_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.completeness_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((self.activity_fc.weight.data, complete_weight))
bias = torch.cat((self.activity_fc.bias.data, complete_bias))
if self.use_regression:
reg_weight = self.regressor_fc.weight.data.view(
self.regressor_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0,
1).contiguous().view(-1, in_features)
reg_bias = self.regressor_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.regressor_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((weight, reg_weight))
bias = torch.cat((bias, reg_bias))
self.test_fc.weight.data = weight
self.test_fc.bias.data = bias
return True
def forward(self, x, test_mode=False):
"""Defines the computation performed at every call."""
if not test_mode:
x, proposal_scale_factor = x
activity_feat, completeness_feat = self.consensus(
x, proposal_scale_factor)
if self.dropout is not None:
activity_feat = self.dropout(activity_feat)
completeness_feat = self.dropout(completeness_feat)
activity_scores = self.activity_fc(activity_feat)
complete_scores = self.completeness_fc(completeness_feat)
if self.use_regression:
bbox_preds = self.regressor_fc(completeness_feat)
bbox_preds = bbox_preds.view(-1,
self.completeness_fc.out_features,
2)
else:
bbox_preds = None
return activity_scores, complete_scores, bbox_preds
x, proposal_tick_list, scale_factor_list = x
test_scores = self.test_fc(x)
(activity_scores, completeness_scores,
bbox_preds) = self.consensus(test_scores, proposal_tick_list,
scale_factor_list)
return (test_scores, activity_scores, completeness_scores, bbox_preds)
| 16,778 | 39.627119 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/audio_tsn_head.py | import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class AudioTSNHead(BaseHead):
"""Classification head for TSN on audio.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, h, w]
x = self.avg_pool(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| 2,421 | 31.72973 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/trn_head.py | import itertools
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
class RelationModule(nn.Module):
"""Relation Module of TRN.
Args:
hidden_dim (int): The dimension of hidden layer of MLP in relation
module.
num_segments (int): Number of frame segments.
num_classes (int): Number of classes to be classified.
"""
def __init__(self, hidden_dim, num_segments, num_classes):
super().__init__()
self.hidden_dim = hidden_dim
self.num_segments = num_segments
self.num_classes = num_classes
bottleneck_dim = 512
self.classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(self.num_segments * self.hidden_dim, bottleneck_dim),
nn.ReLU(), nn.Linear(bottleneck_dim, self.num_classes))
def init_weights(self):
# Use the default kaiming_uniform for all nn.linear layers.
pass
def forward(self, x):
# [N, num_segs * hidden_dim]
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
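# Usage sketch (an illustrative addition, assuming only torch): the
# single-scale relation module flattens all segment features and
# classifies their concatenation.
def _demo_relation_module():
    rm = RelationModule(hidden_dim=256, num_segments=8, num_classes=174)
    x = torch.randn(2, 8, 256)  # [N, num_segs, hidden_dim]
    out = rm(x)                 # [N, num_classes]
    assert out.shape == (2, 174)
    return out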
class RelationModuleMultiScale(nn.Module):
"""Relation Module with Multi Scale of TRN.
Args:
hidden_dim (int): The dimension of hidden layer of MLP in relation
module.
num_segments (int): Number of frame segments.
num_classes (int): Number of classes to be classified.
"""
def __init__(self, hidden_dim, num_segments, num_classes):
super().__init__()
self.hidden_dim = hidden_dim
self.num_segments = num_segments
self.num_classes = num_classes
# generate the multiple frame relations
self.scales = range(num_segments, 1, -1)
self.relations_scales = []
self.subsample_scales = []
max_subsample = 3
for scale in self.scales:
# select the different frame features for different scales
relations_scale = list(
itertools.combinations(range(self.num_segments), scale))
self.relations_scales.append(relations_scale)
# sample at most `max_subsample` relations per scale
self.subsample_scales.append(
min(max_subsample, len(relations_scale)))
assert len(self.relations_scales[0]) == 1
bottleneck_dim = 256
self.fc_fusion_scales = nn.ModuleList()
for scale in self.scales:
fc_fusion = nn.Sequential(
nn.ReLU(), nn.Linear(scale * self.hidden_dim, bottleneck_dim),
nn.ReLU(), nn.Linear(bottleneck_dim, self.num_classes))
self.fc_fusion_scales.append(fc_fusion)
def init_weights(self):
        # Use the default kaiming_uniform for all nn.Linear layers.
pass
def forward(self, x):
# the first one is the largest scale
act_all = x[:, self.relations_scales[0][0], :]
act_all = act_all.view(
act_all.size(0), self.scales[0] * self.hidden_dim)
act_all = self.fc_fusion_scales[0](act_all)
for scaleID in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(
len(self.relations_scales[scaleID]),
self.subsample_scales[scaleID],
replace=False)
for idx in idx_relations_randomsample:
act_relation = x[:, self.relations_scales[scaleID][idx], :]
act_relation = act_relation.view(
act_relation.size(0),
self.scales[scaleID] * self.hidden_dim)
act_relation = self.fc_fusion_scales[scaleID](act_relation)
act_all += act_relation
return act_all
@HEADS.register_module()
class TRNHead(BaseHead):
"""Class head for TRN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss. Default:
dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
relation_type (str): The relation module type. Choices are 'TRN' or
'TRNMultiScale'. Default: 'TRNMultiScale'.
hidden_dim (int): The dimension of hidden layer of MLP in relation
module. Default: 256.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.001.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
relation_type='TRNMultiScale',
hidden_dim=256,
dropout_ratio=0.8,
init_std=0.001,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.num_classes = num_classes
self.in_channels = in_channels
self.num_segments = num_segments
self.spatial_type = spatial_type
self.relation_type = relation_type
self.hidden_dim = hidden_dim
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.relation_type == 'TRN':
self.consensus = RelationModule(self.hidden_dim, self.num_segments,
self.num_classes)
elif self.relation_type == 'TRNMultiScale':
self.consensus = RelationModuleMultiScale(self.hidden_dim,
self.num_segments,
self.num_classes)
else:
raise ValueError(f'Unknown Relation Type {self.relation_type}!')
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.hidden_dim)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to pool spatial dims to 1x1 for any input size.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
self.consensus.init_weights()
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TRNHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TRN models. The `self.num_segments` we need is a
hyper parameter to build TRN models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
        cls_score = self.fc_cls(x)
        # [N * num_segs, hidden_dim]
        cls_score = cls_score.view((-1, self.num_segments) +
                                   cls_score.size()[1:])
        # [N, num_segs, hidden_dim]
# [N, num_classes]
cls_score = self.consensus(cls_score)
return cls_score
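# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a shape-level smoke
# test for TRNHead, assuming mmaction2 and its loss registry are importable.
# All sizes below are illustrative, not values from any real config.
if __name__ == '__main__':
    head = TRNHead(num_classes=174, in_channels=256, num_segments=8,
                   hidden_dim=256)
    head.init_weights()
    # 2 videos x 8 segments of 256-channel 7x7 feature maps
    feat = torch.rand(2 * 8, 256, 7, 7)
    scores = head(feat, num_segs=None)
    assert scores.shape == (2, 174)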
| 7,868 | 36.293839 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/tsm_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSMHead(BaseHead):
"""Class head for TSM.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
        dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.001.
is_shift (bool): Indicating whether the feature is shifted.
Default: True.
temporal_pool (bool): Indicating whether feature is temporal pooled.
Default: False.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.8,
init_std=0.001,
is_shift=True,
temporal_pool=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.num_segments = num_segments
self.init_std = init_std
self.is_shift = is_shift
self.temporal_pool = temporal_pool
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to pool spatial dims to 1x1 for any input size.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
cls_score = self.fc_cls(x)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
return cls_score.squeeze(1)
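# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file), assuming mmaction2 is
# installed so the loss registry resolves; sizes are illustrative.
if __name__ == '__main__':
    head = TSMHead(num_classes=400, in_channels=2048, num_segments=8)
    head.init_weights()
    feat = torch.rand(3 * 8, 2048, 7, 7)   # 3 videos x 8 segments
    scores = head(feat, num_segs=None)
    assert scores.shape == (3, 400)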
| 4,170 | 36.241071 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/bbox_head.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.core.bbox import bbox_target
try:
from mmdet.models.builder import HEADS as MMDET_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class BBoxHeadAVA(nn.Module):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively.
Args:
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
in_channels (int): The number of input channels. Default: 2048.
focal_alpha (float): The hyper-parameter alpha for Focal Loss.
When alpha == 1 and gamma == 0, Focal Loss degenerates to
BCELossWithLogits. Default: 1.
focal_gamma (float): The hyper-parameter gamma for Focal Loss.
When alpha == 1 and gamma == 0, Focal Loss degenerates to
BCELossWithLogits. Default: 0.
num_classes (int): The number of classes. Default: 81.
dropout_ratio (float): A float in [0, 1], indicates the dropout_ratio.
Default: 0.
dropout_before_pool (bool): Dropout Feature before spatial temporal
pooling. Default: True.
topk (int or tuple[int]): Parameter for evaluating multilabel accuracy.
Default: (3, 5)
multilabel (bool): Whether used for a multilabel task. Default: True.
(Only support multilabel == True now).
"""
def __init__(
self,
temporal_pool_type='avg',
spatial_pool_type='max',
in_channels=2048,
# The first class is reserved, to classify bbox as pos / neg
focal_gamma=0.,
focal_alpha=1.,
num_classes=81,
dropout_ratio=0,
dropout_before_pool=True,
topk=(3, 5),
multilabel=True):
super(BBoxHeadAVA, self).__init__()
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.temporal_pool_type = temporal_pool_type
self.spatial_pool_type = spatial_pool_type
self.in_channels = in_channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.dropout_before_pool = dropout_before_pool
self.multilabel = multilabel
self.focal_gamma = focal_gamma
self.focal_alpha = focal_alpha
if topk is None:
self.topk = ()
elif isinstance(topk, int):
self.topk = (topk, )
elif isinstance(topk, tuple):
assert all([isinstance(k, int) for k in topk])
self.topk = topk
else:
raise TypeError('topk should be int or tuple[int], '
f'but get {type(topk)}')
        # Class 0 is ignored when calculating multilabel accuracy,
# so topk cannot be equal to num_classes
assert all([k < num_classes for k in self.topk])
# Handle AVA first
assert self.multilabel
in_channels = self.in_channels
# Pool by default
if self.temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if self.spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
if dropout_ratio > 0:
self.dropout = nn.Dropout(dropout_ratio)
self.fc_cls = nn.Linear(in_channels, num_classes)
self.debug_imgs = None
def init_weights(self):
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
def forward(self, x):
if self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = self.temporal_pool(x)
x = self.spatial_pool(x)
if not self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x)
# We do not predict bbox, so return None
return cls_score, None
@staticmethod
def get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = bbox_target(pos_proposals, neg_proposals,
pos_gt_labels, rcnn_train_cfg)
return cls_reg_targets
@staticmethod
def recall_prec(pred_vec, target_vec):
"""
Args:
pred_vec (tensor[N x C]): each element is either 0 or 1
target_vec (tensor[N x C]): each element is either 0 or 1
"""
correct = pred_vec & target_vec
        # torch 1.5 seems to lack automatic type conversion here
recall = correct.sum(1) / target_vec.sum(1).float()
prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)
return recall.mean(), prec.mean()
def multi_label_accuracy(self, pred, target, thr=0.5):
pred = pred.sigmoid()
pred_vec = pred > thr
# Target is 0 or 1, so using 0.5 as the borderline is OK
target_vec = target > 0.5
recall_thr, prec_thr = self.recall_prec(pred_vec, target_vec)
recalls, precs = [], []
for k in self.topk:
_, pred_label = pred.topk(k, 1, True, True)
pred_vec = pred.new_full(pred.size(), 0, dtype=torch.bool)
num_sample = pred.shape[0]
for i in range(num_sample):
pred_vec[i, pred_label[i]] = 1
recall_k, prec_k = self.recall_prec(pred_vec, target_vec)
recalls.append(recall_k)
precs.append(prec_k)
return recall_thr, prec_thr, recalls, precs
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets=None,
bbox_weights=None,
reduce=True):
losses = dict()
if cls_score is not None:
# Only use the cls_score
labels = labels[:, 1:]
pos_inds = torch.sum(labels, dim=-1) > 0
cls_score = cls_score[pos_inds, 1:]
labels = labels[pos_inds]
bce_loss = F.binary_cross_entropy_with_logits
loss = bce_loss(cls_score, labels, reduction='none')
pt = torch.exp(-loss)
F_loss = self.focal_alpha * (1 - pt)**self.focal_gamma * loss
losses['loss_action_cls'] = torch.mean(F_loss)
recall_thr, prec_thr, recall_k, prec_k = self.multi_label_accuracy(
cls_score, labels, thr=0.5)
losses['recall@thr=0.5'] = recall_thr
losses['prec@thr=0.5'] = prec_thr
for i, k in enumerate(self.topk):
losses[f'recall@top{k}'] = recall_k[i]
losses[f'prec@top{k}'] = prec_k[i]
return losses
def get_det_bboxes(self,
rois,
cls_score,
img_shape,
flip=False,
crop_quadruple=None,
cfg=None):
# might be used by testing w. augmentation
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
assert self.multilabel
scores = cls_score.sigmoid() if cls_score is not None else None
bboxes = rois[:, 1:]
assert bboxes.shape[-1] == 4
# First reverse the flip
img_h, img_w = img_shape
if flip:
bboxes_ = bboxes.clone()
bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
bboxes = bboxes_
# Then normalize the bbox to [0, 1]
bboxes[:, 0::2] /= img_w
bboxes[:, 1::2] /= img_h
def _bbox_crop_undo(bboxes, crop_quadruple):
decropped = bboxes.clone()
if crop_quadruple is not None:
x1, y1, tw, th = crop_quadruple
decropped[:, 0::2] = bboxes[..., 0::2] * tw + x1
decropped[:, 1::2] = bboxes[..., 1::2] * th + y1
return decropped
bboxes = _bbox_crop_undo(bboxes, crop_quadruple)
return bboxes, scores
if mmdet_imported:
MMDET_HEADS.register_module()(BBoxHeadAVA)
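# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): random logits and
# targets through the multilabel accuracy helper; every number below is
# illustrative, not taken from an AVA config.
if __name__ == '__main__':
    head = BBoxHeadAVA(in_channels=2048, num_classes=81, topk=(3, 5))
    head.init_weights()
    roi_feat = torch.rand(4, 2048, 4, 8, 8)   # 4 RoIs of (C, T, H, W) features
    cls_score, _ = head(roi_feat)             # -> [4, 81], no box regression
    target = torch.zeros(4, 80)
    target[:, :3] = 1.                        # pretend 3 positive labels each
    recall, prec, recalls, precs = head.multi_label_accuracy(
        cls_score[:, 1:], target, thr=0.5)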
| 8,768 | 34.358871 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/misc_head.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.utils import _BatchNorm
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
# Note: All these heads take 5D Tensors as input (N, C, T, H, W)
class ACRNHead(nn.Module):
"""ACRN Head: Tile + 1x1 convolution + 3x3 convolution.
This module is proposed in
`Actor-Centric Relation Network
<https://arxiv.org/abs/1807.10982>`_
Args:
in_channels (int): The input channel.
out_channels (int): The output channel.
stride (int): The spatial stride.
num_convs (int): The number of 3x3 convolutions in ACRNHead.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv3d').
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN3d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
num_convs=1,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
self.num_convs = num_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.max_pool = nn.AdaptiveMaxPool3d(1)
self.conv1 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert num_convs >= 1
self.conv2 = ConvModule(
out_channels,
out_channels,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, 1, 1),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
convs = []
for _ in range(num_convs - 1):
conv = ConvModule(
out_channels,
out_channels,
kernel_size=(1, 3, 3),
padding=(0, 1, 1),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
convs.append(conv)
self.convs = nn.ModuleList(convs)
def init_weights(self, **kwargs):
"""Weight Initialization for ACRNHead."""
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
def forward(self, x, feat, rois, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
feat (torch.Tensor): The context feature.
rois (torch.Tensor): The regions of interest.
Returns:
torch.Tensor: The RoI features that have interacted with context
feature.
"""
# We use max pooling by default
x = self.max_pool(x)
h, w = feat.shape[-2:]
x_tile = x.repeat(1, 1, 1, h, w)
roi_inds = rois[:, 0].type(torch.long)
roi_gfeat = feat[roi_inds]
new_feat = torch.cat([x_tile, roi_gfeat], dim=1)
new_feat = self.conv1(new_feat)
new_feat = self.conv2(new_feat)
for conv in self.convs:
new_feat = conv(new_feat)
return new_feat
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(ACRNHead)
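# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): relating 3 RoI
# features to pooled global context. Channel and spatial sizes are
# illustrative assumptions; the first roi column holds the batch index.
if __name__ == '__main__':
    acrn = ACRNHead(in_channels=2048, out_channels=512)
    acrn.init_weights()
    roi_feat = torch.rand(3, 1024, 4, 7, 7)        # per-RoI features
    global_feat = torch.rand(2, 1024, 1, 16, 16)   # temporally pooled context
    rois = torch.tensor([[0., 0, 0, 10, 10],
                         [0., 2, 2, 12, 12],
                         [1., 1, 1, 8, 8]])
    out = acrn(roi_feat, global_feat, rois)
    assert out.shape == (3, 512, 1, 16, 16)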
| 4,040 | 29.613636 | 76 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/tpn_head.py | import torch.nn as nn
from ..builder import HEADS
from .tsn_head import TSNHead
@HEADS.register_module()
class TPNHead(TSNHead):
"""Class head for TPN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: https://arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool3d` to pool (T, H, W) down to 1x1x1 for any input size.
self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool3d = None
self.avg_pool2d = None
self.new_cls = None
def _init_new_cls(self):
self.new_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, 1, 0)
if next(self.fc_cls.parameters()).is_cuda:
self.new_cls = self.new_cls.cuda()
self.new_cls.weight.copy_(self.fc_cls.weight[..., None, None, None])
self.new_cls.bias.copy_(self.fc_cls.bias)
def forward(self, x, num_segs=None, fcn_test=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int | None): Number of segments into which a video
is divided. Default: None.
fcn_test (bool): Whether to apply full convolution (fcn) testing.
Default: False.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if fcn_test:
if self.avg_pool3d:
x = self.avg_pool3d(x)
if self.new_cls is None:
self._init_new_cls()
cls_score_feat_map = self.new_cls(x)
return cls_score_feat_map
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
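# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the 3D path
# (num_segs=None) used with TPN features; sizes are illustrative and assume
# mmaction2 is installed.
if __name__ == '__main__':
    head = TPNHead(num_classes=400, in_channels=2048)
    head.init_weights()
    feat = torch.rand(2, 2048, 3, 7, 7)
    scores = head(feat, num_segs=None)
    assert scores.shape == (2, 400)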
| 3,306 | 35.340659 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/x3d_head.py | import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class X3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
fc1_bias (bool): If the first fc layer has bias. Default: False.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
fc1_bias=False):
super().__init__(num_classes, in_channels, loss_cls)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.in_channels = in_channels
self.mid_channels = 2048
self.num_classes = num_classes
self.fc1_bias = fc1_bias
self.fc1 = nn.Linear(
self.in_channels, self.mid_channels, bias=self.fc1_bias)
self.fc2 = nn.Linear(self.mid_channels, self.num_classes)
self.relu = nn.ReLU()
self.pool = None
if self.spatial_type == 'avg':
self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
elif self.spatial_type == 'max':
self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
else:
raise NotImplementedError
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc1, std=self.init_std)
normal_init(self.fc2, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, T, H, W]
assert self.pool is not None
x = self.pool(x)
        # [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
x = self.fc1(x)
# [N, 2048]
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
cls_score = self.fc2(x)
# [N, num_classes]
return cls_score
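# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file); the 432-channel input
# mirrors common X3D configs but is an assumption here.
if __name__ == '__main__':
    head = X3DHead(num_classes=400, in_channels=432)
    head.init_weights()
    scores = head(torch.rand(2, 432, 16, 7, 7))
    assert scores.shape == (2, 400)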
| 2,837 | 30.533333 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/slowfast_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class SlowFastHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.8,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.spatial_type == 'avg':
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
        # ([N, channel_fast, T, H, W], [N, channel_slow, T, H, W])
x_fast, x_slow = x
# ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1])
x_fast = self.avg_pool(x_fast)
x_slow = self.avg_pool(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_slow, x_fast), dim=1)
if self.dropout is not None:
x = self.dropout(x)
# [N x C]
x = x.view(x.size(0), -1)
# [N x num_classes]
cls_score = self.fc_cls(x)
return cls_score
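# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): fusing the two
# pathway features; channel and temporal sizes are illustrative.
if __name__ == '__main__':
    head = SlowFastHead(num_classes=400, in_channels=2048 + 256)
    head.init_weights()
    x_fast = torch.rand(2, 256, 32, 7, 7)
    x_slow = torch.rand(2, 2048, 4, 7, 7)
    scores = head((x_fast, x_slow))
    assert scores.shape == (2, 400)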
| 2,542 | 30.7875 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/tsn_head.py | import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSNHead(BaseHead):
"""Class head for TSN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to pool spatial dims to 1x1 for any input size.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
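# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): segment consensus over
# 3 segments per video; sizes are illustrative and assume mmaction2 is
# installed.
if __name__ == '__main__':
    head = TSNHead(num_classes=400, in_channels=2048)
    head.init_weights()
    feat = torch.rand(2 * 3, 2048, 7, 7)   # 2 videos x 3 segments
    scores = head(feat, num_segs=3)
    assert scores.shape == (2, 400)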
| 3,148 | 33.228261 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/lfb_infer_head.py | import os.path as osp
import mmcv
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import get_dist_info
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class LFBInferHead(nn.Module):
"""Long-Term Feature Bank Infer Head.
This head is used to derive and save the LFB without affecting the input.
Args:
lfb_prefix_path (str): The prefix path to store the lfb.
dataset_mode (str, optional): Which dataset to be inferred. Choices are
'train', 'val' or 'test'. Default: 'train'.
use_half_precision (bool, optional): Whether to store the
half-precision roi features. Default: True.
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
"""
def __init__(self,
lfb_prefix_path,
dataset_mode='train',
use_half_precision=True,
temporal_pool_type='avg',
spatial_pool_type='max'):
super().__init__()
rank, _ = get_dist_info()
if rank == 0:
if not osp.exists(lfb_prefix_path):
print(f'lfb prefix path {lfb_prefix_path} does not exist. '
f'Creating the folder...')
mmcv.mkdir_or_exist(lfb_prefix_path)
print('\nInferring LFB...')
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.lfb_prefix_path = lfb_prefix_path
self.dataset_mode = dataset_mode
self.use_half_precision = use_half_precision
# Pool by default
if temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
self.all_features = []
self.all_metadata = []
def init_weights(self, pretrained=None):
# LFBInferHead has no parameters to be initialized.
pass
def forward(self, x, rois, img_metas, **kwargs):
# [N, C, 1, 1, 1]
features = self.temporal_pool(x)
features = self.spatial_pool(features)
if self.use_half_precision:
features = features.half()
inds = rois[:, 0].type(torch.int64)
for ind in inds:
self.all_metadata.append(img_metas[ind]['img_key'])
self.all_features += list(features)
        # Return the input directly; this head does not modify it.
return x
def __del__(self):
assert len(self.all_features) == len(self.all_metadata), (
'features and metadata are not equal in length!')
rank, world_size = get_dist_info()
if world_size > 1:
dist.barrier()
_lfb = {}
for feature, metadata in zip(self.all_features, self.all_metadata):
video_id, timestamp = metadata.split(',')
timestamp = int(timestamp)
if video_id not in _lfb:
_lfb[video_id] = {}
if timestamp not in _lfb[video_id]:
_lfb[video_id][timestamp] = []
_lfb[video_id][timestamp].append(torch.squeeze(feature))
_lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path,
f'_lfb_{self.dataset_mode}_{rank}.pkl'))
torch.save(_lfb, _lfb_file_path)
print(f'{len(self.all_features)} features from {len(_lfb)} videos '
f'on GPU {rank} have been stored in {_lfb_file_path}.')
# Synchronizes all processes to make sure all gpus have stored their
# roi features
if world_size > 1:
dist.barrier()
if rank > 0:
return
print('Gathering all the roi features...')
lfb = {}
for rank_id in range(world_size):
_lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path,
f'_lfb_{self.dataset_mode}_{rank_id}.pkl'))
# Since each frame will only be distributed to one GPU,
# the roi features on the same timestamp of the same video are all
# on the same GPU
_lfb = torch.load(_lfb_file_path)
for video_id in _lfb:
if video_id not in lfb:
lfb[video_id] = _lfb[video_id]
else:
lfb[video_id].update(_lfb[video_id])
lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{self.dataset_mode}.pkl'))
torch.save(lfb, lfb_file_path)
print(f'LFB has been constructed in {lfb_file_path}!')
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(LFBInferHead)
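# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one inference step.
# The prefix path and the 'video_id,timestamp' img_keys are placeholders;
# note that __del__ will write pickle files under the prefix path on exit.
if __name__ == '__main__':
    head = LFBInferHead(lfb_prefix_path='/tmp/lfb_demo', dataset_mode='val')
    x = torch.rand(2, 2048, 4, 8, 8)
    rois = torch.tensor([[0., 1, 1, 5, 5], [1., 2, 2, 6, 6]])
    metas = [dict(img_key='vid_a,0902'), dict(img_key='vid_b,0903')]
    out = head(x, rois, metas)
    assert out is x and len(head.all_features) == 2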
| 5,150 | 34.280822 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/i3d_head.py | import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class I3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool3d` to pool (T, H, W) down to 1x1x1 for any input size.
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
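# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file); sizes are illustrative
# and assume mmaction2 is installed.
if __name__ == '__main__':
    head = I3DHead(num_classes=400, in_channels=2048)
    head.init_weights()
    scores = head(torch.rand(2, 2048, 4, 7, 7))
    assert scores.shape == (2, 400)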
| 2,446 | 32.067568 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/models/heads/roi_head.py | import numpy as np
from mmaction.core.bbox import bbox2result
from mmaction.utils import import_module_error_class
try:
from mmdet.core.bbox import bbox2roi
from mmdet.models import HEADS as MMDET_HEADS
from mmdet.models.roi_heads import StandardRoIHead
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
if mmdet_imported:
@MMDET_HEADS.register_module()
class AVARoIHead(StandardRoIHead):
def _bbox_forward(self, x, rois, img_metas):
"""Defines the computation performed to get bbox predictions.
Args:
x (torch.Tensor): The input tensor.
rois (torch.Tensor): The regions of interest.
                img_metas (list): The meta info of images.
Returns:
dict: bbox predictions with features and classification scores.
"""
bbox_feat, global_feat = self.bbox_roi_extractor(x, rois)
if self.with_shared_head:
bbox_feat = self.shared_head(
bbox_feat,
feat=global_feat,
rois=rois,
img_metas=img_metas)
cls_score, bbox_pred = self.bbox_head(bbox_feat)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feat)
return bbox_results
def _bbox_forward_train(self, x, sampling_results, gt_bboxes,
gt_labels, img_metas):
"""Run forward function and calculate loss for box head in
training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois, img_metas)
bbox_targets = self.bbox_head.get_targets(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Defines the computation performed for simple testing."""
assert self.with_bbox, 'Bbox head must be implemented.'
if isinstance(x, tuple):
x_shape = x[0].shape
else:
x_shape = x.shape
assert x_shape[0] == 1, 'only accept 1 sample at test mode'
assert x_shape[0] == len(img_metas) == len(proposal_list)
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(
det_bboxes,
det_labels,
self.bbox_head.num_classes,
thr=self.test_cfg.action_thr)
return [bbox_results]
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
bbox_results = self._bbox_forward(x, rois, img_metas)
cls_score = bbox_results['cls_score']
img_shape = img_metas[0]['img_shape']
crop_quadruple = np.array([0, 0, 1, 1])
flip = False
if 'crop_quadruple' in img_metas[0]:
crop_quadruple = img_metas[0]['crop_quadruple']
if 'flip' in img_metas[0]:
flip = img_metas[0]['flip']
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
rois,
cls_score,
img_shape,
flip=flip,
crop_quadruple=crop_quadruple,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
else:
# Just define an empty class, so that __init__ can import it.
@import_module_error_class('mmdet')
class AVARoIHead:
pass
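# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): how this head is typically
# referenced from a config dict. Every field value below is illustrative and
# resolving it requires mmdet plus mmaction2's SingleRoIExtractor3D.
ava_roi_head_example = dict(
    type='AVARoIHead',
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor3D',
        roi_layer_type='RoIAlign',
        output_size=8,
        with_temporal_pool=True),
    bbox_head=dict(
        type='BBoxHeadAVA',
        in_channels=2304,
        num_classes=81,
        multilabel=True,
        dropout_ratio=0.5))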
| 4,487 | 35.487805 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/base.py | import copy
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import mmcv
import numpy as np
import torch
from mmcv.utils import print_log
from torch.utils.data import Dataset
from ..core import (mean_average_precision, mean_class_accuracy,
mmit_mean_average_precision, top_k_accuracy)
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets to process video should subclass it.
All subclasses should overwrite:
    - Methods:`load_annotations`, which loads video information from an
    annotation file.
- Methods:`prepare_train_frames`, providing train data.
- Methods:`prepare_test_frames`, providing test data.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
multi_class (bool): Determines whether the dataset is a multi-class
dataset. Default: False.
num_classes (int | None): Number of classes of the dataset, used in
multi-class datasets. Default: None.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow', 'Audio'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: 0.
dynamic_length (bool): If the dataset length is dynamic (used by
ClassSpecificDistributedSampler). Default: False.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=0,
dynamic_length=False):
super().__init__()
self.ann_file = ann_file
self.data_prefix = osp.realpath(
data_prefix) if data_prefix is not None and osp.isdir(
data_prefix) else data_prefix
self.test_mode = test_mode
self.multi_class = multi_class
self.num_classes = num_classes
self.start_index = start_index
self.modality = modality
self.sample_by_class = sample_by_class
self.power = power
self.dynamic_length = dynamic_length
assert not (self.multi_class and self.sample_by_class)
self.pipeline = Compose(pipeline)
self.video_infos = self.load_annotations()
if self.sample_by_class:
self.video_infos_by_class = self.parse_by_class()
class_prob = []
for _, samples in self.video_infos_by_class.items():
class_prob.append(len(samples) / len(self.video_infos))
class_prob = [x**self.power for x in class_prob]
summ = sum(class_prob)
class_prob = [x / summ for x in class_prob]
self.class_prob = dict(zip(self.video_infos_by_class, class_prob))
@abstractmethod
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
        # json annotations already look like video_infos, so for each dataset,
# this func should be the same
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename'
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
if self.multi_class:
assert self.num_classes is not None
else:
assert len(video_infos[i]['label']) == 1
video_infos[i]['label'] = video_infos[i]['label'][0]
return video_infos
def parse_by_class(self):
video_infos_by_class = defaultdict(list)
for item in self.video_infos:
label = item['label']
video_infos_by_class[label].append(item)
return video_infos_by_class
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
def evaluate(self,
results,
metrics='top_k_accuracy',
metric_options=dict(top_k_accuracy=dict(topk=(1, 5))),
logger=None,
**deprecated_kwargs):
"""Perform evaluation for common datasets.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'top_k_accuracy'.
metric_options (dict): Dict for metric options. Options are
``topk`` for ``top_k_accuracy``.
Default: ``dict(top_k_accuracy=dict(topk=(1, 5)))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics has been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['top_k_accuracy'] = dict(
metric_options['top_k_accuracy'], **deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = [
'top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision',
'mmit_mean_average_precision'
]
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = OrderedDict()
gt_labels = [ann['label'] for ann in self.video_infos]
for metric in metrics:
msg = f'Evaluating {metric} ...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'top_k_accuracy':
topk = metric_options.setdefault('top_k_accuracy',
{}).setdefault(
'topk', (1, 5))
if not isinstance(topk, (int, tuple)):
raise TypeError('topk must be int or tuple of int, '
f'but got {type(topk)}')
if isinstance(topk, int):
topk = (topk, )
top_k_acc = top_k_accuracy(results, gt_labels, topk)
log_msg = []
for k, acc in zip(topk, top_k_acc):
eval_results[f'top{k}_acc'] = acc
log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric == 'mean_class_accuracy':
mean_acc = mean_class_accuracy(results, gt_labels)
eval_results['mean_class_accuracy'] = mean_acc
log_msg = f'\nmean_acc\t{mean_acc:.4f}'
print_log(log_msg, logger=logger)
continue
if metric in [
'mean_average_precision', 'mmit_mean_average_precision'
]:
gt_labels = [
self.label2array(self.num_classes, label)
for label in gt_labels
]
if metric == 'mean_average_precision':
mAP = mean_average_precision(results, gt_labels)
eval_results['mean_average_precision'] = mAP
log_msg = f'\nmean_average_precision\t{mAP:.4f}'
elif metric == 'mmit_mean_average_precision':
mAP = mmit_mean_average_precision(results, gt_labels)
eval_results['mmit_mean_average_precision'] = mAP
log_msg = f'\nmmit_mean_average_precision\t{mAP:.4f}'
print_log(log_msg, logger=logger)
continue
return eval_results
@staticmethod
def dump_results(results, out):
"""Dump data to json/yaml/pickle strings or files."""
return mmcv.dump(results, out)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def __getitem__(self, idx):
"""Get the sample for either training or testing given index."""
if self.test_mode:
return self.prepare_test_frames(idx)
return self.prepare_train_frames(idx)
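# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the minimal subclassing
# contract, where only `load_annotations` is mandatory. The two-column
# 'filename label' annotation format below is made up for illustration.
class _ToyVideoDataset(BaseDataset):
    def load_annotations(self):
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                filename, label = line.strip().split()
                if self.data_prefix is not None:
                    filename = osp.join(self.data_prefix, filename)
                video_infos.append(dict(filename=filename, label=int(label)))
        return video_infos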
| 11,612 | 39.322917 | 105 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/audio_dataset.py | import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class AudioDataset(BaseDataset):
"""Audio dataset for video recognition. Extracts the audio feature on-the-
fly. Annotation file can be that of the rawframe dataset, or:
.. code-block:: txt
some/directory-1.wav 163 1
some/directory-2.wav 122 1
some/directory-3.wav 258 2
some/directory-4.wav 234 2
some/directory-5.wav 295 3
some/directory-6.wav 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio file. Default: '.wav'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.wav', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename + self.suffix)
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| 2,555 | 35.514286 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/audio_feature_dataset.py | import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class AudioFeatureDataset(BaseDataset):
"""Audio feature dataset for video recognition. Reads the features
extracted off-line. Annotation file can be that of the rawframe dataset,
or:
.. code-block:: txt
some/directory-1.npy 163 1
some/directory-2.npy 122 1
some/directory-3.npy 258 2
some/directory-4.npy 234 2
some/directory-5.npy 295 3
some/directory-6.npy 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio feature file. Default: '.npy'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.npy', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename) + self.suffix
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| 2,581 | 35.366197 | 76 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/blending_utils.py | from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from torch.distributions.beta import Beta
from .builder import BLENDINGS
__all__ = ['BaseMiniBatchBlending', 'MixupBlending', 'CutmixBlending', 'LabelSmoothing']
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
class BaseMiniBatchBlending(metaclass=ABCMeta):
"""Base class for Image Aliasing."""
def __init__(self, num_classes, smoothing=0.):
self.num_classes = num_classes
self.off_value = smoothing / self.num_classes
self.on_value = 1. - smoothing + self.off_value
@abstractmethod
def do_blending(self, imgs, label, **kwargs):
pass
def __call__(self, imgs, label, **kwargs):
"""Blending data in a mini-batch.
Images are float tensors with the shape of (B, N, C, H, W) for 2D
recognizers or (B, N, C, T, H, W) for 3D recognizers.
Besides, labels are converted from hard labels to soft labels.
Hard labels are integer tensors with the shape of (B, 1) and all of the
elements are in the range [0, num_classes - 1].
        Soft labels (probability distribution over classes) are float tensors
        with the shape of (B, num_classes) and all of the elements are in
        the range [0, 1].
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): Hard labels, integer tensor with the shape
of (B, 1) and all elements are in range [0, num_classes).
kwargs (dict, optional): Other keyword argument to be used to
blending imgs and labels in a mini-batch.
Returns:
mixed_imgs (torch.Tensor): Blending images, float tensor with the
same shape of the input imgs.
            mixed_label (torch.Tensor): Blended soft labels, float tensor with
                the shape of (B, num_classes) and all elements are in range
                [0, 1].
"""
one_hot_label = one_hot(label, num_classes=self.num_classes, on_value=self.on_value, off_value=self.off_value, device=label.device)
mixed_imgs, mixed_label = self.do_blending(imgs, one_hot_label,
**kwargs)
return mixed_imgs, mixed_label
@BLENDINGS.register_module()
class MixupBlending(BaseMiniBatchBlending):
"""Implementing Mixup in a mini-batch.
This module is proposed in `mixup: Beyond Empirical Risk Minimization
<https://arxiv.org/abs/1710.09412>`_.
Code Reference https://github.com/open-mmlab/mmclassification/blob/master/mmcls/models/utils/mixup.py # noqa
Args:
num_classes (int): The number of classes.
alpha (float): Parameters for Beta distribution.
"""
def __init__(self, num_classes, alpha=.2, smoothing=0.):
super().__init__(num_classes=num_classes, smoothing=smoothing)
self.beta = Beta(alpha, alpha)
def do_blending(self, imgs, label, **kwargs):
"""Blending images with mixup."""
assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'
lam = self.beta.sample()
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index, :]
mixed_label = lam * label + (1 - lam) * label[rand_index, :]
return mixed_imgs, mixed_label
@BLENDINGS.register_module()
class CutmixBlending(BaseMiniBatchBlending):
"""Implementing Cutmix in a mini-batch.
This module is proposed in `CutMix: Regularization Strategy to Train Strong
Classifiers with Localizable Features <https://arxiv.org/abs/1905.04899>`_.
Code Reference https://github.com/clovaai/CutMix-PyTorch
Args:
num_classes (int): The number of classes.
alpha (float): Parameters for Beta distribution.
"""
def __init__(self, num_classes, alpha=.2, smoothing=0.):
super().__init__(num_classes=num_classes, smoothing=smoothing)
self.beta = Beta(alpha, alpha)
@staticmethod
def rand_bbox(img_size, lam):
"""Generate a random boudning box."""
w = img_size[-1]
h = img_size[-2]
cut_rat = torch.sqrt(1. - lam)
cut_w = torch.tensor(int(w * cut_rat))
cut_h = torch.tensor(int(h * cut_rat))
# uniform
cx = torch.randint(w, (1, ))[0]
cy = torch.randint(h, (1, ))[0]
bbx1 = torch.clamp(cx - cut_w // 2, 0, w)
bby1 = torch.clamp(cy - cut_h // 2, 0, h)
bbx2 = torch.clamp(cx + cut_w // 2, 0, w)
bby2 = torch.clamp(cy + cut_h // 2, 0, h)
return bbx1, bby1, bbx2, bby2
def do_blending(self, imgs, label, **kwargs):
"""Blending images with cutmix."""
assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
lam = self.beta.sample()
bbx1, bby1, bbx2, bby2 = self.rand_bbox(imgs.size(), lam)
imgs[:, ..., bby1:bby2, bbx1:bbx2] = imgs[rand_index, ..., bby1:bby2,
bbx1:bbx2]
lam = 1 - (1.0 * (bbx2 - bbx1) * (bby2 - bby1) /
(imgs.size()[-1] * imgs.size()[-2]))
label = lam * label + (1 - lam) * label[rand_index, :]
return imgs, label
@BLENDINGS.register_module()
class LabelSmoothing(BaseMiniBatchBlending):
def do_blending(self, imgs, label, **kwargs):
return imgs, label
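# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): blending one
# mini-batch with mixup; the smoothing value and shapes are illustrative.
if __name__ == '__main__':
    blender = MixupBlending(num_classes=400, alpha=0.2, smoothing=0.1)
    imgs = torch.rand(4, 1, 3, 8, 224, 224)    # (B, N, C, T, H, W)
    labels = torch.randint(0, 400, (4, 1))
    mixed_imgs, mixed_labels = blender(imgs, labels)
    assert mixed_imgs.shape == imgs.shape
    assert mixed_labels.shape == (4, 400)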
| 5,741 | 36.529412 | 139 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/rawframe_dataset.py | import copy
import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
"""Rawframe dataset for action recognition.
The dataset loads raw frames and apply specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
the directory to frames of a video, total frames of the video and
the label of a video, which are split with a whitespace.
    Example of an annotation file:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Example of a multi-class annotation file:
.. code-block:: txt
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
some/directory-4 234 2 4 6 8
some/directory-5 295 3
some/directory-6 121 3
Example of a with_offset annotation file (clips from long videos), each
line indicates the directory to frames of a video, the index of the start
frame, total frames of the video clip and the label of a video clip, which
are split with a whitespace.
.. code-block:: txt
some/directory-1 12 163 3
some/directory-2 213 122 4
some/directory-3 100 258 5
some/directory-4 98 234 2
some/directory-5 0 295 3
some/directory-6 50 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
with_offset (bool): Determines whether the offset information is in
ann_file. Default: False.
multi_class (bool): Determines whether it is a multi-class
recognition dataset. Default: False.
num_classes (int | None): Number of classes in the dataset.
Default: None.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: 0.
dynamic_length (bool): If the dataset length is dynamic (used by
ClassSpecificDistributedSampler). Default: False.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
filename_tmpl='img_{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=0.,
dynamic_length=False):
self.filename_tmpl = filename_tmpl
self.with_offset = with_offset
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
multi_class,
num_classes,
start_index,
modality,
sample_by_class=sample_by_class,
power=power,
dynamic_length=dynamic_length)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
| 6,689 | 35.358696 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/hvu_dataset.py | import copy
import os.path as osp
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from ..core import mean_average_precision
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class HVUDataset(BaseDataset):
"""HVU dataset, which supports the recognition tags of multiple categories.
Accept both video annotation files or rawframe annotation files.
The dataset loads videos or raw frames and applies specified transforms to
return a dict containing the frame tensors and other information.
The ann_file is a json file with multiple dictionaries, and each dictionary
    indicates a sample video with its filename and tags; the tags are organized
    into different categories. Example of a video dictionary:
.. code-block:: txt
{
'filename': 'gD_G1b0wV5I_001015_001035.mp4',
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Example of a rawframe dictionary:
.. code-block:: txt
{
'frame_dir': 'gD_G1b0wV5I_001015_001035',
            'total_frames': 61,
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Args:
ann_file (str): Path to the annotation file, should be a json file.
pipeline (list[dict | callable]): A sequence of data transforms.
tag_categories (list[str]): List of category names of tags.
tag_category_nums (list[int]): List of number of tags in each category.
filename_tmpl (str | None): Template for each filename. If set to None,
video dataset is used. Default: None.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
tag_categories,
tag_category_nums,
filename_tmpl=None,
**kwargs):
assert len(tag_categories) == len(tag_category_nums)
self.tag_categories = tag_categories
self.tag_category_nums = tag_category_nums
self.filename_tmpl = filename_tmpl
self.num_categories = len(self.tag_categories)
self.num_tags = sum(self.tag_category_nums)
self.category2num = dict(zip(tag_categories, tag_category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] +
self.tag_category_nums[i])
self.category2startidx = dict(zip(tag_categories, self.start_idx))
self.start_index = kwargs.pop('start_index', 0)
self.dataset_type = None
super().__init__(
ann_file, pipeline, start_index=self.start_index, **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
assert self.ann_file.endswith('.json')
return self.load_json_annotations()
def load_json_annotations(self):
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
video_info0 = video_infos[0]
assert ('filename' in video_info0) != ('frame_dir' in video_info0)
path_key = 'filename' if 'filename' in video_info0 else 'frame_dir'
self.dataset_type = 'video' if path_key == 'filename' else 'rawframe'
if self.dataset_type == 'rawframe':
assert self.filename_tmpl is not None
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
# We will convert label to torch tensors in the pipeline
video_infos[i]['categories'] = self.tag_categories
video_infos[i]['category_nums'] = self.tag_category_nums
if self.dataset_type == 'rawframe':
video_infos[i]['filename_tmpl'] = self.filename_tmpl
video_infos[i]['start_index'] = self.start_index
video_infos[i]['modality'] = self.modality
return video_infos
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
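    # Worked example for label2array (illustrative, not part of the original
    # file): label2array(5, [1, 3]) returns
    # np.array([0., 1., 0., 1., 0.], dtype=np.float32).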
def evaluate(self,
results,
metrics='mean_average_precision',
metric_options=None,
logger=None):
"""Evaluation in HVU Video Dataset. We only support evaluating mAP for
        each tag category. Since some tag categories are missing for some
        videos, we cannot evaluate mAP over all tags.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mean_average_precision'.
metric_options (dict | None): Dict for metric options.
Default: None.
logger (logging.Logger | None): Logger for recording.
Default: None.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
# There should be only one metric in the metrics list:
# 'mean_average_precision'
assert len(metrics) == 1
metric = metrics[0]
assert metric == 'mean_average_precision'
gt_labels = [ann['label'] for ann in self.video_infos]
eval_results = OrderedDict()
for category in self.tag_categories:
start_idx = self.category2startidx[category]
num = self.category2num[category]
preds = [
result[start_idx:start_idx + num]
for video_idx, result in enumerate(results)
if category in gt_labels[video_idx]
]
gts = [
gt_label[category] for gt_label in gt_labels
if category in gt_label
]
gts = [self.label2array(num, item) for item in gts]
mAP = mean_average_precision(preds, gts)
eval_results[f'{category}_mAP'] = mAP
log_msg = f'\n{category}_mAP\t{mAP:.4f}'
print_log(log_msg, logger=logger)
return eval_results
| 7,052 | 35.734375 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/ssn_dataset.py | import copy
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..core import softmax
from ..localization import (eval_ap, load_localize_proposal_file,
perform_regression, temporal_iou, temporal_nms)
from ..utils import get_root_logger
from .base import BaseDataset
from .builder import DATASETS
class SSNInstance:
"""Proposal instance of SSN.
Args:
start_frame (int): Index of the proposal's start frame.
end_frame (int): Index of the proposal's end frame.
num_video_frames (int): Total frames of the video.
label (int | None): The category label of the proposal. Default: None.
best_iou (float): The highest IOU with the groundtruth instance.
Default: 0.
overlap_self (float): Percent of the proposal's own span contained
in a groundtruth instance. Default: 0.
"""
def __init__(self,
start_frame,
end_frame,
num_video_frames,
label=None,
best_iou=0,
overlap_self=0):
self.start_frame = start_frame
self.end_frame = min(end_frame, num_video_frames)
self.num_video_frames = num_video_frames
self.label = label if label is not None else -1
self.coverage = (end_frame - start_frame) / num_video_frames
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
self.regression_targets = [0., 0.]
def compute_regression_targets(self, gt_list):
"""Compute regression targets of positive proposals.
Args:
gt_list (list): The list of groundtruth instances.
"""
# Find the groundtruth instance with the highest IOU.
ious = [
temporal_iou(self.start_frame, self.end_frame, gt.start_frame,
gt.end_frame) for gt in gt_list
]
best_gt = gt_list[np.argmax(ious)]
# interval: [start_frame, end_frame)
proposal_center = (self.start_frame + self.end_frame - 1) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame - 1) / 2
proposal_size = self.end_frame - self.start_frame
gt_size = best_gt.end_frame - best_gt.start_frame
# Get regression targets:
# (1). Localization regression target:
# center shift proportional to the proposal duration
# (2). Duration/Size regression target:
# logarithm of the groundtruth duration over proposal duration
self.loc_reg = (gt_center - proposal_center) / proposal_size
self.size_reg = np.log(gt_size / proposal_size)
self.regression_targets = ([self.loc_reg, self.size_reg]
if self.loc_reg is not None else [0., 0.])
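# Worked example for compute_regression_targets (illustrative, not part of the
# original file). For a proposal [10, 50) whose best-matching groundtruth is
# [20, 60), both of size 40:
#   proposal_center = (10 + 50 - 1) / 2 = 29.5
#   gt_center = (20 + 60 - 1) / 2 = 39.5
#   loc_reg = (39.5 - 29.5) / 40 = 0.25
#   size_reg = log(40 / 40) = 0.0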
@DATASETS.register_module()
class SSNDataset(BaseDataset):
"""Proposal frame dataset for Structured Segment Networks.
Based on proposal information, the dataset loads raw frames and applies
specified transforms to return a dict containing the frame tensors and
other information.
The ann_file is a text file with multiple lines and each
    video's information takes up several lines. This file can be a normalized
    file with percentages or a standard file with specific frame indexes. If
    the file is a normalized file, it will be converted into a standard file
    first.
Template information of a video in a standard file:
.. code-block:: txt
# index
video_id
num_frames
fps
num_gts
label, start_frame, end_frame
label, start_frame, end_frame
...
num_proposals
label, best_iou, overlap_self, start_frame, end_frame
label, best_iou, overlap_self, start_frame, end_frame
...
Example of a standard annotation file:
.. code-block:: txt
# 0
video_validation_0000202
5666
1
3
8 130 185
8 832 1136
8 1303 1381
5
8 0.0620 0.0620 790 5671
8 0.1656 0.1656 790 2619
8 0.0833 0.0833 3945 5671
8 0.0960 0.0960 4173 5671
8 0.0614 0.0614 3327 5671
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
train_cfg (dict): Config for training.
test_cfg (dict): Config for testing.
data_prefix (str): Path to a directory where videos are held.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
start_index (int): Specify a start index for frames in consideration of
different filename format. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
video_centric (bool): Whether to sample proposals just from
this video or sample proposals randomly from the entire dataset.
Default: True.
reg_normalize_constants (list): Regression target normalized constants,
including mean and standard deviation of location and duration.
body_segments (int): Number of segments in course period.
Default: 5.
aug_segments (list[int]): Number of segments in starting and
ending period. Default: (2, 2).
aug_ratio (int | float | tuple[int | float]): The ratio of the length
            of augmentation to that of the proposal. Default: (0.5, 0.5).
clip_len (int): Frames of each sampled output clip.
Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
filter_gt (bool): Whether to filter videos with no annotation
during training. Default: True.
use_regression (bool): Whether to perform regression. Default: True.
verbose (bool): Whether to print full information or not.
Default: False.
"""
def __init__(self,
ann_file,
pipeline,
train_cfg,
test_cfg,
data_prefix,
test_mode=False,
filename_tmpl='img_{:05d}.jpg',
start_index=1,
modality='RGB',
video_centric=True,
reg_normalize_constants=None,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=(0.5, 0.5),
clip_len=1,
frame_interval=1,
filter_gt=True,
use_regression=True,
verbose=False):
self.logger = get_root_logger()
super().__init__(
ann_file,
pipeline,
data_prefix=data_prefix,
test_mode=test_mode,
start_index=start_index,
modality=modality)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.assigner = train_cfg.ssn.assigner
self.sampler = train_cfg.ssn.sampler
self.evaluater = test_cfg.ssn.evaluater
self.verbose = verbose
self.filename_tmpl = filename_tmpl
if filter_gt or not test_mode:
valid_inds = [
i for i, video_info in enumerate(self.video_infos)
if len(video_info['gts']) > 0
]
self.logger.info(f'{len(valid_inds)} out of {len(self.video_infos)} '
f'videos are valid.')
self.video_infos = [self.video_infos[i] for i in valid_inds]
# construct three pools:
# 1. Positive(Foreground)
# 2. Background
# 3. Incomplete
self.positive_pool = []
self.background_pool = []
self.incomplete_pool = []
self.construct_proposal_pools()
if reg_normalize_constants is None:
self.reg_norm_consts = self._compute_reg_normalize_constants()
else:
self.reg_norm_consts = reg_normalize_constants
self.video_centric = video_centric
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
            raise TypeError(f'aug_ratio should be int, float '
f'or tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
self.positive_per_video = int(
self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
self.background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
self.incomplete_per_video = (
self.sampler.num_per_video - self.positive_per_video -
self.background_per_video)
self.test_interval = self.test_cfg.ssn.sampler.test_interval
# number of consecutive frames
self.clip_len = clip_len
# number of steps (sparse sampling for efficiency of io)
self.frame_interval = frame_interval
        self.filter_gt = filter_gt
        self.use_regression = use_regression
        # test mode or not
        self.test_mode = test_mode
# yapf: disable
if self.verbose:
self.logger.info(f"""
SSNDataset: proposal file {self.proposal_file} parsed.
There are {len(self.positive_pool) + len(self.background_pool) +
len(self.incomplete_pool)} usable proposals from {len(self.video_infos)} videos.
{len(self.positive_pool)} positive proposals
{len(self.incomplete_pool)} incomplete proposals
{len(self.background_pool)} background proposals
Sample config:
FG/BG/INCOMP: {self.positive_per_video}/{self.background_per_video}/{self.incomplete_per_video} # noqa:E501
Video Centric: {self.video_centric}
Regression Normalization Constants:
Location: mean {self.reg_norm_consts[0][0]:.05f} std {self.reg_norm_consts[1][0]:.05f} # noqa: E501
Duration: mean {self.reg_norm_consts[0][1]:.05f} std {self.reg_norm_consts[1][1]:.05f} # noqa: E501
""")
# yapf: enable
else:
self.logger.info(
f'SSNDataset: proposal file {self.proposal_file} parsed.')
def load_annotations(self):
"""Load annotation file to get video information."""
video_infos = []
if 'normalized_' in self.ann_file:
self.proposal_file = self.ann_file.replace('normalized_', '')
if not osp.exists(self.proposal_file):
                raise Exception(f'Please refer to `$MMACTION2/tools/data` to '
f'denormalize {self.ann_file}.')
else:
self.proposal_file = self.ann_file
proposal_infos = load_localize_proposal_file(self.proposal_file)
# proposal_info:[video_id, num_frames, gt_list, proposal_list]
# gt_list member: [label, start_frame, end_frame]
# proposal_list member: [label, best_iou, overlap_self,
# start_frame, end_frame]
for proposal_info in proposal_infos:
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, proposal_info[0])
num_frames = int(proposal_info[1])
# gts:start, end, num_frames, class_label, tIoU=1
gts = []
for x in proposal_info[2]:
if int(x[2]) > int(x[1]) and int(x[1]) < num_frames:
ssn_instance = SSNInstance(
int(x[1]),
int(x[2]),
num_frames,
label=int(x[0]),
best_iou=1.0)
gts.append(ssn_instance)
# proposals:start, end, num_frames, class_label
# tIoU=best_iou, overlap_self
proposals = []
for x in proposal_info[3]:
if int(x[4]) > int(x[3]) and int(x[3]) < num_frames:
ssn_instance = SSNInstance(
int(x[3]),
int(x[4]),
num_frames,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]))
proposals.append(ssn_instance)
video_infos.append(
dict(
frame_dir=frame_dir,
video_id=proposal_info[0],
total_frames=num_frames,
gts=gts,
proposals=proposals))
return video_infos
def results_to_detections(self, results, top_k=2000, **kwargs):
"""Convert prediction results into detections.
Args:
results (list): Prediction results.
top_k (int): Number of top results. Default: 2000.
Returns:
list: Detection results.
"""
num_classes = results[0]['activity_scores'].shape[1] - 1
detections = [dict() for _ in range(num_classes)]
for idx in range(len(self)):
video_id = self.video_infos[idx]['video_id']
relative_proposals = results[idx]['relative_proposal_list']
if len(relative_proposals[0].shape) == 3:
relative_proposals = np.squeeze(relative_proposals, 0)
activity_scores = results[idx]['activity_scores']
completeness_scores = results[idx]['completeness_scores']
regression_scores = results[idx]['bbox_preds']
if regression_scores is None:
regression_scores = np.zeros(
(len(relative_proposals), num_classes, 2),
dtype=np.float32)
regression_scores = regression_scores.reshape((-1, num_classes, 2))
if top_k <= 0:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
for i in range(num_classes):
center_scores = regression_scores[:, i, 0][:, None]
duration_scores = regression_scores[:, i, 1][:, None]
detections[i][video_id] = np.concatenate(
(relative_proposals, combined_scores[:, i][:, None],
center_scores, duration_scores),
axis=1)
else:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
for k in keep_idx:
class_idx = k % num_classes
proposal_idx = k // num_classes
                    new_item = [
                        relative_proposals[proposal_idx, 0],
                        relative_proposals[proposal_idx, 1],
                        combined_scores[proposal_idx, class_idx],
                        regression_scores[proposal_idx, class_idx, 0],
                        regression_scores[proposal_idx, class_idx, 1]
                    ]
if video_id not in detections[class_idx]:
detections[class_idx][video_id] = np.array([new_item])
else:
detections[class_idx][video_id] = np.vstack(
[detections[class_idx][video_id], new_item])
return detections
def evaluate(self,
results,
metrics='mAP',
metric_options=dict(mAP=dict(eval_dataset='thumos14')),
logger=None,
**deprecated_kwargs):
"""Evaluation in SSN proposal dataset.
Args:
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mAP'.
metric_options (dict): Dict for metric options. Options are
``eval_dataset`` for ``mAP``.
Default: ``dict(mAP=dict(eval_dataset='thumos14'))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results for evaluation metrics.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics has been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['mAP'] = dict(metric_options['mAP'],
**deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['mAP']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
detections = self.results_to_detections(results, **self.evaluater)
if self.use_regression:
self.logger.info('Performing location regression')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: perform_regression(v)
for k, v in detections[class_idx].items()
}
self.logger.info('Regression finished')
self.logger.info('Performing NMS')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: temporal_nms(v, self.evaluater.nms)
for k, v in detections[class_idx].items()
}
self.logger.info('NMS finished')
# get gts
all_gts = self.get_all_gts()
for class_idx, _ in enumerate(detections):
if class_idx not in all_gts:
all_gts[class_idx] = dict()
# get predictions
plain_detections = {}
for class_idx, _ in enumerate(detections):
detection_list = []
for video, dets in detections[class_idx].items():
detection_list.extend([[video, class_idx] + x[:3]
for x in dets.tolist()])
plain_detections[class_idx] = detection_list
eval_results = OrderedDict()
for metric in metrics:
if metric == 'mAP':
eval_dataset = metric_options.setdefault('mAP', {}).setdefault(
'eval_dataset', 'thumos14')
if eval_dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, .1)
ap_values = eval_ap(plain_detections, all_gts, iou_range)
map_ious = ap_values.mean(axis=0)
self.logger.info('Evaluation finished')
for iou, map_iou in zip(iou_range, map_ious):
eval_results[f'mAP@{iou:.02f}'] = map_iou
return eval_results
def construct_proposal_pools(self):
"""Construct positve proposal pool, incomplete proposal pool and
background proposal pool of the entire dataset."""
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
self.positive_pool.extend([(video_info['video_id'], proposal)
for proposal in positives])
incompletes, backgrounds = self.get_negatives(
video_info['proposals'],
self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
self.incomplete_pool.extend([(video_info['video_id'], proposal)
for proposal in incompletes])
            self.background_pool.extend([(video_info['video_id'], proposal)
                                         for proposal in backgrounds])
def get_all_gts(self):
"""Fetch groundtruth instances of the entire dataset."""
gts = {}
for video_info in self.video_infos:
video = video_info['video_id']
for gt in video_info['gts']:
class_idx = gt.label - 1
# gt_info: [relative_start, relative_end]
gt_info = [
gt.start_frame / video_info['total_frames'],
gt.end_frame / video_info['total_frames']
]
gts.setdefault(class_idx, {}).setdefault(video,
[]).append(gt_info)
return gts
@staticmethod
def get_positives(gts, proposals, positive_threshold, with_gt=True):
"""Get positive/foreground proposals.
Args:
gts (list): List of groundtruth instances(:obj:`SSNInstance`).
proposals (list): List of proposal instances(:obj:`SSNInstance`).
positive_threshold (float): Minimum threshold of overlap of
positive/foreground proposals and groundtruths.
with_gt (bool): Whether to include groundtruth instances in
positive proposals. Default: True.
Returns:
list[:obj:`SSNInstance`]: (positives), positives is a list
comprised of positive proposal instances.
"""
positives = [
proposal for proposal in proposals
if proposal.best_iou > positive_threshold
]
if with_gt:
positives.extend(gts)
for proposal in positives:
proposal.compute_regression_targets(gts)
return positives
@staticmethod
def get_negatives(proposals,
incomplete_iou_threshold,
background_iou_threshold,
background_coverage_threshold=0.01,
incomplete_overlap_threshold=0.7):
"""Get negative proposals, including incomplete proposals and
background proposals.
Args:
proposals (list): List of proposal instances(:obj:`SSNInstance`).
incomplete_iou_threshold (float): Maximum threshold of overlap
of incomplete proposals and groundtruths.
background_iou_threshold (float): Maximum threshold of overlap
of background proposals and groundtruths.
background_coverage_threshold (float): Minimum coverage
of background proposals in video duration. Default: 0.01.
incomplete_overlap_threshold (float): Minimum percent of incomplete
proposals' own span contained in a groundtruth instance.
Default: 0.7.
Returns:
list[:obj:`SSNInstance`]: (incompletes, backgrounds), incompletes
and backgrounds are lists comprised of incomplete
proposal instances and background proposal instances.
"""
incompletes = []
backgrounds = []
for proposal in proposals:
if (proposal.best_iou < incomplete_iou_threshold
and proposal.overlap_self > incomplete_overlap_threshold):
incompletes.append(proposal)
elif (proposal.best_iou < background_iou_threshold
and proposal.coverage > background_coverage_threshold):
backgrounds.append(proposal)
return incompletes, backgrounds
def _video_centric_sampling(self, record):
"""Sample proposals from the this video instance.
Args:
record (dict): Information of the video instance(video_info[idx]).
key: frame_dir, video_id, total_frames,
gts: List of groundtruth instances(:obj:`SSNInstance`).
proposals: List of proposal instances(:obj:`SSNInstance`).
"""
positives = self.get_positives(record['gts'], record['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
incompletes, backgrounds = self.get_negatives(
record['proposals'], self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
def sample_video_proposals(proposal_type, video_id, video_pool,
num_requested_proposals, dataset_pool):
"""This method will sample proposals from the this video pool. If
the video pool is empty, it will fetch from the dataset pool
(collect proposal of the entire dataset).
Args:
proposal_type (int): Type id of proposal.
Positive/Foreground: 0
Negative:
Incomplete: 1
Background: 2
video_id (str): Name of the video.
video_pool (list): Pool comprised of proposals in this video.
num_requested_proposals (int): Number of proposals
to be sampled.
dataset_pool (list): Proposals of the entire dataset.
Returns:
list[(str, :obj:`SSNInstance`), int]:
video_id (str): Name of the video.
:obj:`SSNInstance`: Instance of class SSNInstance.
proposal_type (int): Type of proposal.
"""
if len(video_pool) == 0:
idx = np.random.choice(
len(dataset_pool), num_requested_proposals, replace=False)
return [(dataset_pool[x], proposal_type) for x in idx]
replicate = len(video_pool) < num_requested_proposals
idx = np.random.choice(
len(video_pool), num_requested_proposals, replace=replicate)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_proposals = []
out_proposals.extend(
sample_video_proposals(0, record['video_id'], positives,
self.positive_per_video,
self.positive_pool))
out_proposals.extend(
sample_video_proposals(1, record['video_id'], incompletes,
self.incomplete_per_video,
self.incomplete_pool))
out_proposals.extend(
sample_video_proposals(2, record['video_id'], backgrounds,
self.background_per_video,
self.background_pool))
return out_proposals
def _random_sampling(self):
"""Randomly sample proposals from the entire dataset."""
out_proposals = []
positive_idx = np.random.choice(
len(self.positive_pool),
self.positive_per_video,
replace=len(self.positive_pool) < self.positive_per_video)
out_proposals.extend([(self.positive_pool[x], 0)
for x in positive_idx])
incomplete_idx = np.random.choice(
len(self.incomplete_pool),
self.incomplete_per_video,
replace=len(self.incomplete_pool) < self.incomplete_per_video)
out_proposals.extend([(self.incomplete_pool[x], 1)
for x in incomplete_idx])
background_idx = np.random.choice(
len(self.background_pool),
self.background_per_video,
replace=len(self.background_pool) < self.background_per_video)
out_proposals.extend([(self.background_pool[x], 2)
for x in background_idx])
return out_proposals
def _get_stage(self, proposal, num_frames):
"""Fetch the scale factor of starting and ending stage and get the
stage split.
Args:
proposal (:obj:`SSNInstance`): Proposal instance.
num_frames (int): Total frames of the video.
Returns:
tuple[float, float, list]: (starting_scale_factor,
ending_scale_factor, stage_split), starting_scale_factor is
the ratio of the effective sampling length to augment length
in starting stage, ending_scale_factor is the ratio of the
effective sampling length to augment length in ending stage,
stage_split is ending segment id of starting, course and
ending stage.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
starting_scale_factor = ((valid_starting_length + ori_clip_len + 1) /
(duration * self.aug_ratio[0]))
ending_scale_factor = (valid_ending_length + ori_clip_len + 1) / (
duration * self.aug_ratio[1])
aug_start, aug_end = self.aug_segments
stage_split = [
aug_start, aug_start + self.body_segments,
aug_start + self.body_segments + aug_end
]
return starting_scale_factor, ending_scale_factor, stage_split
def _compute_reg_normalize_constants(self):
"""Compute regression target normalized constants."""
if self.verbose:
self.logger.info('Compute regression target normalized constants')
targets = []
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold, False)
for positive in positives:
targets.append(list(positive.regression_targets))
return np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
if self.video_centric:
# yapf: disable
results['out_proposals'] = self._video_centric_sampling(self.video_infos[idx]) # noqa: E501
# yapf: enable
else:
results['out_proposals'] = self._random_sampling()
out_proposal_scale_factor = []
out_proposal_type = []
out_proposal_labels = []
out_proposal_reg_targets = []
for _, proposal in enumerate(results['out_proposals']):
# proposal: [(video_id, SSNInstance), proposal_type]
num_frames = proposal[0][1].num_video_frames
(starting_scale_factor, ending_scale_factor,
_) = self._get_stage(proposal[0][1], num_frames)
# proposal[1]: Type id of proposal.
# Positive/Foreground: 0
# Negative:
# Incomplete: 1
# Background: 2
            # Positive/Foreground proposal
if proposal[1] == 0:
label = proposal[0][1].label
# Incomplete proposal
elif proposal[1] == 1:
label = proposal[0][1].label
# Background proposal
elif proposal[1] == 2:
label = 0
else:
                raise ValueError(f'Proposal type should be 0, 1, or 2, '
f'but got {proposal[1]}')
out_proposal_scale_factor.append(
[starting_scale_factor, ending_scale_factor])
if not isinstance(label, int):
                raise TypeError(f'proposal_label must be an int, '
f'but got {type(label)}')
out_proposal_labels.append(label)
out_proposal_type.append(proposal[1])
reg_targets = proposal[0][1].regression_targets
if proposal[1] == 0:
# Normalize regression targets of positive proposals.
reg_targets = ((reg_targets[0] - self.reg_norm_consts[0][0]) /
self.reg_norm_consts[1][0],
(reg_targets[1] - self.reg_norm_consts[0][1]) /
self.reg_norm_consts[1][1])
out_proposal_reg_targets.append(reg_targets)
results['reg_targets'] = np.array(
out_proposal_reg_targets, dtype=np.float32)
results['proposal_scale_factor'] = np.array(
out_proposal_scale_factor, dtype=np.float32)
results['proposal_labels'] = np.array(out_proposal_labels)
results['proposal_type'] = np.array(out_proposal_type)
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
proposals = results['proposals']
num_frames = results['total_frames']
ori_clip_len = self.clip_len * self.frame_interval
frame_ticks = np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=int) + 1
num_sampled_frames = len(frame_ticks)
if len(proposals) == 0:
proposals.append(SSNInstance(0, num_frames - 1, num_frames))
relative_proposal_list = []
proposal_tick_list = []
scale_factor_list = []
for proposal in proposals:
relative_proposal = (proposal.start_frame / num_frames,
proposal.end_frame / num_frames)
relative_duration = relative_proposal[1] - relative_proposal[0]
relative_starting_duration = relative_duration * self.aug_ratio[0]
relative_ending_duration = relative_duration * self.aug_ratio[1]
relative_starting = (
relative_proposal[0] - relative_starting_duration)
relative_ending = relative_proposal[1] + relative_ending_duration
real_relative_starting = max(0.0, relative_starting)
real_relative_ending = min(1.0, relative_ending)
starting_scale_factor = (
(relative_proposal[0] - real_relative_starting) /
relative_starting_duration)
ending_scale_factor = (
(real_relative_ending - relative_proposal[1]) /
relative_ending_duration)
proposal_ranges = (real_relative_starting, *relative_proposal,
real_relative_ending)
proposal_ticks = (np.array(proposal_ranges) *
num_sampled_frames).astype(np.int32)
relative_proposal_list.append(relative_proposal)
proposal_tick_list.append(proposal_ticks)
scale_factor_list.append(
(starting_scale_factor, ending_scale_factor))
results['relative_proposal_list'] = np.array(
relative_proposal_list, dtype=np.float32)
results['scale_factor_list'] = np.array(
scale_factor_list, dtype=np.float32)
results['proposal_tick_list'] = np.array(
proposal_tick_list, dtype=np.int32)
results['reg_norm_consts'] = self.reg_norm_consts
return self.pipeline(results)
| 37,498 | 41.515873 | 120 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/builder.py | import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import ClassSpecificDistributedSampler, DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
BLENDINGS = Registry('blending')
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict | None, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
if cfg['type'] == 'RepeatDataset':
from .dataset_wrappers import RepeatDataset
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
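# Minimal usage sketch for build_dataset (illustrative, not part of the
# original file); the ann_file / data_prefix paths and the empty pipeline are
# placeholders:
#
#   dataset = build_dataset(
#       dict(
#           type='RawframeDataset',
#           ann_file='data/train_list.txt',
#           data_prefix='data/rawframes',
#           pipeline=[]))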
def build_dataloader(dataset,
videos_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (:obj:`Dataset`): A PyTorch dataset.
videos_per_gpu (int): Number of videos on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data
loading for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed
training. Default: 1.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
kwargs (dict, optional): Any keyword argument to be used to initialize
DataLoader.
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
sample_by_class = getattr(dataset, 'sample_by_class', False)
if dist:
if sample_by_class:
dynamic_length = getattr(dataset, 'dynamic_length', True)
sampler = ClassSpecificDistributedSampler(
dataset,
world_size,
rank,
dynamic_length=dynamic_length,
shuffle=shuffle,
seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle, seed=seed)
shuffle = False
batch_size = videos_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * videos_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=videos_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
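# Minimal usage sketch for build_dataloader (illustrative, not part of the
# original file), e.g. for single-GPU, non-distributed training:
#
#   data_loader = build_dataloader(
#       dataset,
#       videos_per_gpu=8,
#       workers_per_gpu=2,
#       num_gpus=1,
#       dist=False,
#       seed=42)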
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Init the random seed for various workers."""
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| 4,449 | 32.458647 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/samplers/distributed_sampler.py | import math
from collections import defaultdict
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
"""DistributedSampler inheriting from
``torch.utils.data.DistributedSampler``.
    In older versions of PyTorch, there is no ``shuffle`` argument. This child
    class ports one to DistributedSampler.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
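# Worked example of the padding/subsampling above (illustrative, not part of
# the original file): with len(dataset)=10 and num_replicas=4, total_size=12,
# so the shuffled index list is padded with its first two entries; rank r then
# takes indices[r::4], giving each replica num_samples=3 indices.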
class ClassSpecificDistributedSampler(_DistributedSampler):
"""ClassSpecificDistributedSampler inheriting from
``torch.utils.data.DistributedSampler``.
Samples are sampled with a class specific probability, which should be an
attribute of the dataset (dataset.class_prob, which is a dictionary that
map label index to the prob). This sampler is only applicable to single
class recognition dataset. This sampler is also compatible with
RepeatDataset.
The default value of dynamic_length is True, which means we use
    oversampling / subsampling, and the dataset length may change. If
    dynamic_length is set to False, the dataset length is fixed.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
dynamic_length=True,
shuffle=True,
seed=0):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
if type(dataset).__name__ == 'RepeatDataset':
dataset = dataset.dataset
assert hasattr(dataset, 'class_prob')
self.class_prob = dataset.class_prob
self.dynamic_length = dynamic_length
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
class_indices = defaultdict(list)
# To be compatible with RepeatDataset
times = 1
dataset = self.dataset
if type(dataset).__name__ == 'RepeatDataset':
times = dataset.times
dataset = dataset.dataset
for i, item in enumerate(dataset.video_infos):
class_indices[item['label']].append(i)
if self.dynamic_length:
indices = []
for k, prob in self.class_prob.items():
prob = prob * times
for i in range(int(prob // 1)):
indices.extend(class_indices[k])
rem = int((prob % 1) * len(class_indices[k]))
rem_indices = torch.randperm(
len(class_indices[k]), generator=g).tolist()[:rem]
indices.extend(rem_indices)
if self.shuffle:
shuffle = torch.randperm(len(indices), generator=g).tolist()
indices = [indices[i] for i in shuffle]
# re-calc num_samples & total_size
self.num_samples = math.ceil(len(indices) / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
else:
            # We want to keep the dataloader length the same as the original
video_labels = [x['label'] for x in dataset.video_infos]
probs = [
self.class_prob[lb] / len(class_indices[lb])
for lb in video_labels
]
indices = torch.multinomial(
torch.Tensor(probs),
self.total_size,
replacement=True,
generator=g)
indices = indices.data.numpy().tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# retrieve indices for current process
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
| 4,908 | 35.362963 | 78 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/pipelines/loading.py | import io
import os
import os.path as osp
import shutil
import warnings
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from torch.nn.modules.utils import _pair
from ...utils import get_random_string, get_shm_dir, get_thread_id
from ..builder import PIPELINES
import random
@PIPELINES.register_module()
class LoadHVULabel:
"""Convert the HVU label from dictionaries to torch tensors.
Required keys are "label", "categories", "category_nums", added or modified
keys are "label", "mask" and "category_mask".
"""
def __init__(self, **kwargs):
self.hvu_initialized = False
self.kwargs = kwargs
def init_hvu_info(self, categories, category_nums):
assert len(categories) == len(category_nums)
self.categories = categories
self.category_nums = category_nums
self.num_categories = len(self.categories)
self.num_tags = sum(self.category_nums)
self.category2num = dict(zip(categories, category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] + self.category_nums[i])
self.category2startidx = dict(zip(categories, self.start_idx))
self.hvu_initialized = True
def __call__(self, results):
"""Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not self.hvu_initialized:
self.init_hvu_info(results['categories'], results['category_nums'])
onehot = torch.zeros(self.num_tags)
onehot_mask = torch.zeros(self.num_tags)
category_mask = torch.zeros(self.num_categories)
for category, tags in results['label'].items():
category_mask[self.categories.index(category)] = 1.
start_idx = self.category2startidx[category]
category_num = self.category2num[category]
tags = [idx + start_idx for idx in tags]
onehot[tags] = 1.
onehot_mask[start_idx:category_num + start_idx] = 1.
results['label'] = onehot
results['mask'] = onehot_mask
results['category_mask'] = category_mask
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'hvu_initialized={self.hvu_initialized})')
return repr_str
@PIPELINES.register_module()
class SampleFrames:
"""Sample frames from the video.
Required keys are "total_frames", "start_index" , added or modified keys
are "frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
twice_sample (bool): Whether to use twice sample when testing.
If set to True, it will sample frames with and without fixed shift,
which is commonly used for testing in TSM model. Default: False.
out_of_bound_opt (str): The way to deal with out of bounds frame
indexes. Available options are 'loop', 'repeat_last'.
Default: 'loop'.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
start_index (None): This argument is deprecated and moved to dataset
class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
twice_sample=False,
out_of_bound_opt='loop',
test_mode=False,
start_index=None,
frame_uniform=False):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.temporal_jitter = temporal_jitter
self.twice_sample = twice_sample
self.out_of_bound_opt = out_of_bound_opt
self.test_mode = test_mode
self.frame_uniform = frame_uniform
assert self.out_of_bound_opt in ['loop', 'repeat_last']
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
        If the total number of frames is smaller than the number of clips or
        the original clip length, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
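    # Worked example for _get_train_clips (illustrative, not part of the
    # original file): with num_frames=300, clip_len=32, frame_interval=2 and
    # num_clips=1, ori_clip_len=64 and avg_interval=(300 - 64 + 1) // 1 = 237,
    # so the single clip offset is drawn uniformly from [0, 237).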
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
Calculate the average interval for selected frames, and shift them
        by a fixed offset of avg_interval/2. If twice_sample is set to True, it
        will additionally sample frames without the fixed shift. If the total
        number of frames is not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
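    # Worked example for _get_test_clips (illustrative, not part of the
    # original file): with num_frames=300, clip_len=32, frame_interval=2 and
    # num_clips=1, avg_interval=237.0 and the single offset is
    # int(0 + 237.0 / 2.0) = 118, i.e. the clip is centered in the video.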
def _sample_clips(self, num_frames):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
clip_offsets = self._get_train_clips(num_frames)
return clip_offsets
def get_seq_frames(self, num_frames):
"""
Modified from https://github.com/facebookresearch/SlowFast/blob/64abcc90ccfdcbb11cf91d6e525bed60e92a8796/slowfast/datasets/ssv2.py#L159
        Given the total number of frames, return the list of sampled frame indexes.
Args:
num_frames (int): Total number of frame in the video.
Returns:
            seq (list): the indexes of frames sampled from the video.
"""
seg_size = float(num_frames - 1) / self.clip_len
seq = []
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
end = int(np.round(seg_size * (i + 1)))
if not self.test_mode:
seq.append(random.randint(start, end))
else:
seq.append((start + end) // 2)
return np.array(seq)
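    # Worked example for get_seq_frames (illustrative, not part of the
    # original file): with num_frames=33 and clip_len=8, seg_size=4.0, so in
    # test mode the sampled midpoints are [2, 6, 10, 14, 18, 22, 26, 30];
    # in train mode each index is drawn uniformly from its 4-frame segment.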
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
if self.frame_uniform: # sthv2 sampling strategy
assert results['start_index'] == 0
frame_inds = self.get_seq_frames(total_frames)
else:
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'temporal_jitter={self.temporal_jitter}, '
f'twice_sample={self.twice_sample}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
@PIPELINES.register_module()
class UntrimmedSampleFrames:
"""Sample frames from the untrimmed video.
Required keys are "filename", "total_frames", added or modified keys are
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): The length of sampled clips. Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 16.
start_index (None): This argument is deprecated and moved to dataset
class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self, clip_len=1, frame_interval=16, start_index=None):
self.clip_len = clip_len
self.frame_interval = frame_interval
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
start_index = results['start_index']
clip_centers = np.arange(self.frame_interval // 2, total_frames,
self.frame_interval)
num_clips = clip_centers.shape[0]
frame_inds = clip_centers[:, None] + np.arange(
-(self.clip_len // 2), self.clip_len -
(self.clip_len // 2))[None, :]
# clip frame_inds to legal range
frame_inds = np.clip(frame_inds, 0, total_frames - 1)
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = num_clips
return results
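    # Worked example (illustrative, not part of the original file): with
    # total_frames=100, clip_len=1 and frame_interval=16, clip_centers is
    # [8, 24, 40, 56, 72, 88], so num_clips=6 and each clip contains just its
    # center frame (shifted by start_index).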
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval})')
return repr_str
@PIPELINES.register_module()
class DenseSampleFrames(SampleFrames):
"""Select frames from the video by dense sample strategy.
Required keys are "filename", added or modified keys are "total_frames",
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
sample_range (int): Total sample range for dense sample.
Default: 64.
num_sample_positions (int): Number of sample start positions, Which is
only used in test mode. Default: 10. That is to say, by default,
there are at least 10 clips for one input sample in test mode.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
sample_range=64,
num_sample_positions=10,
temporal_jitter=False,
out_of_bound_opt='loop',
test_mode=False):
super().__init__(
clip_len,
frame_interval,
num_clips,
temporal_jitter,
out_of_bound_opt=out_of_bound_opt,
test_mode=test_mode)
self.sample_range = sample_range
self.num_sample_positions = num_sample_positions
def _get_train_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval and set
start index 0 when sample_pos == 1 or randomly choose from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
return clip_offsets
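    # Worked example for the dense train strategy above (illustrative, not
    # part of the original file): with num_frames=128, sample_range=64 and
    # num_clips=8, sample_position=65 and interval=8; a random start_idx in
    # [0, 64) is added to base_offsets [0, 8, ..., 56] modulo 128.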
def _get_test_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frame in the video.
Returns:
            np.ndarray: Sampled frame indices in test mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'sample_range={self.sample_range}, '
f'num_sample_positions={self.num_sample_positions}, '
f'temporal_jitter={self.temporal_jitter}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
@PIPELINES.register_module()
class SampleAVAFrames(SampleFrames):
def __init__(self, clip_len, frame_interval=2, test_mode=False):
super().__init__(clip_len, frame_interval, test_mode=test_mode)
def _get_clips(self, center_index, skip_offsets, shot_info):
start = center_index - (self.clip_len // 2) * self.frame_interval
end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval
frame_inds = list(range(start, end, self.frame_interval))
if not self.test_mode:
frame_inds = frame_inds + skip_offsets
frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1)
return frame_inds
def __call__(self, results):
fps = results['fps']
timestamp = results['timestamp']
timestamp_start = results['timestamp_start']
shot_info = results['shot_info']
center_index = fps * (timestamp - timestamp_start) + 1
skip_offsets = np.random.randint(
-self.frame_interval // 2, (self.frame_interval + 1) // 2,
size=self.clip_len)
frame_inds = self._get_clips(center_index, skip_offsets, shot_info)
        results['frame_inds'] = np.array(frame_inds, dtype=int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = 1
results['crop_quadruple'] = np.array([0, 0, 1, 1], dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'test_mode={self.test_mode})')
return repr_str
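# Hedged sketch (not part of the original file): the symmetric frame window
# SampleAVAFrames._get_clips builds around an AVA keyframe. The fps and
# timestamp values are illustrative assumptions.
def _demo_ava_frame_window():
    clip_len, frame_interval, fps = 32, 2, 30
    timestamp, timestamp_start = 905, 900
    center_index = fps * (timestamp - timestamp_start) + 1  # 151
    start = center_index - (clip_len // 2) * frame_interval  # 119
    end = center_index + ((clip_len + 1) // 2) * frame_interval  # 183
    # 32 indices centered on the keyframe with stride 2
    return list(range(start, end, frame_interval))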
@PIPELINES.register_module()
class SampleProposalFrames(SampleFrames):
"""Sample frames from proposals in the video.
Required keys are "total_frames" and "out_proposals", added or
modified keys are "frame_inds", "frame_interval", "num_clips",
'clip_len' and 'num_proposals'.
Args:
clip_len (int): Frames of each sampled output clip.
body_segments (int): Number of segments in course period.
aug_segments (list[int]): Number of segments in starting and
ending period.
aug_ratio (int | float | tuple[int | float]): The ratio
of the length of augmentation to that of the proposal.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
test_interval (int): Temporal interval of adjacent sampled frames
in test mode. Default: 6.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
mode (str): Choose 'train', 'val' or 'test' mode.
Default: 'train'.
"""
def __init__(self,
clip_len,
body_segments,
aug_segments,
aug_ratio,
frame_interval=1,
test_interval=6,
temporal_jitter=False,
mode='train'):
super().__init__(
clip_len,
frame_interval=frame_interval,
temporal_jitter=temporal_jitter)
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
raise TypeError(f'aug_ratio should be int, float'
f'or tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
assert mode in ['train', 'val', 'test']
self.mode = mode
self.test_interval = test_interval
@staticmethod
def _get_train_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in train mode.
It will calculate the average interval for each segment,
and randomly shift them within offsets between [0, average_duration].
If the total number of frames is smaller than num segments, it will
return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
avg_interval = (valid_length + 1) // num_segments
if avg_interval > 0:
base_offsets = np.arange(num_segments) * avg_interval
offsets = base_offsets + np.random.randint(
avg_interval, size=num_segments)
else:
            offsets = np.zeros((num_segments, ), dtype=int)
return offsets
@staticmethod
def _get_val_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in validation mode.
It will calculate the average interval for each segment.
If the total number of valid length is smaller than num segments,
it will return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in validation mode.
"""
if valid_length >= num_segments:
avg_interval = valid_length / float(num_segments)
base_offsets = np.arange(num_segments) * avg_interval
            offsets = (base_offsets + avg_interval / 2.0).astype(int)
        else:
            offsets = np.zeros((num_segments, ), dtype=int)
return offsets
def _get_proposal_clips(self, proposal, num_frames):
"""Get clip offsets in train mode.
It will calculate sampled frame indices in the proposal's three
stages: starting, course and ending stage.
Args:
proposal (obj): The proposal object.
            num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_length = duration - ori_clip_len
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
if self.mode == 'train':
starting_offsets = self._get_train_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_train_indices(valid_length,
self.body_segments)
ending_offsets = self._get_train_indices(valid_ending_length,
self.aug_segments[1])
elif self.mode == 'val':
starting_offsets = self._get_val_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_val_indices(valid_length,
self.body_segments)
ending_offsets = self._get_val_indices(valid_ending_length,
self.aug_segments[1])
starting_offsets += valid_starting
course_offsets += start_frame
ending_offsets += end_frame
offsets = np.concatenate(
(starting_offsets, course_offsets, ending_offsets))
return offsets
def _get_train_clips(self, num_frames, proposals):
"""Get clip offsets in train mode.
It will calculate sampled frame indices of each proposal, and then
assemble them.
Args:
            num_frames (int): Total number of frames in the video.
proposals (list): Proposals fetched.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
clip_offsets = []
for proposal in proposals:
proposal_clip_offsets = self._get_proposal_clips(
proposal[0][1], num_frames)
clip_offsets = np.concatenate(
[clip_offsets, proposal_clip_offsets])
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
It will calculate sampled frame indices based on test interval.
Args:
            num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
        return np.arange(
            0, num_frames - ori_clip_len, self.test_interval, dtype=int)
def _sample_clips(self, num_frames, proposals):
"""Choose clip offsets for the video in a given mode.
Args:
            num_frames (int): Total number of frames in the video.
proposals (list | None): Proposals fetched.
It is set to None in test mode.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.mode == 'test':
clip_offsets = self._get_test_clips(num_frames)
else:
assert proposals is not None
clip_offsets = self._get_train_clips(num_frames, proposals)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
out_proposals = results.get('out_proposals', None)
clip_offsets = self._sample_clips(total_frames, out_proposals)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
start_index = results['start_index']
frame_inds = np.mod(frame_inds, total_frames) + start_index
        results['frame_inds'] = np.array(frame_inds).astype(int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = (
self.body_segments + self.aug_segments[0] + self.aug_segments[1])
if self.mode in ['train', 'val']:
results['num_proposals'] = len(results['out_proposals'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'body_segments={self.body_segments}, '
f'aug_segments={self.aug_segments}, '
f'aug_ratio={self.aug_ratio}, '
f'frame_interval={self.frame_interval}, '
f'test_interval={self.test_interval}, '
f'temporal_jitter={self.temporal_jitter}, '
f'mode={self.mode})')
return repr_str
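# Hedged sketch (not part of the original file): the evenly spaced offsets
# SampleProposalFrames._get_val_indices produces for a toy valid_length.
def _demo_proposal_val_indices(valid_length=20, num_segments=5):
    if valid_length >= num_segments:
        avg_interval = valid_length / float(num_segments)  # 4.0
        base_offsets = np.arange(num_segments) * avg_interval
        return (base_offsets + avg_interval / 2.0).astype(int)  # [2 6 10 14 18]
    return np.zeros((num_segments, ), dtype=int)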
@PIPELINES.register_module()
class PyAVInit:
"""Using pyav to initialize the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "filename",
added or modified keys are "video_reader", and "total_frames".
Args:
        io_backend (str): IO backend where frames are stored.
Default: 'disk'.
kwargs (dict): Args for file client.
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results
def __repr__(self):
        repr_str = f'{self.__class__.__name__}(io_backend={self.io_backend})'
return repr_str
@PIPELINES.register_module()
class PyAVDecode:
"""Using pyav to decode the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "video_reader" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
def __init__(self, multi_thread=False):
self.multi_thread = multi_thread
def __call__(self, results):
"""Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
        # record the max index so decoding can stop early
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
results['video_reader'] = None
del container
        # pyav may decode fewer frames than the reported stream length;
        # index modulo the decoded count to avoid an IndexError
results['imgs'] = [imgs[i % len(imgs)] for i in results['frame_inds']]
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(multi_thread={self.multi_thread})'
return repr_str
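# Hedged usage sketch (not part of the original file): chaining PyAVInit and
# PyAVDecode the way a dataset pipeline would. Assumes a local file
# 'demo.mp4' exists and that PyAV is installed.
def _demo_pyav_pipeline():
    results = dict(filename='demo.mp4')
    results = PyAVInit()(results)  # adds 'video_reader' and 'total_frames'
    # decode 8 evenly spaced frames
    results['frame_inds'] = np.linspace(
        0, results['total_frames'] - 1, num=8, dtype=int)
    results = PyAVDecode()(results)  # adds 'imgs', 'img_shape'
    return [img.shape for img in results['imgs']]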
@PIPELINES.register_module()
class PyAVDecodeMotionVector(PyAVDecode):
"""Using pyav to decode the motion vectors from video.
Reference: https://github.com/PyAV-Org/PyAV/
blob/main/tests/test_decode.py
Required keys are "video_reader" and "frame_inds",
added or modified keys are "motion_vectors", "frame_inds".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
@staticmethod
def _parse_vectors(mv, vectors, height, width):
"""Parse the returned vectors."""
(w, h, src_x, src_y, dst_x,
dst_y) = (vectors['w'], vectors['h'], vectors['src_x'],
vectors['src_y'], vectors['dst_x'], vectors['dst_y'])
val_x = dst_x - src_x
val_y = dst_y - src_y
start_x = dst_x - w // 2
start_y = dst_y - h // 2
end_x = start_x + w
end_y = start_y + h
for sx, ex, sy, ey, vx, vy in zip(start_x, end_x, start_y, end_y,
val_x, val_y):
if (sx >= 0 and ex < width and sy >= 0 and ey < height):
mv[sy:ey, sx:ex] = (vx, vy)
return mv
def __call__(self, results):
"""Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
        # record the max index so decoding can stop early
max_idx = max(results['frame_inds'])
i = 0
stream = container.streams.video[0]
codec_context = stream.codec_context
codec_context.options = {'flags2': '+export_mvs'}
for packet in container.demux(stream):
for frame in packet.decode():
if i > max_idx + 1:
break
i += 1
height = frame.height
width = frame.width
mv = np.zeros((height, width, 2), dtype=np.int8)
vectors = frame.side_data.get('MOTION_VECTORS')
if frame.key_frame:
                    # Key frames don't have motion vectors
assert vectors is None
if vectors is not None and len(vectors) > 0:
mv = self._parse_vectors(mv, vectors.to_ndarray(), height,
width)
imgs.append(mv)
results['video_reader'] = None
del container
        # pyav may decode fewer frames than the reported stream length;
        # index modulo the decoded count to avoid an IndexError
results['motion_vectors'] = np.array(
[imgs[i % len(imgs)] for i in results['frame_inds']])
return results
@PIPELINES.register_module()
class DecordInit:
"""Using decord to initialize the video_reader.
Decord: https://github.com/dmlc/decord
Required keys are "filename",
added or modified keys are "video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', num_threads=1, **kwargs):
self.io_backend = io_backend
self.num_threads = num_threads
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the Decord initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import decord
except ImportError:
raise ImportError(
'Please run "pip install decord" to install Decord first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = decord.VideoReader(file_obj, num_threads=self.num_threads)
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'num_threads={self.num_threads})')
return repr_str
@PIPELINES.register_module()
class DecordDecode:
"""Using decord to decode the video.
Decord: https://github.com/dmlc/decord
Required keys are "video_reader", "filename" and "frame_inds",
added or modified keys are "imgs" and "original_shape".
"""
def __call__(self, results):
"""Perform the Decord decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
# Generate frame index mapping in order
frame_dict = {
idx: container[idx].asnumpy()
for idx in np.unique(frame_inds)
}
imgs = [frame_dict[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
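# Hedged sketch (not part of the original file): why DecordDecode decodes via
# np.unique -- repeated indices (common with wrap-around sampling) are decoded
# once and then gathered, rather than decoded repeatedly.
def _demo_unique_gather():
    frame_inds = np.array([3, 7, 3, 7, 11])
    decoded = {idx: f'frame_{idx}' for idx in np.unique(frame_inds)}  # 3 decodes
    return [decoded[idx] for idx in frame_inds]  # 5 gathered outputs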
@PIPELINES.register_module()
class OpenCVInit:
"""Using OpenCV to initialize the video_reader.
Required keys are "filename", added or modified keys are "new_path",
"video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
self.tmp_folder = None
if self.io_backend != 'disk':
random_string = get_random_string()
thread_id = get_thread_id()
self.tmp_folder = osp.join(get_shm_dir(),
f'{random_string}_{thread_id}')
os.mkdir(self.tmp_folder)
def __call__(self, results):
"""Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
# save the file of same thread at the same place
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __del__(self):
if self.tmp_folder and osp.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend})')
return repr_str
@PIPELINES.register_module()
class OpenCVDecode:
"""Using OpenCV to decode the video.
Required keys are "video_reader", "filename" and "frame_inds", added or
modified keys are "imgs", "img_shape" and "original_shape".
"""
def __call__(self, results):
"""Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
# last frame may be None in OpenCV
            while cur_frame is None:
frame_ind -= 1
cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class RawFrameDecode:
"""Load and decode frames with given indices.
Required keys are "frame_dir", "filename_tmpl" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
for frame_idx in results['frame_inds']:
frame_idx += offset
if modality == 'RGB':
filepath = osp.join(directory, filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.extend([x_frame, y_frame])
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'decoding_backend={self.decoding_backend})')
return repr_str
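# Hedged sketch (not part of the original file): the gt_bboxes rescaling done
# at the end of RawFrameDecode, taking boxes from normalized [0, 1]
# coordinates to pixel coordinates of the decoded frames.
def _demo_bbox_rescale():
    h, w = 240, 320
    scale_factor = np.array([w, h, w, h])
    gt_bboxes = np.array([[0.25, 0.25, 0.75, 0.75]])
    return (gt_bboxes * scale_factor).astype(np.float32)  # [[80. 60. 240. 180.]]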
@PIPELINES.register_module()
class ImageDecode:
"""Load and decode images.
Required key is "filename", added or modified keys are "imgs", "img_shape"
and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
filename = results['filename']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(img)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class AudioDecodeInit:
"""Using librosa to initialize the audio reader.
Required keys are "audio_path", added or modified keys are "length",
"sample_rate", "audios".
Args:
        io_backend (str): IO backend where frames are stored.
Default: 'disk'.
sample_rate (int): Audio sampling times per second. Default: 16000.
"""
def __init__(self,
io_backend='disk',
sample_rate=16000,
pad_method='zero',
**kwargs):
self.io_backend = io_backend
self.sample_rate = sample_rate
if pad_method in ['random', 'zero']:
self.pad_method = pad_method
else:
raise NotImplementedError
self.kwargs = kwargs
self.file_client = None
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
        # librosa loads raw audio as float values in [-1, 1]
return np.random.rand(shape).astype(np.float32) * 2 - 1
def __call__(self, results):
"""Perform the librosa initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Please install librosa first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
if osp.exists(results['audio_path']):
file_obj = io.BytesIO(self.file_client.get(results['audio_path']))
y, sr = librosa.load(file_obj, sr=self.sample_rate)
else:
# Generate a random dummy 10s input
pad_func = getattr(self, f'_{self.pad_method}_pad')
y = pad_func(int(round(10.0 * self.sample_rate)))
sr = self.sample_rate
results['length'] = y.shape[0]
results['sample_rate'] = sr
results['audios'] = y
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'sample_rate={self.sample_rate}, '
f'pad_method={self.pad_method})')
return repr_str
@PIPELINES.register_module()
class LoadAudioFeature:
"""Load offline extracted audio features.
Required keys are "audio_path", added or modified keys are "length",
audios".
"""
def __init__(self, pad_method='zero'):
if pad_method not in ['zero', 'random']:
raise NotImplementedError
self.pad_method = pad_method
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
        # the spectrogram is normalized into [0, 1]; unpack the shape tuple,
        # since np.random.rand takes dimensions as separate arguments
        return np.random.rand(*shape).astype(np.float32)
def __call__(self, results):
"""Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if osp.exists(results['audio_path']):
feature_map = np.load(results['audio_path'])
else:
# Generate a random dummy 10s input
# Some videos do not have audio stream
pad_func = getattr(self, f'_{self.pad_method}_pad')
feature_map = pad_func((640, 80))
results['length'] = feature_map.shape[0]
results['audios'] = feature_map
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'pad_method={self.pad_method})')
return repr_str
@PIPELINES.register_module()
class AudioDecode:
"""Sample the audio w.r.t. the frames selected.
    Required keys are "frame_inds", "num_clips", "total_frames", "length",
    added or modified keys are "audios", "audios_shape".
    Args:
        fixed_length (int): As the audio clip selected by frames sampled may
            not be exactly the same, `fixed_length` will truncate or pad them
            into the same size. Default: 32000.
"""
def __init__(self, fixed_length=32000):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioDecode`` to pick audio clips."""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0])),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
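# Hedged sketch (not part of the original file): how AudioDecode maps a clip's
# frame indices onto a proportional slice of the waveform. The totals below
# are illustrative assumptions (10 s of 16 kHz audio).
def _demo_audio_window():
    total_frames, length = 300, 160000
    clip_frame_inds = np.arange(100, 132)  # one sampled clip
    start_idx = max(
        0, int(round((clip_frame_inds[0] + 1) / total_frames * length)))
    end_idx = min(
        length, int(round((clip_frame_inds[-1] + 1) / total_frames * length)))
    return start_idx, end_idx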
@PIPELINES.register_module()
class BuildPseudoClip:
"""Build pseudo clips with one single image by repeating it n times.
Required key is "imgs", added or modified key is "imgs", "num_clips",
"clip_len".
Args:
clip_len (int): Frames of the generated pseudo clips.
"""
def __init__(self, clip_len):
self.clip_len = clip_len
def __call__(self, results):
# the input should be one single image
assert len(results['imgs']) == 1
im = results['imgs'][0]
for _ in range(1, self.clip_len):
results['imgs'].append(np.copy(im))
results['clip_len'] = self.clip_len
results['num_clips'] = 1
return results
def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'clip_len={self.clip_len})')
return repr_str
@PIPELINES.register_module()
class FrameSelector(RawFrameDecode):
"""Deprecated class for ``RawFrameDecode``."""
def __init__(self, *args, **kwargs):
warnings.warn('"FrameSelector" is deprecated, please switch to'
'"RawFrameDecode"')
super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class AudioFeatureSelector:
"""Sample the audio feature w.r.t. the frames selected.
Required keys are "audios", "frame_inds", "num_clips", "length",
"total_frames", added or modified keys are "audios", "audios_shape".
Args:
fixed_length (int): As the features selected by frames sampled may
            not be exactly the same, `fixed_length` will truncate or pad them
into the same size. Default: 128.
"""
def __init__(self, fixed_length=128):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx, :]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length, :]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
                    f'fixed_length={self.fixed_length})')
return repr_str
@PIPELINES.register_module()
class LoadLocalizationFeature:
"""Load Video features for localizer with given video_name list.
Required keys are "video_name" and "data_prefix", added or modified keys
are "raw_feature".
Args:
raw_feature_ext (str): Raw feature file extension. Default: '.csv'.
"""
def __init__(self, raw_feature_ext='.csv'):
valid_raw_feature_ext = ('.csv', )
if raw_feature_ext not in valid_raw_feature_ext:
raise NotImplementedError
self.raw_feature_ext = raw_feature_ext
def __call__(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
data_prefix = results['data_prefix']
data_path = osp.join(data_prefix, video_name + self.raw_feature_ext)
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'raw_feature_ext={self.raw_feature_ext})')
return repr_str
@PIPELINES.register_module()
class GenerateLocalizationLabels:
"""Load video label for localizer with given video_name list.
Required keys are "duration_frame", "duration_second", "feature_frame",
"annotations", added or modified keys are "gt_bbox".
"""
def __call__(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results
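# Hedged sketch (not part of the original file): the normalization performed
# by GenerateLocalizationLabels, mapping an annotated segment in seconds to a
# [0, 1] interval over the feature-covered duration. Values are assumptions.
def _demo_localization_label():
    duration_frame, duration_second, feature_frame = 300, 10.0, 288
    corrected_second = float(feature_frame) / duration_frame * duration_second
    segment = (2.0, 7.5)  # assumed annotation, in seconds
    start = max(min(1, segment[0] / corrected_second), 0)
    end = max(min(1, segment[1] / corrected_second), 0)
    return start, end  # roughly (0.208, 0.781)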
@PIPELINES.register_module()
class LoadProposals:
"""Loading proposals with given proposal results.
Required keys are "video_name", added or modified keys are 'bsp_feature',
'tmin', 'tmax', 'tmin_score', 'tmax_score' and 'reference_temporal_iou'.
Args:
top_k (int): The top k proposals to be loaded.
pgm_proposals_dir (str): Directory to load proposals.
pgm_features_dir (str): Directory to load proposal features.
proposal_ext (str): Proposal file extension. Default: '.csv'.
feature_ext (str): Feature file extension. Default: '.npy'.
"""
def __init__(self,
top_k,
pgm_proposals_dir,
pgm_features_dir,
proposal_ext='.csv',
feature_ext='.npy'):
self.top_k = top_k
self.pgm_proposals_dir = pgm_proposals_dir
self.pgm_features_dir = pgm_features_dir
valid_proposal_ext = ('.csv', )
if proposal_ext not in valid_proposal_ext:
raise NotImplementedError
self.proposal_ext = proposal_ext
valid_feature_ext = ('.npy', )
if feature_ext not in valid_feature_ext:
raise NotImplementedError
self.feature_ext = feature_ext
def __call__(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'top_k={self.top_k}, '
f'pgm_proposals_dir={self.pgm_proposals_dir}, '
f'pgm_features_dir={self.pgm_features_dir}, '
f'proposal_ext={self.proposal_ext}, '
f'feature_ext={self.feature_ext})')
return repr_str
| 60,896 | 35.334726 | 143 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/pipelines/augmentations.py | import random
import warnings
from collections.abc import Sequence
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
import timm.data as tdata
import torch
from ..builder import PIPELINES
def _combine_quadruple(a, b):
return (a[0] + a[2] * b[0], a[1] + a[3] * b[1], a[2] * b[2], a[3] * b[3])
def _flip_quadruple(a):
return (1 - a[0] - a[2], a[1], a[2], a[3])
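# Hedged sketch (not part of the original file): composing two relative crop
# quadruples (x, y, w, h, all in [0, 1]) with _combine_quadruple. Cropping
# the central half twice leaves the central quarter.
def _demo_combine_quadruple():
    first = (0.25, 0.25, 0.5, 0.5)
    second = (0.25, 0.25, 0.5, 0.5)
    return _combine_quadruple(first, second)  # (0.375, 0.375, 0.25, 0.25)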
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" if "img_shape" not in results,
otherwise, Required keys in results are "img_shape", add or modified keys
are "img_shape", "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
if 'img_shape' not in results:
results['img_shape'] = results['imgs'][0].shape[:2]
if lazy:
if 'lazy' not in results:
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
results['lazy'] = lazyop
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations'
@PIPELINES.register_module()
class PoseCompact:
"""Convert the coordinates of keypoints to make it more compact.
    Specifically, it first finds a tight bounding box that surrounds all
    joints in each frame, then expands the tight box by a given padding
    ratio. For example, if ``padding == 0.25``, the expanded box keeps the
    same center but has 1.25x the width and height.
Required keys in results are "img_shape", "keypoint", add or modified keys
are "img_shape", "keypoint", "crop_quadruple".
Args:
padding (float): The padding size. Default: 0.25.
threshold (int): The threshold for the tight bounding box. If the width
or height of the tight bounding box is smaller than the threshold,
we do not perform the compact operation. Default: 10.
hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded
box. Float indicates the specific ratio and tuple indicates a
ratio range. If set as None, it means there is no requirement on
hw_ratio. Default: None.
allow_imgpad (bool): Whether to allow expanding the box outside the
image to meet the hw_ratio requirement. Default: True.
Returns:
type: Description of returned object.
"""
def __init__(self,
padding=0.25,
threshold=10,
hw_ratio=None,
allow_imgpad=True):
self.padding = padding
self.threshold = threshold
if hw_ratio is not None:
hw_ratio = _pair(hw_ratio)
self.hw_ratio = hw_ratio
self.allow_imgpad = allow_imgpad
assert self.padding >= 0
def __call__(self, results):
img_shape = results['img_shape']
h, w = img_shape
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
kp_x = kp[..., 0]
kp_y = kp[..., 1]
        min_x = np.min(kp_x[kp_x != 0], initial=np.inf)
        min_y = np.min(kp_y[kp_y != 0], initial=np.inf)
        max_x = np.max(kp_x[kp_x != 0], initial=-np.inf)
        max_y = np.max(kp_y[kp_y != 0], initial=-np.inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return results
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
        # clamp the expanded box inside the image if image padding is not allowed
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
# the order is x, y, w, h (in [0, 1]), a tuple
crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))
new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,
(max_y - min_y) / h)
crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)
results['crop_quadruple'] = crop_quadruple
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '
f'threshold={self.threshold}, '
f'hw_ratio={self.hw_ratio}, '
f'allow_imgpad={self.allow_imgpad})')
return repr_str
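# Hedged sketch (not part of the original file): the box-expansion arithmetic
# inside PoseCompact for padding=0.25, starting from a tight keypoint box.
def _demo_posecompact_expand():
    min_x, max_x, min_y, max_y = 40., 120., 60., 180.
    padding = 0.25
    center = ((max_x + min_x) / 2, (max_y + min_y) / 2)  # (80, 120)
    half_width = (max_x - min_x) / 2 * (1 + padding)  # 50
    half_height = (max_y - min_y) / 2 * (1 + padding)  # 75
    return (center[0] - half_width, center[1] - half_height,
            center[0] + half_width, center[1] + half_height)  # (30, 45, 130, 195)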
class EntityBoxRescale:
def __init__(self, scale_factor):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class EntityBoxCrop:
def __init__(self, crop_bbox):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class EntityBoxFlip:
def __init__(self, img_shape):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class Imgaug:
"""Imgaug augmentation.
Adds custom transformations from imgaug library.
Please visit `https://imgaug.readthedocs.io/en/latest/index.html`
to get more information. Two demo configs could be found in tsn and i3d
config folder.
It's better to use uint8 images as inputs since imgaug works best with
numpy dtype uint8 and isn't well tested with other dtypes. It should be
noted that not all of the augmenters have the same input and output dtype,
which may cause unexpected results.
Required keys are "imgs", "img_shape"(if "gt_bboxes" is not None) and
"modality", added or modified keys are "imgs", "img_shape", "gt_bboxes"
and "proposals".
It is worth mentioning that `Imgaug` will NOT create custom keys like
"interpolation", "crop_bbox", "flip_direction", etc. So when using
`Imgaug` along with other mmaction2 pipelines, we should pay more attention
to required keys.
Two steps to use `Imgaug` pipeline:
1. Create initialization parameter `transforms`. There are three ways
to create `transforms`.
1) string: only support `default` for now.
e.g. `transforms='default'`
2) list[dict]: create a list of augmenters by a list of dicts, each
dict corresponds to one augmenter. Every dict MUST contain a key
named `type`. `type` should be a string(iaa.Augmenter's name) or
an iaa.Augmenter subclass.
e.g. `transforms=[dict(type='Rotate', rotate=(-20, 20))]`
e.g. `transforms=[dict(type=iaa.Rotate, rotate=(-20, 20))]`
3) iaa.Augmenter: create an imgaug.Augmenter object.
e.g. `transforms=iaa.Rotate(rotate=(-20, 20))`
2. Add `Imgaug` in dataset pipeline. It is recommended to insert imgaug
pipeline before `Normalize`. A demo pipeline is listed as follows.
```
pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=16,
),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Imgaug', transforms='default'),
# dict(type='Imgaug', transforms=[
# dict(type='Rotate', rotate=(-20, 20))
# ]),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
```
Args:
transforms (str | list[dict] | :obj:`iaa.Augmenter`): Three different
ways to create imgaug augmenter.
"""
def __init__(self, transforms):
import imgaug.augmenters as iaa
if transforms == 'default':
self.transforms = self.default_transforms()
elif isinstance(transforms, list):
assert all(isinstance(trans, dict) for trans in transforms)
self.transforms = transforms
elif isinstance(transforms, iaa.Augmenter):
self.aug = self.transforms = transforms
else:
raise ValueError('transforms must be `default` or a list of dicts'
' or iaa.Augmenter object')
if not isinstance(transforms, iaa.Augmenter):
self.aug = iaa.Sequential(
[self.imgaug_builder(t) for t in self.transforms])
@staticmethod
def default_transforms():
"""Default transforms for imgaug.
Implement RandAugment by imgaug.
        Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyper parameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
Miss one augmenter ``SolarizeAdd`` since imgaug doesn't support this.
Returns:
dict: The constructed RandAugment transforms.
"""
# RandAugment hyper params
num_augmenters = 2
cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
return [
dict(
type='SomeOf',
n=num_augmenters,
children=[
dict(
type='ShearX',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='ShearY',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateX',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateY',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='Rotate',
rotate=30 * cur_level * random.choice([-1, 1])),
dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
dict(type='Solarize', threshold=256 * cur_level),
dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
dict(
type='EnhanceBrightness', factor=1.8 * cur_level + .1),
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
dict(type='Autocontrast', cutoff=0),
dict(type='Equalize'),
dict(type='Invert', p=1.),
dict(
type='Cutout',
nb_iterations=1,
size=0.2 * cur_level,
squared=True)
])
]
def imgaug_builder(self, cfg):
"""Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
"""
import imgaug.augmenters as iaa
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
else getattr(iaa.pillike, obj_type)
elif issubclass(obj_type, iaa.Augmenter):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'children' in args:
args['children'] = [
self.imgaug_builder(child) for child in args['children']
]
return obj_cls(**args)
def __repr__(self):
repr_str = self.__class__.__name__ + f'(transforms={self.aug})'
return repr_str
def __call__(self, results):
assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'
in_type = results['imgs'][0].dtype.type
cur_aug = self.aug.to_deterministic()
results['imgs'] = [
cur_aug.augment_image(frame) for frame in results['imgs']
]
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype.type
        assert in_type == out_type, \
            ('Imgaug input dtype and output dtype are not the same. '
             f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
from imgaug.augmentables import bbs
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['gt_bboxes']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['gt_bboxes'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
if 'proposals' in results:
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['proposals']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['proposals'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
results['img_shape'] = (img_h, img_w)
return results
@PIPELINES.register_module()
class RandomErasing(tdata.random_erasing.RandomErasing):
def __init__(self, device='cpu', **args):
super().__init__(device=device, **args)
def __call__(self, results):
in_type = results['imgs'][0].dtype.type
rand_state = random.getstate()
torchrand_state = torch.get_rng_state()
numpyrand_state = np.random.get_state()
        # not using cuda to preserve determinism
out_frame = []
for frame in results['imgs']:
random.setstate(rand_state)
torch.set_rng_state(torchrand_state)
np.random.set_state(numpyrand_state)
frame = super().__call__(torch.from_numpy(frame).permute(2, 0, 1)).permute(1, 2, 0).numpy()
out_frame.append(frame)
results['imgs'] = out_frame
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype.type
        assert in_type == out_type, \
            ('RandomErasing input dtype and output dtype are not the same. '
             f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
raise NotImplementedError('only support recognition now')
assert results['img_shape'] == (img_h, img_w)
return results
@PIPELINES.register_module()
class Fuse:
"""Fuse lazy operations.
Fusion order:
crop -> resize -> flip
Required keys are "imgs", "img_shape" and "lazy", added or modified keys
are "imgs", "lazy".
Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction".
"""
def __call__(self, results):
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results
@PIPELINES.register_module()
class RandomScale:
"""Resize images by a random scale.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy",
"scale", "resize_size". Required keys in "lazy" is None, added or
modified key is "interpolation".
Args:
scales (tuple[int]): Tuple of scales to be chosen for resize.
mode (str): Selection mode for choosing the scale. Options are "range"
and "value". If set to "range", The short edge will be randomly
chosen from the range of minimum and maximum on the shorter one
in all tuples. Otherwise, the longer edge will be randomly chosen
from the range of minimum and maximum on the longer one in all
tuples. Default: 'range'.
"""
def __init__(self, scales, mode='range', **kwargs):
warnings.warn('"RandomScale" is deprecated and will be removed in '
'later versions. It is currently not used in MMAction2')
self.mode = mode
if self.mode not in ['range', 'value']:
raise ValueError(f"mode should be 'range' or 'value', "
f'but got {self.mode}')
self.scales = scales
self.kwargs = kwargs
def select_scale(self, scales):
num_scales = len(scales)
if num_scales == 1:
# specify a fixed scale
scale = scales[0]
elif num_scales == 2:
if self.mode == 'range':
scale_long = [max(s) for s in scales]
scale_short = [min(s) for s in scales]
long_edge = np.random.randint(
min(scale_long),
max(scale_long) + 1)
short_edge = np.random.randint(
min(scale_short),
max(scale_short) + 1)
scale = (long_edge, short_edge)
elif self.mode == 'value':
scale = random.choice(scales)
else:
if self.mode != 'value':
raise ValueError("Only 'value' mode supports more than "
'2 image scales')
scale = random.choice(scales)
return scale
def __call__(self, results):
scale = self.select_scale(self.scales)
results['scale'] = scale
resize = Resize(scale, **self.kwargs)
results = resize(results)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scales={self.scales}, mode={self.mode})')
return repr_str
@PIPELINES.register_module()
class RandomCrop:
"""Vanilla square random crop that specifics the output size.
Required keys in results are "img_shape", "keypoint" (optional), "imgs"
(optional), added or modified keys are "keypoint", "imgs", "lazy"; Required
keys in "lazy" are "flip", "crop_bbox", added or modified key is
"crop_bbox".
Args:
size (int): The output size of the images.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, size, lazy=False):
if not isinstance(size, int):
raise TypeError(f'Size must be an int, but got {type(size)}')
self.size = size
self.lazy = lazy
@staticmethod
def _crop_kps(kps, crop_bbox):
return kps - crop_bbox[:2]
@staticmethod
def _crop_imgs(imgs, crop_bbox):
x1, y1, x2, y2 = crop_bbox
return [img[y1:y2, x1:x2] for img in imgs]
@staticmethod
def _box_crop(box, crop_bbox):
"""Crop the bounding boxes according to the crop_bbox.
Args:
box (np.ndarray): The bounding boxes.
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
x1, y1, x2, y2 = crop_bbox
img_w, img_h = x2 - x1, y2 - y1
box_ = box.copy()
box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
return box_
def _all_box_crop(self, results, crop_bbox):
"""Crop the gt_bboxes and proposals in results according to crop_bbox.
Args:
results (dict): All information about the sample, which contain
'gt_bboxes' and 'proposals' (optional).
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_crop(results['proposals'],
crop_bbox)
return results
def __call__(self, results):
"""Performs the RandomCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
assert self.size <= img_h and self.size <= img_w
y_offset = 0
x_offset = 0
if img_h > self.size:
y_offset = int(np.random.randint(0, img_h - self.size))
if img_w > self.size:
x_offset = int(np.random.randint(0, img_w - self.size))
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = self.size / img_w, self.size / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
new_h, new_w = self.size, self.size
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
# Process entity boxes
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(size={self.size}, '
f'lazy={self.lazy})')
return repr_str
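# Hedged sketch (not part of the original file): RandomCrop._box_crop
# translating boxes into the cropped frame's coordinate system and clipping
# them to its bounds.
def _demo_box_crop():
    crop_bbox = np.array([50, 40, 250, 200])  # x1, y1, x2, y2
    boxes = np.array([[30., 60., 120., 180.]])
    # x is shifted by -50 and clipped to [0, 199]; y by -40, clipped to [0, 159]
    return RandomCrop._box_crop(boxes, crop_bbox)  # [[0. 20. 70. 140.]]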
@PIPELINES.register_module()
class RandomResizedCrop(RandomCrop):
"""Random crop that specifics the area and height-weight ratio range.
Required keys in results are "img_shape", "crop_bbox", "imgs" (optional),
"keypoint" (optional), added or modified keys are "imgs", "keypoint",
"crop_bbox" and "lazy"; Required keys in "lazy" are "flip", "crop_bbox",
added or modified key is "crop_bbox".
Args:
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of
output cropped images. Default: (3 / 4, 4 / 3).
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
area_range=(0.08, 1.0),
aspect_ratio_range=(3 / 4, 4 / 3),
lazy=False):
self.area_range = area_range
self.aspect_ratio_range = aspect_ratio_range
self.lazy = lazy
if not mmcv.is_tuple_of(self.area_range, float):
raise TypeError(f'Area_range must be a tuple of float, '
f'but got {type(area_range)}')
if not mmcv.is_tuple_of(self.aspect_ratio_range, float):
raise TypeError(f'Aspect_ratio_range must be a tuple of float, '
f'but got {type(aspect_ratio_range)}')
@staticmethod
def get_crop_bbox(img_shape,
area_range,
aspect_ratio_range,
max_attempts=10):
"""Get a crop bbox given the area range and aspect ratio range.
Args:
img_shape (Tuple[int]): Image shape
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect
ratio range of output cropped images. Default: (3 / 4, 4 / 3).
            max_attempts (int): Max number of attempts to generate a random
                candidate bounding box. If none qualifies, a center crop is
                used as the fallback. Default: 10.
Returns:
(list[int]) A random crop bbox within the area range and aspect
ratio range.
"""
assert 0 < area_range[0] <= area_range[1] <= 1
assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
img_h, img_w = img_shape
area = img_h * img_w
min_ar, max_ar = aspect_ratio_range
aspect_ratios = np.exp(
np.random.uniform(
np.log(min_ar), np.log(max_ar), size=max_attempts))
target_areas = np.random.uniform(*area_range, size=max_attempts) * area
candidate_crop_w = np.round(np.sqrt(target_areas *
aspect_ratios)).astype(np.int32)
candidate_crop_h = np.round(np.sqrt(target_areas /
aspect_ratios)).astype(np.int32)
for i in range(max_attempts):
crop_w = candidate_crop_w[i]
crop_h = candidate_crop_h[i]
if crop_h <= img_h and crop_w <= img_w:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h
# Fallback
crop_size = min(img_h, img_w)
x_offset = (img_w - crop_size) // 2
y_offset = (img_h - crop_size) // 2
return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size
def __call__(self, results):
"""Performs the RandomResizeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
left, top, right, bottom = self.get_crop_bbox(
(img_h, img_w), self.area_range, self.aspect_ratio_range)
new_h, new_w = bottom - top, right - left
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'area_range={self.area_range}, '
f'aspect_ratio_range={self.aspect_ratio_range}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class MultiScaleCrop(RandomCrop):
"""Crop images with a list of randomly selected scales.
Randomly select the w and h scales from a list of scales. Scale of 1 means
    the base size, which is the minimum of image width and height. The scale
level of w and h is controlled to be smaller than a certain value to
prevent too large or small aspect ratio.
Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
added or modified keys are "imgs", "crop_bbox", "img_shape", "lazy" and
"scales". Required keys in "lazy" are "crop_bbox", added or modified key is
"crop_bbox".
Args:
input_size (int | tuple[int]): (w, h) of network input.
scales (tuple[float]): width and height scales to be selected.
max_wh_scale_gap (int): Maximum gap of w and h scale levels.
Default: 1.
random_crop (bool): If set to True, the cropping bbox will be randomly
            sampled, otherwise it will be sampled from fixed regions.
Default: False.
num_fixed_crops (int): If set to 5, the cropping bbox will keep 5
basic fixed regions: "upper left", "upper right", "lower left",
"lower right", "center". If set to 13, the cropping bbox will
append another 8 fix regions: "center left", "center right",
"lower center", "upper center", "upper left quarter",
"upper right quarter", "lower left quarter", "lower right quarter".
Default: 5.
lazy (bool): Determine whether to apply lazy operation. Default: False.
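    Example (illustrative sketch; the selected scale pair is random):
        >>> import numpy as np
        >>> results = dict(imgs=[np.zeros((256, 340, 3), np.uint8)],
        ...                img_shape=(256, 340))
        >>> crop = MultiScaleCrop(224, scales=(1, 0.875, 0.75))
        >>> # candidate edges are min(256, 340) * scales = [256, 224, 192]
        >>> results = crop(results)
        >>> results['img_shape'][0] in (256, 224, 192)
        True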
"""
def __init__(self,
input_size,
scales=(1, ),
max_wh_scale_gap=1,
random_crop=False,
num_fixed_crops=5,
lazy=False):
self.input_size = _pair(input_size)
if not mmcv.is_tuple_of(self.input_size, int):
raise TypeError(f'Input_size must be int or tuple of int, '
f'but got {type(input_size)}')
if not isinstance(scales, tuple):
raise TypeError(f'Scales must be tuple, but got {type(scales)}')
if num_fixed_crops not in [5, 13]:
            raise ValueError(f'Num_fixed_crops must be in {[5, 13]}, '
f'but got {num_fixed_crops}')
self.scales = scales
self.max_wh_scale_gap = max_wh_scale_gap
self.random_crop = random_crop
self.num_fixed_crops = num_fixed_crops
self.lazy = lazy
def __call__(self, results):
"""Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
base_size = min(img_h, img_w)
crop_sizes = [int(base_size * s) for s in self.scales]
candidate_sizes = []
for i, h in enumerate(crop_sizes):
for j, w in enumerate(crop_sizes):
if abs(i - j) <= self.max_wh_scale_gap:
candidate_sizes.append([w, h])
crop_size = random.choice(candidate_sizes)
for i in range(2):
if abs(crop_size[i] - self.input_size[i]) < 3:
crop_size[i] = self.input_size[i]
crop_w, crop_h = crop_size
if self.random_crop:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
else:
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
candidate_offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
if self.num_fixed_crops == 13:
extra_candidate_offsets = [
(0, 2 * h_step), # center left
(4 * w_step, 2 * h_step), # center right
(2 * w_step, 4 * h_step), # lower center
(2 * w_step, 0 * h_step), # upper center
(1 * w_step, 1 * h_step), # upper left quarter
(3 * w_step, 1 * h_step), # upper right quarter
(1 * w_step, 3 * h_step), # lower left quarter
(3 * w_step, 3 * h_step) # lower right quarter
]
candidate_offsets.extend(extra_candidate_offsets)
x_offset, y_offset = random.choice(candidate_offsets)
new_h, new_w = crop_h, crop_w
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
results['scales'] = self.scales
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'input_size={self.input_size}, scales={self.scales}, '
f'max_wh_scale_gap={self.max_wh_scale_gap}, '
f'random_crop={self.random_crop}, '
f'num_fixed_crops={self.num_fixed_crops}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class Resize:
"""Resize images to a specific size.
Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
(optional), added or modified keys are "imgs", "img_shape", "keep_ratio",
"scale_factor", "lazy", "resize_size". Required keys in "lazy" is None,
added or modified key is "interpolation".
Args:
scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling
factor or maximum size:
If it is a float number, the image will be rescaled by this
factor, else if it is a tuple of 2 integers, the image will
be rescaled as large as possible within the scale.
Otherwise, it serves as (w, h) of output size.
keep_ratio (bool): If set to True, Images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Default: True.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
lazy (bool): Determine whether to apply lazy operation. Default: False.
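    Example (illustrative sketch; a (-1, 256) scale rescales the short
    edge to 256 while keeping the aspect ratio):
        >>> import numpy as np
        >>> results = dict(imgs=[np.zeros((240, 320, 3), np.uint8)],
        ...                img_shape=(240, 320), modality='RGB')
        >>> results = Resize(scale=(-1, 256))(results)
        >>> results['img_shape'][0]
        256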
"""
def __init__(self,
scale,
keep_ratio=True,
interpolation='bilinear',
lazy=False):
if isinstance(scale, float):
if scale <= 0:
raise ValueError(f'Invalid scale {scale}, must be positive.')
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
# assign np.inf to long edge for rescaling short edge later.
scale = (np.inf, max_long_edge)
else:
raise TypeError(
f'Scale must be float or tuple of int, but got {type(scale)}')
self.scale = scale
self.keep_ratio = keep_ratio
self.interpolation = interpolation
self.lazy = lazy
def _resize_imgs(self, imgs, new_w, new_h):
return [
mmcv.imresize(
img, (new_w, new_h), interpolation=self.interpolation)
for img in imgs
]
@staticmethod
def _resize_kps(kps, scale_factor):
return kps * scale_factor
@staticmethod
def _box_resize(box, scale_factor):
"""Rescale the bounding boxes according to the scale_factor.
Args:
box (np.ndarray): The bounding boxes.
scale_factor (np.ndarray): The scale factor used for rescaling.
"""
assert len(scale_factor) == 2
scale_factor = np.concatenate([scale_factor, scale_factor])
return box * scale_factor
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
if 'scale_factor' not in results:
results['scale_factor'] = np.array([1, 1], dtype=np.float32)
img_h, img_w = results['img_shape']
if self.keep_ratio:
new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
else:
new_w, new_h = self.scale
self.scale_factor = np.array([new_w / img_w, new_h / img_h],
dtype=np.float32)
results['img_shape'] = (new_h, new_w)
results['keep_ratio'] = self.keep_ratio
results['scale_factor'] = results['scale_factor'] * self.scale_factor
if not self.lazy:
if 'imgs' in results:
results['imgs'] = self._resize_imgs(results['imgs'], new_w,
new_h)
if 'keypoint' in results:
results['keypoint'] = self._resize_kps(results['keypoint'],
self.scale_factor)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
lazyop['interpolation'] = self.interpolation
if 'gt_bboxes' in results:
assert not self.lazy
results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],
self.scale_factor)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_resize(
results['proposals'], self.scale_factor)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scale={self.scale}, keep_ratio={self.keep_ratio}, '
f'interpolation={self.interpolation}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class RandomRescale:
"""Randomly resize images so that the short_edge is resized to a specific
    size in a given range. The aspect ratio is unchanged after resizing.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "resize_size",
"short_edge".
Args:
scale_range (tuple[int]): The range of short edge length. A closed
interval.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
"""
def __init__(self, scale_range, interpolation='bilinear'):
self.scale_range = scale_range
        # sanity-check scale_range: verify the type first, then the values
assert mmcv.is_tuple_of(scale_range, int)
assert len(scale_range) == 2
assert scale_range[0] < scale_range[1]
assert np.all([x > 0 for x in scale_range])
self.keep_ratio = True
self.interpolation = interpolation
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
short_edge = np.random.randint(self.scale_range[0],
self.scale_range[1] + 1)
resize = Resize((-1, short_edge),
keep_ratio=True,
interpolation=self.interpolation,
lazy=False)
results = resize(results)
results['short_edge'] = short_edge
return results
def __repr__(self):
scale_range = self.scale_range
repr_str = (f'{self.__class__.__name__}('
f'scale_range=({scale_range[0]}, {scale_range[1]}), '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class Flip:
"""Flip the input images with a probability.
Reverse the order of elements in the given imgs with a specific direction.
The shape of the imgs is preserved, but the elements are reordered.
Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
(optional), added or modified keys are "imgs", "keypoint", "lazy" and
"flip_direction". Required keys in "lazy" is None, added or modified key
are "flip" and "flip_direction". The Flip augmentation should be placed
after any cropping / reshaping augmentations, to make sure crop_quadruple
is calculated properly.
Args:
flip_ratio (float): Probability of implementing flip. Default: 0.5.
direction (str): Flip imgs horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
flip_label_map (Dict[int, int] | None): Transform the label of the
flipped image with the specific label. Default: None.
left_kp (list[int]): Indexes of left keypoints, used to flip keypoints.
Default: None.
right_kp (list[ind]): Indexes of right keypoints, used to flip
keypoints. Default: None.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
_directions = ['horizontal', 'vertical']
def __init__(self,
flip_ratio=0.5,
direction='horizontal',
flip_label_map=None,
left_kp=None,
right_kp=None,
lazy=False):
if direction not in self._directions:
raise ValueError(f'Direction {direction} is not supported. '
f'Currently support ones are {self._directions}')
self.flip_ratio = flip_ratio
self.direction = direction
self.flip_label_map = flip_label_map
self.left_kp = left_kp
self.right_kp = right_kp
self.lazy = lazy
def _flip_imgs(self, imgs, modality):
_ = [mmcv.imflip_(img, self.direction) for img in imgs]
lt = len(imgs)
if modality == 'Flow':
# The 1st frame of each 2 frames is flow-x
for i in range(0, lt, 2):
imgs[i] = mmcv.iminvert(imgs[i])
return imgs
def _flip_kps(self, kps, kpscores, img_width):
kp_x = kps[..., 0]
kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0]
new_order = list(range(kps.shape[2]))
if self.left_kp is not None and self.right_kp is not None:
for left, right in zip(self.left_kp, self.right_kp):
new_order[left] = right
new_order[right] = left
kps = kps[:, :, new_order]
if kpscores is not None:
kpscores = kpscores[:, :, new_order]
return kps, kpscores
@staticmethod
def _box_flip(box, img_width):
"""Flip the bounding boxes given the width of the image.
Args:
box (np.ndarray): The bounding boxes.
img_width (int): The img width.
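        Example (illustrative):
            >>> import numpy as np
            >>> Flip._box_flip(np.array([[10., 20., 50., 60.]]), 100).tolist()
            [[50.0, 20.0, 90.0, 60.0]]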
"""
box_ = box.copy()
box_[..., 0::4] = img_width - box[..., 2::4]
box_[..., 2::4] = img_width - box[..., 0::4]
return box_
def __call__(self, results):
"""Performs the Flip augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
assert self.direction == 'horizontal', (
                'Only horizontal flips are '
'supported for human keypoints')
modality = results['modality']
if modality == 'Flow':
assert self.direction == 'horizontal'
flip = np.random.rand() < self.flip_ratio
results['flip'] = flip
results['flip_direction'] = self.direction
img_width = results['img_shape'][1]
if self.flip_label_map is not None and flip:
results['label'] = self.flip_label_map.get(results['label'],
results['label'])
if not self.lazy:
if flip:
if 'imgs' in results:
results['imgs'] = self._flip_imgs(results['imgs'],
modality)
if 'keypoint' in results:
kp = results['keypoint']
kpscore = results.get('keypoint_score', None)
kp, kpscore = self._flip_kps(kp, kpscore, img_width)
results['keypoint'] = kp
if 'keypoint_score' in results:
results['keypoint_score'] = kpscore
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Use one Flip please')
lazyop['flip'] = flip
lazyop['flip_direction'] = self.direction
if 'gt_bboxes' in results and flip:
assert not self.lazy and self.direction == 'horizontal'
width = results['img_shape'][1]
results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_flip(results['proposals'],
width)
return results
def __repr__(self):
repr_str = (
f'{self.__class__.__name__}('
f'flip_ratio={self.flip_ratio}, direction={self.direction}, '
f'flip_label_map={self.flip_label_map}, lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class Normalize:
"""Normalize images with the given mean and std value.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs" and "img_norm_cfg". If modality is 'Flow', additional
keys "scale_factor" is required
Args:
mean (Sequence[float]): Mean values of different channels.
std (Sequence[float]): Std values of different channels.
to_bgr (bool): Whether to convert channels from RGB to BGR.
Default: False.
adjust_magnitude (bool): Indicate whether to adjust the flow magnitude
on 'scale_factor' when modality is 'Flow'. Default: False.
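    Example (illustrative sketch with synthetic RGB frames):
        >>> import numpy as np
        >>> results = dict(imgs=[np.full((4, 4, 3), 128, np.uint8)],
        ...                modality='RGB')
        >>> norm = Normalize(mean=[128, 128, 128], std=[1, 1, 1])
        >>> float(norm(results)['imgs'].mean())
        0.0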
"""
def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):
if not isinstance(mean, Sequence):
raise TypeError(
f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'
)
if not isinstance(std, Sequence):
raise TypeError(
f'Std must be list, tuple or np.ndarray, but got {type(std)}')
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_bgr = to_bgr
self.adjust_magnitude = adjust_magnitude
def __call__(self, results):
modality = results['modality']
if modality == 'RGB':
n = len(results['imgs'])
h, w, c = results['imgs'][0].shape
imgs = np.empty((n, h, w, c), dtype=np.float32)
for i, img in enumerate(results['imgs']):
imgs[i] = img
for img in imgs:
mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr)
results['imgs'] = imgs
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_bgr=self.to_bgr)
return results
if modality == 'Flow':
num_imgs = len(results['imgs'])
assert num_imgs % 2 == 0
assert self.mean.shape[0] == 2
assert self.std.shape[0] == 2
n = num_imgs // 2
h, w = results['imgs'][0].shape
x_flow = np.empty((n, h, w), dtype=np.float32)
y_flow = np.empty((n, h, w), dtype=np.float32)
for i in range(n):
x_flow[i] = results['imgs'][2 * i]
y_flow[i] = results['imgs'][2 * i + 1]
x_flow = (x_flow - self.mean[0]) / self.std[0]
y_flow = (y_flow - self.mean[1]) / self.std[1]
if self.adjust_magnitude:
x_flow = x_flow * results['scale_factor'][0]
y_flow = y_flow * results['scale_factor'][1]
imgs = np.stack([x_flow, y_flow], axis=-1)
results['imgs'] = imgs
args = dict(
mean=self.mean,
std=self.std,
to_bgr=self.to_bgr,
adjust_magnitude=self.adjust_magnitude)
results['img_norm_cfg'] = args
return results
raise NotImplementedError
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'mean={self.mean}, '
f'std={self.std}, '
f'to_bgr={self.to_bgr}, '
f'adjust_magnitude={self.adjust_magnitude})')
return repr_str
@PIPELINES.register_module()
class ColorJitter:
"""Randomly distort the brightness, contrast, saturation and hue of images,
and add PCA based noise into images.
Note: The input images should be in RGB channel order.
Code Reference:
https://gluon-cv.mxnet.io/_modules/gluoncv/data/transforms/experimental/image.html
https://mxnet.apache.org/api/python/docs/_modules/mxnet/image/image.html#LightingAug
If specified to apply color space augmentation, it will distort the image
color space by changing brightness, contrast and saturation. Then, it will
add some random distort to the images in different color channels.
Note that the input images should be in original range [0, 255] and in RGB
channel sequence.
Required keys are "imgs", added or modified keys are "imgs", "eig_val",
"eig_vec", "alpha_std" and "color_space_aug".
Args:
color_space_aug (bool): Whether to apply color space augmentations. If
specified, it will change the brightness, contrast, saturation and
hue of images, then add PCA based noise to images. Otherwise, it
will directly add PCA based noise to images. Default: False.
alpha_std (float): Std in the normal Gaussian distribution of alpha.
eig_val (np.ndarray | None): Eigenvalues of [1 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvalues. Default: None.
eig_vec (np.ndarray | None): Eigenvectors of [3 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvectors. Default: None.
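    Example (illustrative sketch; inputs are assumed to be float32 RGB
    frames in [0, 255], and the applied distortions are random):
        >>> import numpy as np
        >>> jitter = ColorJitter(color_space_aug=True)
        >>> results = dict(imgs=[np.random.rand(4, 4, 3).astype(np.float32)
        ...                      * 255])
        >>> results = jitter(results)
        >>> results['imgs'][0].shape
        (4, 4, 3)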
"""
def __init__(self,
color_space_aug=False,
alpha_std=0.1,
eig_val=None,
eig_vec=None):
if eig_val is None:
# note that the data range should be [0, 255]
self.eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
else:
self.eig_val = eig_val
if eig_vec is None:
self.eig_vec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype=np.float32)
else:
self.eig_vec = eig_vec
self.alpha_std = alpha_std
self.color_space_aug = color_space_aug
@staticmethod
def brightness(img, delta):
"""Brightness distortion.
Args:
img (np.ndarray): An input image.
delta (float): Delta value to distort brightness.
It ranges from [-32, 32).
Returns:
np.ndarray: A brightness distorted image.
"""
if np.random.rand() > 0.5:
img = img + np.float32(delta)
return img
@staticmethod
def contrast(img, alpha):
"""Contrast distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort contrast.
It ranges from [0.6, 1.4).
Returns:
np.ndarray: A contrast distorted image.
"""
if np.random.rand() > 0.5:
img = img * np.float32(alpha)
return img
@staticmethod
def saturation(img, alpha):
"""Saturation distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort the saturation.
It ranges from [0.6, 1.4).
Returns:
np.ndarray: A saturation distorted image.
"""
if np.random.rand() > 0.5:
gray = img * np.array([0.299, 0.587, 0.114], dtype=np.float32)
gray = np.sum(gray, 2, keepdims=True)
gray *= (1.0 - alpha)
img = img * alpha
img = img + gray
return img
@staticmethod
def hue(img, alpha):
"""Hue distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to control the degree of rotation
for hue. It ranges from [-18, 18).
Returns:
np.ndarray: A hue distorted image.
"""
if np.random.rand() > 0.5:
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]],
dtype=np.float32)
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]],
dtype=np.float32)
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]],
dtype=np.float32)
t = np.dot(np.dot(ityiq, bt), tyiq).T
t = np.array(t, dtype=np.float32)
img = np.dot(img, t)
return img
def __call__(self, results):
imgs = results['imgs']
out = []
if self.color_space_aug:
bright_delta = np.random.uniform(-32, 32)
contrast_alpha = np.random.uniform(0.6, 1.4)
saturation_alpha = np.random.uniform(0.6, 1.4)
hue_alpha = np.random.uniform(-18, 18)
jitter_coin = np.random.rand()
for img in imgs:
img = self.brightness(img, delta=bright_delta)
if jitter_coin > 0.5:
img = self.contrast(img, alpha=contrast_alpha)
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
else:
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
img = self.contrast(img, alpha=contrast_alpha)
out.append(img)
else:
out = imgs
# Add PCA based noise
alpha = np.random.normal(0, self.alpha_std, size=(3, ))
rgb = np.array(
np.dot(self.eig_vec * alpha, self.eig_val), dtype=np.float32)
rgb = rgb[None, None, ...]
results['imgs'] = [img + rgb for img in out]
results['eig_val'] = self.eig_val
results['eig_vec'] = self.eig_vec
results['alpha_std'] = self.alpha_std
results['color_space_aug'] = self.color_space_aug
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'color_space_aug={self.color_space_aug}, '
f'alpha_std={self.alpha_std}, '
f'eig_val={self.eig_val}, '
f'eig_vec={self.eig_vec})')
return repr_str
@PIPELINES.register_module()
class CenterCrop(RandomCrop):
"""Crop the center area from images.
Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
added or modified keys are "imgs", "keypoint", "crop_bbox", "lazy" and
"img_shape". Required keys in "lazy" is "crop_bbox", added or modified key
is "crop_bbox".
Args:
crop_size (int | tuple[int]): (w, h) of crop size.
lazy (bool): Determine whether to apply lazy operation. Default: False.
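    Example (illustrative; a 224x224 center crop of a 240x320 frame):
        >>> import numpy as np
        >>> results = dict(imgs=[np.zeros((240, 320, 3), np.uint8)],
        ...                img_shape=(240, 320))
        >>> results = CenterCrop(224)(results)
        >>> results['crop_bbox'].tolist()
        [48, 8, 272, 232]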
"""
def __init__(self, crop_size, lazy=False):
self.crop_size = _pair(crop_size)
self.lazy = lazy
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the CenterCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
crop_w, crop_h = self.crop_size
left = (img_w - crop_w) // 2
top = (img_h - crop_h) // 2
right = left + crop_w
bottom = top + crop_h
new_h, new_w = bottom - top, right - left
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class ThreeCrop:
"""Crop images into three crops.
Crop the images equally into three crops with equal intervals along the
shorter side.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
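    Example (illustrative; each input frame yields three crops):
        >>> import numpy as np
        >>> results = dict(imgs=[np.zeros((224, 320, 3), np.uint8)],
        ...                img_shape=(224, 320))
        >>> results = ThreeCrop(224)(results)
        >>> len(results['imgs'])
        3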
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the ThreeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('ThreeCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
assert crop_h == img_h or crop_w == img_w
if crop_h == img_h:
w_step = (img_w - crop_w) // 2
offsets = [
(0, 0), # left
(2 * w_step, 0), # right
(w_step, 0), # middle
]
elif crop_w == img_w:
h_step = (img_h - crop_h) // 2
offsets = [
(0, 0), # top
(0, 2 * h_step), # down
(0, h_step), # middle
]
cropped = []
crop_bboxes = []
for x_offset, y_offset in offsets:
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
cropped.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = cropped
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
@PIPELINES.register_module()
class TenCrop:
"""Crop the images into 10 crops (corner + center + flip).
Crop the four corners and the center part of the image with the same
given crop_size, and flip it horizontally.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
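    Example (illustrative; 5 positions, each kept and flipped):
        >>> import numpy as np
        >>> results = dict(imgs=[np.zeros((256, 340, 3), np.uint8)],
        ...                img_shape=(256, 340))
        >>> results = TenCrop(224)(results)
        >>> len(results['imgs'])
        10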
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the TenCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('TenCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
img_crops = list()
crop_bboxes = list()
        for x_offset, y_offset in offsets:
            crop = [
                img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
                for img in imgs
            ]
            flip_crop = [np.flip(c, axis=1).copy() for c in crop]
            bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
img_crops.extend(crop)
img_crops.extend(flip_crop)
crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
@PIPELINES.register_module()
class MultiGroupCrop:
"""Randomly crop the images into several groups.
Crop the random region with the same given crop_size and bounding box
into several groups.
Required keys are "imgs", added or modified keys are "imgs", "crop_bbox"
and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
groups(int): Number of groups.
"""
def __init__(self, crop_size, groups):
self.crop_size = _pair(crop_size)
self.groups = groups
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError('Crop size must be int or tuple of int, '
f'but got {type(crop_size)}')
if not isinstance(groups, int):
raise TypeError(f'Groups must be int, but got {type(groups)}.')
if groups <= 0:
raise ValueError('Groups must be positive.')
def __call__(self, results):
"""Performs the MultiGroupCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('MultiGroupCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = imgs[0].shape[:2]
crop_w, crop_h = self.crop_size
img_crops = []
crop_bboxes = []
for _ in range(self.groups):
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
img_crops.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(crop_size={self.crop_size}, '
f'groups={self.groups})')
return repr_str
@PIPELINES.register_module()
class AudioAmplify:
"""Amplify the waveform.
Required keys are "audios", added or modified keys are "audios",
"amplify_ratio".
Args:
ratio (float): The ratio used to amplify the audio waveform.
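    Example (illustrative):
        >>> import numpy as np
        >>> results = dict(audios=np.ones(4, dtype=np.float32))
        >>> AudioAmplify(ratio=2.0)(results)['audios'].tolist()
        [2.0, 2.0, 2.0, 2.0]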
"""
def __init__(self, ratio):
if isinstance(ratio, float):
self.ratio = ratio
else:
raise TypeError('Amplification ratio should be float.')
def __call__(self, results):
"""Perfrom the audio amplification.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'audios' in results
results['audios'] *= self.ratio
results['amplify_ratio'] = self.ratio
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(ratio={self.ratio})'
return repr_str
@PIPELINES.register_module()
class MelSpectrogram:
"""MelSpectrogram. Transfer an audio wave into a melspectogram figure.
Required keys are "audios", "sample_rate", "num_clips", added or modified
keys are "audios".
Args:
        window_size (int): The window size in milliseconds. Default: 32.
        step_size (int): The step size in milliseconds. Default: 16.
        n_mels (int): Number of mels. Default: 80.
        fixed_length (int): The sample length of the mel spectrogram may
            not be exactly as desired due to varying fps; fix the length
            for batch collation by truncating or padding. Default: 128.
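    Example (illustrative sketch; requires ``librosa``; assumes audio laid
    out as ``(num_clips, num_samples)``):
        results = dict(audios=np.random.randn(1, 32000).astype(np.float32),
                       sample_rate=16000, num_clips=1)
        results = MelSpectrogram(fixed_length=128)(results)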
"""
def __init__(self,
window_size=32,
step_size=16,
n_mels=80,
fixed_length=128):
if all(
isinstance(x, int)
for x in [window_size, step_size, n_mels, fixed_length]):
self.window_size = window_size
self.step_size = step_size
self.n_mels = n_mels
self.fixed_length = fixed_length
else:
raise TypeError('All arguments should be int.')
def __call__(self, results):
"""Perform MelSpectrogram transformation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Install librosa first.')
signals = results['audios']
sample_rate = results['sample_rate']
n_fft = int(round(sample_rate * self.window_size / 1000))
hop_length = int(round(sample_rate * self.step_size / 1000))
melspectrograms = list()
for clip_idx in range(results['num_clips']):
clip_signal = signals[clip_idx]
mel = librosa.feature.melspectrogram(
y=clip_signal,
sr=sample_rate,
n_fft=n_fft,
hop_length=hop_length,
n_mels=self.n_mels)
if mel.shape[0] >= self.fixed_length:
mel = mel[:self.fixed_length, :]
else:
                mel = np.pad(
                    mel, ((0, self.fixed_length - mel.shape[0]), (0, 0)),
                    mode='edge')
melspectrograms.append(mel)
results['audios'] = np.array(melspectrograms)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(window_size={self.window_size}), '
f'step_size={self.step_size}, '
f'n_mels={self.n_mels}, '
f'fixed_length={self.fixed_length})')
return repr_str
| 79,509 | 37.207593 | 104 | py |
STTS | STTS-main/VideoSwin/mmaction/datasets/pipelines/formating.py | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
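    Example (illustrative):
        >>> import numpy as np
        >>> to_tensor(np.arange(3))
        tensor([0, 1, 2])
        >>> to_tensor(1.5)
        tensor([1.5000])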
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type in data
loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Rename:
"""Rename the key in results.
Args:
mapping (dict): The keys in results that need to be renamed. The key of
the dict is the original name, while the value is the new name. If
the original name not found in results, do nothing.
Default: dict().
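    Example (illustrative):
        >>> Rename(dict(gt_bboxes='bboxes'))(dict(gt_bboxes=[[0, 0, 1, 1]]))
        {'bboxes': [[0, 0, 1, 1]]}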
"""
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, results):
for key, value in self.mapping.items():
if key in results:
assert isinstance(key, str) and isinstance(value, str)
assert value not in results, ('the new name already exists in '
'results')
results[value] = results[key]
results.pop(key)
return results
@PIPELINES.register_module()
class ToDataContainer:
"""Convert the data to DataContainer.
Args:
fields (Sequence[dict]): Required fields to be converted
with keys and attributes. E.g.
fields=(dict(key='gt_bbox', stack=False),).
Note that key can also be a list of keys, if so, every tensor in
the list will be converted to DataContainer.
"""
def __init__(self, fields):
self.fields = fields
def __call__(self, results):
"""Performs the ToDataContainer formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for field in self.fields:
_field = field.copy()
key = _field.pop('key')
if isinstance(key, list):
for item in key:
results[item] = DC(results[item], **_field)
else:
results[key] = DC(results[key], **_field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ImageToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose image channels to a given order.
Args:
keys (Sequence[str]): Required keys to be converted.
order (Sequence[int]): Image channel order.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
This keeps the items in ``keys`` as it is, and collect items in
``meta_keys`` into a meta item called ``meta_name``.This is usually
the last stage of the data loader pipeline.
For example, when keys='imgs', meta_keys=('filename', 'label',
'original_shape'), meta_name='img_metas', the results will be a dict with
keys 'imgs' and 'img_metas', where 'img_metas' is a DataContainer of
another dict with keys 'filename', 'label', 'original_shape'.
Args:
keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
This key is always populated. Default: "img_metas".
meta_keys (Sequence[str]): Keys that are collected under meta_name.
The contents of the ``meta_name`` dictionary depends on
``meta_keys``.
By default this includes:
- "filename": path to the image file
- "label": label of the image file
- "original_shape": original shape of the image as a tuple
(h, w, c)
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "pad_shape": image shape after padding
- "flip_direction": a str in ("horiziontal", "vertival") to
indicate if the image is fliped horizontally or vertically.
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
nested (bool): If set as True, will apply data[x] = [data[x]] to all
items in data. The arg is added for compatibility. Default: False.
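    Example (illustrative sketch with a minimal results dict):
        >>> import numpy as np
        >>> results = dict(imgs=np.zeros((1, 3, 4, 4)), label=1,
        ...                filename='demo.mp4', original_shape=(4, 4, 3),
        ...                img_shape=(4, 4, 3), pad_shape=(4, 4, 3),
        ...                flip_direction=None, img_norm_cfg=dict())
        >>> data = Collect(keys=['imgs', 'label'])(results)
        >>> sorted(data.keys())
        ['img_metas', 'imgs', 'label']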
"""
def __init__(self,
keys,
meta_keys=('filename', 'label', 'original_shape', 'img_shape',
'pad_shape', 'flip_direction', 'img_norm_cfg'),
meta_name='img_metas',
nested=False):
self.keys = keys
self.meta_keys = meta_keys
self.meta_name = meta_name
self.nested = nested
def __call__(self, results):
"""Performs the Collect formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data = {}
for key in self.keys:
data[key] = results[key]
if len(self.meta_keys) != 0:
meta = {}
for key in self.meta_keys:
meta[key] = results[key]
data[self.meta_name] = DC(meta, cpu_only=True)
if self.nested:
for k in data:
data[k] = [data[k]]
return data
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, meta_keys={self.meta_keys}, '
f'nested={self.nested})')
@PIPELINES.register_module()
class FormatShape:
"""Format final imgs shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
        collapse (bool): To collapse input_format N... to ... (NCTHW to CTHW,
etc.) if N is 1. Should be set as True when training and testing
detectors. Default: False.
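    Example (illustrative; 8 RGB frames from a single clip):
        >>> import numpy as np
        >>> results = dict(imgs=np.zeros((8, 32, 32, 3), np.float32),
        ...                num_clips=1, clip_len=8)
        >>> FormatShape('NCTHW')(results)['input_shape']
        (1, 3, 8, 32, 32)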
"""
def __init__(self, input_format, collapse=False):
self.input_format = input_format
self.collapse = collapse
if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not isinstance(results['imgs'], np.ndarray):
results['imgs'] = np.array(results['imgs'])
imgs = results['imgs']
# [M x H x W x C]
# M = 1 * N_crops * N_clips * L
if self.collapse:
assert results['num_clips'] == 1
if self.input_format == 'NCTHW':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x L x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x L x H x W
# M' = N_crops x N_clips
elif self.input_format == 'NCHW':
imgs = np.transpose(imgs, (0, 3, 1, 2))
# M x C x H x W
elif self.input_format == 'NCHW_Flow':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
# N_crops x N_clips x L x C x H x W
imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
imgs.shape[4:])
# M' x C' x H x W
# M' = N_crops x N_clips
# C' = L x C
elif self.input_format == 'NPTCHW':
num_proposals = results['num_proposals']
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
imgs.shape[1:])
# P x M x H x W x C
# M = N_clips x L
imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
# P x M x C x H x W
if self.collapse:
assert imgs.shape[0] == 1
imgs = imgs.squeeze(0)
results['imgs'] = imgs
results['input_shape'] = imgs.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
@PIPELINES.register_module()
class FormatAudioShape:
"""Format final audio shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTF']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audios = results['audios']
# clip x sample x freq -> clip x channel x sample x freq
clip, sample, freq = audios.shape
audios = audios.reshape(clip, 1, sample, freq)
results['audios'] = audios
results['input_shape'] = audios.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
| 12,741 | 33.160858 | 79 | py |
STTS | STTS-main/VideoSwin/mmaction/utils/gradcam_utils.py | import torch
import torch.nn.functional as F
class GradCAM:
"""GradCAM class helps create visualization results.
Visualization results are blended by heatmaps and input images.
This class is modified from
https://github.com/facebookresearch/SlowFast/blob/master/slowfast/visualization/gradcam_utils.py # noqa
For more information about GradCAM, please visit:
https://arxiv.org/pdf/1610.02391.pdf
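    Example (illustrative sketch; ``recognizer`` is assumed to be a built
    and loaded recognizer whose ``cfg`` carries ``img_norm_cfg``, and
    ``data`` a batch from the test pipeline with ``imgs`` and ``label``):
        gradcam = GradCAM(recognizer, target_layer_name='backbone/layer4')
        blended_imgs, preds = gradcam(data, use_labels=True, alpha=0.5)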
"""
def __init__(self, model, target_layer_name, colormap='viridis'):
"""Create GradCAM class with recognizer, target layername & colormap.
Args:
model (nn.Module): the recognizer model to be used.
target_layer_name (str): name of convolutional layer to
be used to get gradients and feature maps from for creating
localization maps.
colormap (Optional[str]): matplotlib colormap used to create
heatmap. Default: 'viridis'. For more information, please visit
https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html
"""
from ..models.recognizers import Recognizer2D, Recognizer3D
if isinstance(model, Recognizer2D):
self.is_recognizer2d = True
elif isinstance(model, Recognizer3D):
self.is_recognizer2d = False
else:
raise ValueError(
'GradCAM utils only support Recognizer2D & Recognizer3D.')
self.model = model
self.model.eval()
self.target_gradients = None
self.target_activations = None
import matplotlib.pyplot as plt
self.colormap = plt.get_cmap(colormap)
self.data_mean = torch.tensor(model.cfg.img_norm_cfg['mean'])
self.data_std = torch.tensor(model.cfg.img_norm_cfg['std'])
self._register_hooks(target_layer_name)
def _register_hooks(self, layer_name):
"""Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.target_gradients = grad_output[0].detach()
def get_activations(module, input, output):
self.target_activations = output.clone().detach()
layer_ls = layer_name.split('/')
prev_module = self.model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
target_layer = prev_module
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients)
def _calculate_localization_map(self, inputs, use_labels, delta=1e-20):
"""Calculate localization map for all inputs with Grad-CAM.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
tuple[torch.Tensor, torch.Tensor]: (localization_map, preds)
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions for `inputs` with
shape (batch_size, num_classes).
"""
inputs['imgs'] = inputs['imgs'].clone()
# model forward & backward
preds = self.model(gradcam=True, **inputs)
if use_labels:
labels = inputs['label']
if labels.ndim == 1:
labels = labels.unsqueeze(-1)
score = torch.gather(preds, dim=1, index=labels)
else:
score = torch.max(preds, dim=-1)[0]
self.model.zero_grad()
score = torch.sum(score)
score.backward()
if self.is_recognizer2d:
# [batch_size, num_segments, 3, H, W]
b, t, _, h, w = inputs['imgs'].size()
else:
# [batch_size, num_crops*num_clips, 3, clip_len, H, W]
b1, b2, _, t, h, w = inputs['imgs'].size()
b = b1 * b2
gradients = self.target_gradients
activations = self.target_activations
if self.is_recognizer2d:
# [B*Tg, C', H', W']
b_tg, c, _, _ = gradients.size()
tg = b_tg // b
else:
# source shape: [B, C', Tg, H', W']
_, c, tg, _, _ = gradients.size()
# target shape: [B, Tg, C', H', W']
gradients = gradients.permute(0, 2, 1, 3, 4)
activations = activations.permute(0, 2, 1, 3, 4)
# calculate & resize to [B, 1, T, H, W]
weights = torch.mean(gradients.view(b, tg, c, -1), dim=3)
weights = weights.view(b, tg, c, 1, 1)
activations = activations.view([b, tg, c] +
list(activations.size()[-2:]))
localization_map = torch.sum(
weights * activations, dim=2, keepdim=True)
localization_map = F.relu(localization_map)
localization_map = localization_map.permute(0, 2, 1, 3, 4)
localization_map = F.interpolate(
localization_map,
size=(t, h, w),
mode='trilinear',
align_corners=False)
# Normalize the localization map.
localization_map_min, localization_map_max = (
torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0],
torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0])
localization_map_min = torch.reshape(
localization_map_min, shape=(b, 1, 1, 1, 1))
localization_map_max = torch.reshape(
localization_map_max, shape=(b, 1, 1, 1, 1))
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + delta)
localization_map = localization_map.data
return localization_map.squeeze(dim=1), preds
def _alpha_blending(self, localization_map, input_imgs, alpha):
"""Blend heatmaps and model input images and get visulization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM
input_imgs (torch.Tensor): model inputs, normed images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
"""
# localization_map shape [B, T, H, W]
localization_map = localization_map.cpu()
# heatmap shape [B, T, H, W, 3] in RGB order
heatmap = self.colormap(localization_map.detach().numpy())
heatmap = heatmap[:, :, :, :, :3]
heatmap = torch.from_numpy(heatmap)
# Permute input imgs to [B, T, H, W, 3], like heatmap
if self.is_recognizer2d:
# Recognizer2D input (B, T, C, H, W)
curr_inp = input_imgs.permute(0, 1, 3, 4, 2)
else:
# Recognizer3D input (B', num_clips*num_crops, C, T, H, W)
# B = B' * num_clips * num_crops
curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:]))
curr_inp = curr_inp.permute(0, 2, 3, 4, 1)
# renormalize input imgs to [0, 1]
curr_inp = curr_inp.cpu()
curr_inp *= self.data_std
curr_inp += self.data_mean
curr_inp /= 255.
# alpha blending
blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp
return blended_imgs
def __call__(self, inputs, use_labels=False, alpha=0.5):
"""Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
For example, for I3D model, if `clip_len=32, num_clips=10` and
use `ThreeCrop` in test pipeline, then for every model inputs,
there are 960(32*10*3) images generated.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
"""
# localization_map shape [B, T, H, W]
# preds shape [batch_size, num_classes]
localization_map, preds = self._calculate_localization_map(
inputs, use_labels=use_labels)
# blended_imgs shape [B, T, H, W, 3]
blended_imgs = self._alpha_blending(localization_map, inputs['imgs'],
alpha)
# blended_imgs shape [B, T, H, W, 3]
# preds shape [batch_size, num_classes]
# Recognizer2D: B = batch_size, T = num_segments
# Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len
return blended_imgs, preds
| 9,645 | 40.577586 | 107 | py |
STTS | STTS-main/VideoSwin/mmaction/utils/precise_bn.py | # Adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/precise_bn.py # noqa: E501
# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501
import logging
import time
import mmcv
import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import Hook
from mmcv.utils import print_log
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
def is_parallel_module(module):
"""Check if a module is a parallel module.
The following 3 modules (and their subclasses) are regarded as parallel
modules: DataParallel, DistributedDataParallel,
MMDistributedDataParallel (the deprecated version).
Args:
module (nn.Module): The module to be checked.
Returns:
bool: True if the input module is a parallel module.
"""
parallels = (DataParallel, DistributedDataParallel,
MMDistributedDataParallel)
return bool(isinstance(module, parallels))
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
"""Recompute and update the batch norm stats to make them more precise.
During
training both BN stats and the weight are changing after every iteration,
so the running average can not precisely reflect the actual stats of the
current model.
In this function, the BN stats are recomputed with fixed weights, to make
the running average more precise. Specifically, it computes the true
average of per-batch mean/variance instead of the running average.
Args:
model (nn.Module): The model whose bn stats will be recomputed.
data_loader (iterator): The DataLoader iterator.
num_iters (int): number of iterations to compute the stats.
logger (:obj:`logging.Logger` | None): Logger for logging.
Default: None.
"""
model.train()
    assert len(data_loader) >= num_iters, (
        f'length of dataloader {len(data_loader)} must be at least the '
        f'requested iteration number {num_iters}')
if is_parallel_module(model):
parallel_module = model
model = model.module
else:
parallel_module = model
# Finds all the bn layers with training=True.
bn_layers = [
m for m in model.modules() if m.training and isinstance(m, _BatchNorm)
]
if len(bn_layers) == 0:
print_log('No BN found in model', logger=logger, level=logging.WARNING)
return
print_log(f'{len(bn_layers)} BN found', logger=logger)
# Finds all the other norm layers with training=True.
for m in model.modules():
if m.training and isinstance(m, (_InstanceNorm, GroupNorm)):
print_log(
'IN/GN stats will be updated like training.',
logger=logger,
level=logging.WARNING)
# In order to make the running stats only reflect the current batch, the
# momentum is disabled.
# bn.running_mean = (1 - momentum) * bn.running_mean + momentum *
# batch_mean
# Setting the momentum to 1.0 to compute the stats without momentum.
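    # For example, with momentum = 1.0 a single forward pass gives
    # bn.running_mean = (1 - 1.0) * bn.running_mean + 1.0 * batch_mean
    #                 = batch_mean,
    # i.e. each iteration exposes the pure per-batch statistics, which are
    # then averaged over `num_iters` batches below.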
momentum_actual = [bn.momentum for bn in bn_layers] # pyre-ignore
for bn in bn_layers:
bn.momentum = 1.0
# Note that running_var actually means "running average of variance"
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
finish_before_loader = False
prog_bar = mmcv.ProgressBar(len(data_loader))
for ind, data in enumerate(data_loader):
with torch.no_grad():
parallel_module(**data, return_loss=False)
prog_bar.update()
for i, bn in enumerate(bn_layers):
# Accumulates the bn stats.
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # running_var here is the running average of the per-batch
            # variance (see the note above)
running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)
if (ind + 1) >= num_iters:
finish_before_loader = True
break
assert finish_before_loader, 'Dataloader stopped before ' \
f'iteration {num_iters}'
for i, bn in enumerate(bn_layers):
# Sets the precise bn stats.
bn.running_mean = running_mean[i]
bn.running_var = running_var[i]
bn.momentum = momentum_actual[i]
class PreciseBNHook(Hook):
"""Precise BN hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
num_iters (int): Number of iterations to update the bn stats.
Default: 200.
interval (int): Perform precise bn interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, num_iters=200, interval=1):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.num_iters = num_iters
def after_train_epoch(self, runner):
if self.every_n_epochs(runner, self.interval):
# sleep to avoid possible deadlock
time.sleep(2.)
print_log(
f'Running Precise BN for {self.num_iters} iterations',
logger=runner.logger)
update_bn_stats(
runner.model,
self.dataloader,
self.num_iters,
logger=runner.logger)
print_log('BN stats updated', logger=runner.logger)
# sleep to avoid possible deadlock
time.sleep(2.)
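

if __name__ == '__main__':
    # Minimal self-contained demo (added for illustration, not part of the
    # original module): recompute the BN stats of a toy model on random data.
    # In full configs the hook is usually enabled through an entry such as
    # `precise_bn = dict(num_iters=200, interval=1)`, as exercised in the
    # runtime tests.
    from torch.utils.data import Dataset

    class _ToyDataset(Dataset):

        def __len__(self):
            return 8

        def __getitem__(self, idx):
            return dict(imgs=torch.randn(4))

    class _ToyModel(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.bn = torch.nn.BatchNorm1d(4)

        def forward(self, imgs, return_loss=False):
            return self.bn(imgs)

    toy_loader = DataLoader(_ToyDataset(), batch_size=2)
    toy_model = _ToyModel()
    update_bn_stats(toy_model, toy_loader, num_iters=4)
    print(toy_model.bn.running_mean, toy_model.bn.running_var)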
| 5,826 | 36.352564 | 107 | py |
STTS | STTS-main/VideoSwin/mmaction/utils/module_hooks.py | import torch
from mmcv.utils import Registry, build_from_cfg
MODULE_HOOKS = Registry('module_hooks')
def register_module_hooks(Module, module_hooks_list):
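    """Register hooks on a submodule of ``Module`` for each config dict.
    Each element of ``module_hooks_list`` is a dict with two reserved keys:
    ``hooked_module`` (attribute name of the submodule to hook, default
    ``'backbone'``) and ``hook_pos`` (one of ``'forward_pre'``, ``'forward'``
    or ``'backward'``, default ``'forward_pre'``). The remaining items are
    used to build the hook object from the ``MODULE_HOOKS`` registry.
    Args:
        Module (nn.Module): The module whose submodules will be hooked.
        module_hooks_list (list[dict]): Configs of the hooks to register.
    Returns:
        list: Handles returned by the ``register_*_hook`` calls, which can
            be used to remove the hooks later.
    """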
handles = []
for module_hook_cfg in module_hooks_list:
hooked_module_name = module_hook_cfg.pop('hooked_module', 'backbone')
if not hasattr(Module, hooked_module_name):
raise ValueError(
f'{Module.__class__} has no {hooked_module_name}!')
hooked_module = getattr(Module, hooked_module_name)
hook_pos = module_hook_cfg.pop('hook_pos', 'forward_pre')
if hook_pos == 'forward_pre':
handle = hooked_module.register_forward_pre_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
elif hook_pos == 'forward':
handle = hooked_module.register_forward_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
elif hook_pos == 'backward':
handle = hooked_module.register_backward_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
else:
            raise ValueError(
                f'hook_pos must be `forward_pre`, `forward` or `backward`, '
                f'but got {hook_pos}')
handles.append(handle)
return handles
@MODULE_HOOKS.register_module()
class GPUNormalize:
"""Normalize images with the given mean and std value on GPUs.
Call the member function ``hook_func`` will return the forward pre-hook
function for module registration.
GPU normalization, rather than CPU normalization, is more recommended in
the case of a model running on GPUs with strong compute capacity such as
Tesla V100.
Args:
mean (Sequence[float]): Mean values of different channels.
std (Sequence[float]): Std values of different channels.
"""
def __init__(self, input_format, mean, std):
if input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(f'The input format {input_format} is invalid.')
self.input_format = input_format
_mean = torch.tensor(mean)
_std = torch.tensor(std)
if input_format == 'NCTHW':
self._mean = _mean[None, :, None, None, None]
self._std = _std[None, :, None, None, None]
        elif input_format in ('NCHW', 'NCHW_Flow'):
            self._mean = _mean[None, :, None, None]
            self._std = _std[None, :, None, None]
elif input_format == 'NPTCHW':
self._mean = _mean[None, None, None, :, None, None]
self._std = _std[None, None, None, :, None, None]
else:
raise ValueError(f'The input format {input_format} is invalid.')
def hook_func(self):
def normalize_hook(Module, input):
x = input[0]
assert x.dtype == torch.uint8, (
f'The previous augmentation should use uint8 data type to '
                f'speed up computation, but got {x.dtype}')
mean = self._mean.to(x.device)
std = self._std.to(x.device)
with torch.no_grad():
x = x.float().sub_(mean).div_(std)
return (x, *input[1:])
return normalize_hook
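
# A minimal usage sketch (added for illustration; `recognizer` is a
# placeholder for a model built elsewhere, not a guaranteed API). The mean
# and std below are the ImageNet values commonly used in mmaction configs.
#
#     module_hooks = [
#         dict(
#             type='GPUNormalize',
#             hooked_module='backbone',
#             hook_pos='forward_pre',
#             input_format='NCTHW',
#             mean=[123.675, 116.28, 103.53],
#             std=[58.395, 57.12, 57.375])
#     ]
#     handles = register_module_hooks(recognizer, module_hooks)
#     # ... run inference with uint8 inputs; normalization happens on GPU ...
#     for handle in handles:
#         handle.remove()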
| 3,371 | 37.318182 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_train.py | import copy
import tempfile
from collections import OrderedDict
import pytest
import torch
import torch.nn as nn
from mmcv import Config
from torch.utils.data import Dataset
from mmaction.apis import train_model
from mmaction.datasets import DATASETS
@DATASETS.register_module()
class ExampleDataset(Dataset):
def __init__(self, test_mode=False):
self.test_mode = test_mode
@staticmethod
def evaluate(results, logger=None):
eval_results = OrderedDict()
eval_results['acc'] = 1
return eval_results
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.test_cfg = None
self.conv1 = nn.Conv2d(3, 8, kernel_size=1)
self.norm1 = nn.BatchNorm1d(2)
def forward(self, imgs, return_loss=False):
self.norm1(torch.rand(3, 2).cuda())
losses = dict()
losses['test_loss'] = torch.tensor([0.5], requires_grad=True)
return losses
def train_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
losses = self.forward(imgs, True)
loss = torch.tensor([0.5], requires_grad=True)
outputs = dict(loss=loss, log_vars=losses, num_samples=3)
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
self.forward(imgs, False)
outputs = dict(results=0.5)
return outputs
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_train_model():
model = ExampleModel()
dataset = ExampleDataset()
datasets = [ExampleDataset(), ExampleDataset()]
_cfg = dict(
seed=0,
gpus=1,
gpu_ids=[0],
resume_from=None,
load_from=None,
workflow=[('train', 1)],
total_epochs=5,
evaluation=dict(interval=1, save_best='acc'),
data=dict(
videos_per_gpu=1,
workers_per_gpu=0,
val=dict(type='ExampleDataset')),
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
optimizer_config=dict(grad_clip=dict(max_norm=40, norm_type=2)),
lr_config=dict(policy='step', step=[40, 80]),
omnisource=False,
precise_bn=False,
checkpoint_config=dict(interval=1),
log_level='INFO',
log_config=dict(interval=20, hooks=[dict(type='TextLoggerHook')]))
with tempfile.TemporaryDirectory() as tmpdir:
# normal train
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
# train with validation
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config, validate=True)
with tempfile.TemporaryDirectory() as tmpdir:
# train with Fp16OptimizerHook
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['fp16'] = dict(loss_scale=512.)
config = Config(cfg)
model.fp16_enabled = None
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['omnisource'] = True
config = Config(cfg)
train_model(model, datasets, config)
with tempfile.TemporaryDirectory() as tmpdir:
# train with precise_bn on
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['workflow'] = [('train', 1), ('val', 1)]
cfg['data'] = dict(
videos_per_gpu=1,
workers_per_gpu=0,
train=dict(type='ExampleDataset'),
val=dict(type='ExampleDataset'))
cfg['precise_bn'] = dict(num_iters=1, interval=1)
config = Config(cfg)
train_model(model, datasets, config)
| 4,059 | 29.298507 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_optimizer.py | import torch
import torch.nn as nn
from mmcv.runner import build_optimizer_constructor
class SubModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
self.gn = nn.GroupNorm(2, 2)
self.fc = nn.Linear(2, 2)
self.param1 = nn.Parameter(torch.ones(1))
def forward(self, x):
return x
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
self.sub = SubModel()
self.fc = nn.Linear(2, 1)
def forward(self, x):
return x
class PseudoDataParallel(nn.Module):
def __init__(self):
super().__init__()
self.module = ExampleModel()
def forward(self, x):
return x
base_lr = 0.01
base_wd = 0.0001
momentum = 0.9
def check_optimizer(optimizer,
model,
prefix='',
bias_lr_mult=1,
bias_decay_mult=1,
norm_decay_mult=1,
dwconv_decay_mult=1):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
assert len(param_groups) == len(model_parameters)
for i, param in enumerate(model_parameters):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert param_group['momentum'] == momentum
# param1
param1 = param_groups[0]
assert param1['lr'] == base_lr
assert param1['weight_decay'] == base_wd
# conv1.weight
conv1_weight = param_groups[1]
assert conv1_weight['lr'] == base_lr
assert conv1_weight['weight_decay'] == base_wd
# conv2.weight
conv2_weight = param_groups[2]
assert conv2_weight['lr'] == base_lr
assert conv2_weight['weight_decay'] == base_wd
# conv2.bias
conv2_bias = param_groups[3]
assert conv2_bias['lr'] == base_lr * bias_lr_mult
assert conv2_bias['weight_decay'] == base_wd * bias_decay_mult
# bn.weight
bn_weight = param_groups[4]
assert bn_weight['lr'] == base_lr
assert bn_weight['weight_decay'] == base_wd * norm_decay_mult
# bn.bias
bn_bias = param_groups[5]
assert bn_bias['lr'] == base_lr
assert bn_bias['weight_decay'] == base_wd * norm_decay_mult
# sub.param1
sub_param1 = param_groups[6]
assert sub_param1['lr'] == base_lr
assert sub_param1['weight_decay'] == base_wd
# sub.conv1.weight
sub_conv1_weight = param_groups[7]
assert sub_conv1_weight['lr'] == base_lr
assert sub_conv1_weight['weight_decay'] == base_wd * dwconv_decay_mult
# sub.conv1.bias
sub_conv1_bias = param_groups[8]
assert sub_conv1_bias['lr'] == base_lr * bias_lr_mult
assert sub_conv1_bias['weight_decay'] == base_wd * dwconv_decay_mult
# sub.gn.weight
sub_gn_weight = param_groups[9]
assert sub_gn_weight['lr'] == base_lr
assert sub_gn_weight['weight_decay'] == base_wd * norm_decay_mult
# sub.gn.bias
sub_gn_bias = param_groups[10]
assert sub_gn_bias['lr'] == base_lr
assert sub_gn_bias['weight_decay'] == base_wd * norm_decay_mult
    # sub.fc.weight
sub_fc_weight = param_groups[11]
assert sub_fc_weight['lr'] == base_lr
assert sub_fc_weight['weight_decay'] == base_wd
    # sub.fc.bias
sub_fc_bias = param_groups[12]
assert sub_fc_bias['lr'] == base_lr * bias_lr_mult
assert sub_fc_bias['weight_decay'] == base_wd * bias_decay_mult
    # fc.weight
fc_weight = param_groups[13]
assert fc_weight['lr'] == base_lr
assert fc_weight['weight_decay'] == base_wd
    # fc.bias
fc_bias = param_groups[14]
assert fc_bias['lr'] == base_lr * bias_lr_mult
assert fc_bias['weight_decay'] == base_wd * bias_decay_mult
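
# A sketch of building an optimizer that `check_optimizer` would accept
# (added for illustration; it relies on mmcv's DefaultOptimizerConstructor
# and its paramwise_cfg keys):
#
#     model = ExampleModel()
#     optim_constructor = build_optimizer_constructor(
#         dict(
#             type='DefaultOptimizerConstructor',
#             optimizer_cfg=dict(
#                 type='SGD', lr=base_lr, weight_decay=base_wd,
#                 momentum=momentum),
#             paramwise_cfg=dict(
#                 bias_lr_mult=2., bias_decay_mult=0.5, norm_decay_mult=0.,
#                 dwconv_decay_mult=0.1)))
#     optimizer = optim_constructor(model)
#     check_optimizer(
#         optimizer, model, bias_lr_mult=2., bias_decay_mult=0.5,
#         norm_decay_mult=0., dwconv_decay_mult=0.1)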
def check_tsm_optimizer(optimizer, model, fc_lr5=True):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
# first_conv_weight
first_conv_weight = param_groups[0]
assert torch.equal(first_conv_weight['params'][0], model_parameters[1])
assert first_conv_weight['lr'] == base_lr
assert first_conv_weight['weight_decay'] == base_wd
# first_conv_bias
first_conv_bias = param_groups[1]
assert first_conv_bias['params'] == []
assert first_conv_bias['lr'] == base_lr * 2
assert first_conv_bias['weight_decay'] == 0
# normal_weight
normal_weight = param_groups[2]
assert torch.equal(normal_weight['params'][0], model_parameters[2])
assert torch.equal(normal_weight['params'][1], model_parameters[7])
assert normal_weight['lr'] == base_lr
assert normal_weight['weight_decay'] == base_wd
# normal_bias
normal_bias = param_groups[3]
assert torch.equal(normal_bias['params'][0], model_parameters[3])
assert torch.equal(normal_bias['params'][1], model_parameters[8])
assert normal_bias['lr'] == base_lr * 2
assert normal_bias['weight_decay'] == 0
# bn
bn = param_groups[4]
assert torch.equal(bn['params'][0], model_parameters[4])
assert torch.equal(bn['params'][1], model_parameters[5])
assert torch.equal(bn['params'][2], model_parameters[9])
assert torch.equal(bn['params'][3], model_parameters[10])
assert bn['lr'] == base_lr
assert bn['weight_decay'] == 0
# normal linear weight
assert torch.equal(normal_weight['params'][2], model_parameters[11])
# normal linear bias
assert torch.equal(normal_bias['params'][2], model_parameters[12])
# fc_lr5
lr5_weight = param_groups[5]
lr10_bias = param_groups[6]
assert lr5_weight['lr'] == base_lr * 5
assert lr5_weight['weight_decay'] == base_wd
assert lr10_bias['lr'] == base_lr * 10
assert lr10_bias['weight_decay'] == 0
if fc_lr5:
# lr5_weight
assert torch.equal(lr5_weight['params'][0], model_parameters[13])
# lr10_bias
assert torch.equal(lr10_bias['params'][0], model_parameters[14])
else:
# lr5_weight
assert lr5_weight['params'] == []
# lr10_bias
assert lr10_bias['params'] == []
assert torch.equal(normal_weight['params'][3], model_parameters[13])
assert torch.equal(normal_bias['params'][3], model_parameters[14])
def test_tsm_optimizer_constructor():
model = ExampleModel()
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
# fc_lr5 is True
paramwise_cfg = dict(fc_lr5=True)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
# fc_lr5 is False
paramwise_cfg = dict(fc_lr5=False)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
| 7,742 | 35.182243 | 76 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_config.py | import glob
import os
import os.path as osp
import mmcv
import torch.nn as nn
from mmaction.models import build_localizer, build_recognizer
def _get_config_path():
"""Find the predefined recognizer config path."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
print(f'Using {len(config_names)} config files')
config_fpaths = [
osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths
]
return config_fpaths
def test_config_build_recognizer():
"""Test that all mmaction models defined in the configs can be
initialized."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/recognition')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
# test all config file in `configs` directory
for config_fpath in config_fpaths:
config_mod = mmcv.Config.fromfile(config_fpath)
print(f'Building recognizer, config_fpath = {config_fpath!r}')
# Remove pretrained keys to allow for testing in an offline environment
if 'pretrained' in config_mod.model['backbone']:
config_mod.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config_mod.model)
assert isinstance(recognizer, nn.Module)
def _get_config_path_for_localizer():
"""Find the predefined localizer config path for localizer."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/localization')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
print(f'Using {len(config_names)} config files')
config_fpaths = [
osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths
]
return config_fpaths
def test_config_build_localizer():
"""Test that all mmaction models defined in the configs can be
initialized."""
config_fpaths = _get_config_path_for_localizer()
# test all config file in `configs/localization` directory
for config_fpath in config_fpaths:
config_mod = mmcv.Config.fromfile(config_fpath)
print(f'Building localizer, config_fpath = {config_fpath!r}')
if config_mod.get('model', None):
localizer = build_localizer(config_mod.model)
assert isinstance(localizer, nn.Module)
| 2,887 | 38.027027 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_eval_hook.py | import os.path as osp
import tempfile
import unittest.mock as mock
import warnings
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, IterBasedRunner
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
# TODO import eval hooks from mmcv and delete them from mmaction2
try:
from mmcv.runner import EvalHook, DistEvalHook
pytest.skip(
'EvalHook and DistEvalHook are supported in MMCV',
allow_module_level=True)
except ImportError:
warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from '
'mmaction2 will be deprecated. Please install mmcv through '
'master branch.')
from mmaction.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]
def __getitem__(self, idx):
results = dict(x=torch.tensor([1]))
return results
def __len__(self):
return 1
@mock.create_autospec
def evaluate(self, results, logger=None):
pass
class EvalDataset(ExampleDataset):
def evaluate(self, results, logger=None):
acc = self.eval_result[self.index]
output = OrderedDict(acc=acc, index=self.index, score=acc)
self.index += 1
return output
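
# Note (added for illustration): with `eval_result = [1, 4, 3, 7, 2, -3, 4,
# 6]`, the greatest accuracy (7) appears at the 4th evaluation and the least
# (-3) at the 6th, which is what the `best_acc_epoch_4.pth` and
# `best_acc_epoch_6.pth` assertions below rely on.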
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
@staticmethod
def forward(x, **kwargs):
return x
@staticmethod
def train_step(data_batch, optimizer, **kwargs):
if not isinstance(data_batch, dict):
data_batch = dict(x=data_batch)
return data_batch
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def _build_epoch_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = EpochBasedRunner(
model=model, work_dir=tmp_dir, logger=get_logger('demo'))
return runner
def _build_iter_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = IterBasedRunner(
model=model, work_dir=tmp_dir, logger=get_logger('demo'))
return runner
def test_eval_hook():
with pytest.raises(AssertionError):
# `save_best` should be a str
test_dataset = Model()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best=True)
with pytest.raises(TypeError):
# dataloader must be a pytorch DataLoader
test_dataset = Model()
data_loader = [DataLoader(test_dataset)]
EvalHook(data_loader)
with pytest.raises(ValueError):
# save_best must be valid when rule_map is None
test_dataset = ExampleDataset()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best='unsupport')
with pytest.raises(KeyError):
# rule must be in keys of rule_map
test_dataset = Model()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best='auto', rule='unsupport')
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset)
model = Model()
data_loader = DataLoader(test_dataset)
eval_hook = EvalHook(data_loader, save_best=None)
with tempfile.TemporaryDirectory() as tmpdir:
# total_epochs = 1
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with(
test_dataset, [torch.tensor([1])], logger=runner.logger)
assert runner.meta is None or 'best_score' not in runner.meta[
'hook_msgs']
assert runner.meta is None or 'best_ckpt' not in runner.meta[
'hook_msgs']
# when `save_best` is set to 'auto', first metric will be used.
loader = DataLoader(EvalDataset())
model = Model()
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best acc and corresponding epoch
loader = DataLoader(EvalDataset())
model = Model()
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best score and corresponding epoch
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(
data_loader, interval=1, save_best='score', rule='greater')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best score using less compare func
# and indicate corresponding epoch
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == -3
    # Test the EvalHook when a resume happens
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, save_best='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 4
resume_from = osp.join(tmpdir, 'latest.pth')
loader = DataLoader(ExampleDataset())
eval_hook = EvalHook(data_loader, save_best='acc')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.resume(resume_from)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
@patch('mmaction.apis.single_gpu_test', MagicMock)
@patch('mmaction.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch',
[(_build_epoch_runner, True),
(_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
# create dummy data
dataloader = DataLoader(torch.ones((5, 2)))
# 0.1. dataloader is not a DataLoader object
with pytest.raises(TypeError):
EvalHookParam(dataloader=MagicMock(), interval=-1)
# 0.2. negative interval
with pytest.raises(ValueError):
EvalHookParam(dataloader, interval=-1)
# 1. start=None, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, interval=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 2. start=1, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, interval=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, interval=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 1 # after epoch 2
# 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, interval=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3
# 5. start=0/negative, interval=1: perform evaluation after each epoch and
# before epoch 1.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=0, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
runner = _build_demo_runner()
with pytest.warns(UserWarning):
evalhook = EvalHookParam(
dataloader, start=-2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
    # 6. resuming from epoch i, start = x (x <= i), interval = 1: perform
    # evaluation after each epoch and before the first epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
if by_epoch:
runner._epoch = 2
else:
runner._iter = 2
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # before & after epoch 3
    # 7. resuming from epoch i, start = i+1/None, interval = 1: perform
    # evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
if by_epoch:
runner._epoch = 1
else:
runner._iter = 1
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3
| 12,595 | 35.616279 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_precise_bn.py | import copy
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmaction.utils import PreciseBNHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1.0], dtype=torch.float32))
return results
def __len__(self):
return 1
class BiggerDataset(ExampleDataset):
def __init__(self, fixed_values=range(0, 12)):
assert len(self) == len(fixed_values)
self.fixed_values = fixed_values
def __getitem__(self, idx):
results = dict(
imgs=torch.tensor([self.fixed_values[idx]], dtype=torch.float32))
return results
def __len__(self):
# a bigger dataset
return 12
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.bn = nn.BatchNorm1d(1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.bn(self.conv(imgs))
@staticmethod
def train_step(data_batch, optimizer, **kwargs):
outputs = {
'loss': 0.5,
'log_vars': {
'accuracy': 0.98
},
'num_samples': 1
}
return outputs
class SingleBNModel(ExampleModel):
def __init__(self):
super().__init__()
self.bn = nn.BatchNorm1d(1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.bn(imgs)
class GNExampleModel(ExampleModel):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.bn = nn.GroupNorm(1, 1)
self.test_cfg = None
class NoBNExampleModel(ExampleModel):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.conv(imgs)
def test_precise_bn():
with pytest.raises(TypeError):
# `data_loader` must be a Pytorch DataLoader
test_dataset = ExampleModel()
data_loader = DataLoader(
test_dataset,
batch_size=2,
sampler=None,
num_workers=0,
shuffle=False)
PreciseBNHook('data_loader')
optimizer_cfg = dict(
type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=2)
model = ExampleModel()
optimizer = build_optimizer(model, optimizer_cfg)
data_loader = DataLoader(test_dataset, batch_size=2)
precise_bn_loader = copy.deepcopy(data_loader)
logger = get_logger('precise_bn')
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
with pytest.raises(AssertionError):
        # num_iters should be no larger than the total
        # number of iterations
precise_bn_hook = PreciseBNHook(precise_bn_loader, num_iters=5)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test non-DDP model
test_bigger_dataset = BiggerDataset()
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test model w/ gn layer
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = GNExampleModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test model without bn layer
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = NoBNExampleModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test how precise it is
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=6) # run all
assert precise_bn_hook.num_iters == 6
assert precise_bn_hook.interval == 1
model = SingleBNModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
imgs_list = list()
for _, data in enumerate(loader):
imgs_list.append(np.array(data['imgs']))
mean = np.mean([np.mean(batch) for batch in imgs_list])
    # Bessel's correction is used in PyTorch, therefore ddof=1
var = np.mean([np.var(batch, ddof=1) for batch in imgs_list])
assert np.equal(mean, np.array(model.bn.running_mean))
assert np.equal(var, np.array(model.bn.running_var))
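    # Worked check (added for illustration): the fixed values 0..11 in
    # batches of two give per-batch means 0.5, 2.5, ..., 10.5 (average 5.5)
    # and a Bessel-corrected per-batch variance of 0.5 each, so the running
    # stats above must be mean == 5.5 and var == 0.5.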
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_ddp_model_precise_bn():
# test DDP model
test_bigger_dataset = BiggerDataset()
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = ExampleModel()
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=True)
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    optimizer = build_optimizer(model, optimizer_cfg)
    logger = get_logger('precise_bn')
    runner = EpochBasedRunner(
        model=model,
        batch_processor=None,
        optimizer=optimizer,
        logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
| 6,456 | 30.497561 | 78 | py |