| repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/eval_detection.py
|
import json
import numpy as np
from mmcv.utils import print_log
from ...utils import get_root_logger
from .accuracy import interpolated_precision_recall, pairwise_temporal_iou
class ActivityNetLocalization:
"""Class to evaluate detection results on ActivityNet.
Args:
ground_truth_filename (str | None): The filename of groundtruth.
Default: None.
prediction_filename (str | None): The filename of action detection
results. Default: None.
tiou_thresholds (np.ndarray): The thresholds of temporal iou to
evaluate. Default: ``np.linspace(0.5, 0.95, 10)``.
verbose (bool): Whether to print verbose logs. Default: False.
"""
def __init__(self,
ground_truth_filename=None,
prediction_filename=None,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
verbose=False):
if not ground_truth_filename:
raise IOError('Please input a valid ground truth file.')
if not prediction_filename:
raise IOError('Please input a valid prediction file.')
self.ground_truth_filename = ground_truth_filename
self.prediction_filename = prediction_filename
self.tiou_thresholds = tiou_thresholds
self.verbose = verbose
self.ap = None
self.logger = get_root_logger()
# Import ground truth and predictions.
self.ground_truth, self.activity_index = self._import_ground_truth(
ground_truth_filename)
self.prediction = self._import_prediction(prediction_filename)
if self.verbose:
log_msg = (
'[INIT] Loaded ground_truth from '
f'{self.ground_truth_filename}, prediction from '
f'{self.prediction_filename}.\n'
f'Number of ground truth instances: {len(self.ground_truth)}\n'
f'Number of predictions: {len(self.prediction)}\n'
f'Fixed threshold for tiou score: {self.tiou_thresholds}')
print_log(log_msg, logger=self.logger)
@staticmethod
def _import_ground_truth(ground_truth_filename):
"""Read ground truth file and return the ground truth instances and the
activity classes.
Args:
ground_truth_filename (str): Full path to the ground truth json
file.
Returns:
tuple[list, dict]: (ground_truth, activity_index).
                ground_truth is a list of ground truth instances, each
                stored as a dict.
                activity_index maps each class name to a class index.
"""
with open(ground_truth_filename, 'r') as f:
data = json.load(f)
# Checking format
activity_index, class_idx = {}, 0
ground_truth = []
for video_id, video_info in data.items():
for anno in video_info['annotations']:
if anno['label'] not in activity_index:
activity_index[anno['label']] = class_idx
class_idx += 1
# old video_anno
ground_truth_item = {}
ground_truth_item['video-id'] = video_id[2:]
ground_truth_item['t-start'] = float(anno['segment'][0])
ground_truth_item['t-end'] = float(anno['segment'][1])
ground_truth_item['label'] = activity_index[anno['label']]
ground_truth.append(ground_truth_item)
return ground_truth, activity_index
def _import_prediction(self, prediction_filename):
"""Read prediction file and return the prediction instances.
Args:
prediction_filename (str): Full path to the prediction json file.
Returns:
List: List containing the prediction instances (dictionaries).
"""
with open(prediction_filename, 'r') as f:
data = json.load(f)
# Read predictions.
prediction = []
for video_id, video_info in data['results'].items():
for result in video_info:
prediction_item = dict()
prediction_item['video-id'] = video_id
prediction_item['label'] = self.activity_index[result['label']]
prediction_item['t-start'] = float(result['segment'][0])
prediction_item['t-end'] = float(result['segment'][1])
prediction_item['score'] = result['score']
prediction.append(prediction_item)
return prediction
def wrapper_compute_average_precision(self):
"""Computes average precision for each class."""
ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))
# Adaptation to query faster
ground_truth_by_label = []
prediction_by_label = []
for i in range(len(self.activity_index)):
ground_truth_by_label.append([])
prediction_by_label.append([])
for gt in self.ground_truth:
ground_truth_by_label[gt['label']].append(gt)
for pred in self.prediction:
prediction_by_label[pred['label']].append(pred)
for i in range(len(self.activity_index)):
ap_result = compute_average_precision_detection(
ground_truth_by_label[i], prediction_by_label[i],
self.tiou_thresholds)
ap[:, i] = ap_result
return ap
def evaluate(self):
"""Evaluates a prediction file.
        For the detection task, we use the interpolated mean average
        precision to measure the performance of a method.
"""
self.ap = self.wrapper_compute_average_precision()
self.mAP = self.ap.mean(axis=1)
self.average_mAP = self.mAP.mean()
return self.mAP, self.average_mAP
def compute_average_precision_detection(ground_truth,
prediction,
tiou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Compute average precision (detection task) between ground truth and
predictions data frames. If multiple predictions occurs for the same
predicted segment, only the one with highest score is matches as true
positive. This code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (list[dict]): List containing the ground truth instances
(dictionaries). Required keys are 'video-id', 't-start' and
't-end'.
prediction (list[dict]): List containing the prediction instances
(dictionaries). Required keys are: 'video-id', 't-start', 't-end'
and 'score'.
        tiou_thresholds (np.ndarray): A 1D array of temporal intersection
            over union thresholds (optional).
            Default: ``np.linspace(0.5, 0.95, 10)``.
    Returns:
        np.ndarray: 1D array of average precision scores, one per tIoU
            threshold.
"""
num_thresholds = len(tiou_thresholds)
num_gts = len(ground_truth)
num_preds = len(prediction)
ap = np.zeros(num_thresholds)
if len(prediction) == 0:
return ap
num_positive = float(num_gts)
lock_gt = np.ones((num_thresholds, num_gts)) * -1
# Sort predictions by decreasing score order.
prediction.sort(key=lambda x: -x['score'])
# Initialize true positive and false positive vectors.
tp = np.zeros((num_thresholds, num_preds))
fp = np.zeros((num_thresholds, num_preds))
# Adaptation to query faster
ground_truth_by_videoid = {}
for i, item in enumerate(ground_truth):
item['index'] = i
ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)
    # Assign true positives to ground truth instances.
for idx, pred in enumerate(prediction):
if pred['video-id'] in ground_truth_by_videoid:
gts = ground_truth_by_videoid[pred['video-id']]
else:
fp[:, idx] = 1
continue
tiou_arr = pairwise_temporal_iou(
np.array([pred['t-start'], pred['t-end']]),
np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts]))
tiou_arr = tiou_arr.reshape(-1)
# We would like to retrieve the predictions with highest tiou score.
tiou_sorted_idx = tiou_arr.argsort()[::-1]
for t_idx, tiou_threshold in enumerate(tiou_thresholds):
for j_idx in tiou_sorted_idx:
if tiou_arr[j_idx] < tiou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[t_idx, gts[j_idx]['index']] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
    tp_cumsum = np.cumsum(tp, axis=1).astype(np.float64)
    fp_cumsum = np.cumsum(fp, axis=1).astype(np.float64)
recall_cumsum = tp_cumsum / num_positive
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(tiou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
return ap
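
def _demo_compute_average_precision_detection():
    """Minimal, hypothetical usage sketch with synthetic values only.

    One video with a single ground truth segment and two predictions:
    one close match and one clear miss.
    """
    gt = [{'video-id': 'v0', 't-start': 0.0, 't-end': 10.0}]
    preds = [
        {'video-id': 'v0', 't-start': 0.5, 't-end': 9.5, 'score': 0.9},
        {'video-id': 'v0', 't-start': 20.0, 't-end': 30.0, 'score': 0.4},
    ]
    # Returns one AP value per tIoU threshold in np.linspace(0.5, 0.95, 10).
    return compute_average_precision_detection(gt, preds)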
| 9,363
| 39.017094
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_utils.py
|
import csv
import logging
import time
from collections import defaultdict
import numpy as np
from .ava_evaluation import object_detection_evaluation as det_eval
from .ava_evaluation import standard_fields
def det2csv(dataset, results, custom_classes):
csv_results = []
for idx in range(len(dataset)):
video_id = dataset.video_infos[idx]['video_id']
timestamp = dataset.video_infos[idx]['timestamp']
result = results[idx]
for label, _ in enumerate(result):
for bbox in result[label]:
bbox_ = tuple(bbox.tolist())
if custom_classes is not None:
actual_label = custom_classes[label + 1]
else:
actual_label = label + 1
csv_results.append((
video_id,
timestamp,
) + bbox_[:4] + (actual_label, ) + bbox_[4:])
return csv_results
# results is organized by class
def results2csv(dataset, results, out_file, custom_classes=None):
if isinstance(results[0], list):
csv_results = det2csv(dataset, results, custom_classes)
# save space for float
def to_str(item):
if isinstance(item, float):
return f'{item:.3f}'
return str(item)
with open(out_file, 'w') as f:
for csv_result in csv_results:
f.write(','.join(map(to_str, csv_result)))
f.write('\n')
def print_time(message, start):
print('==> %g seconds to %s' % (time.time() - start, message), flush=True)
def make_image_key(video_id, timestamp):
"""Returns a unique identifier for a video id & timestamp."""
return f'{video_id},{int(timestamp):04d}'
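# For example (illustrative values), make_image_key('vid001', '0902') and
# make_image_key('vid001', 902) both return 'vid001,0902'; the timestamp is
# zero-padded to four digits.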
def read_csv(csv_file, class_whitelist=None):
"""Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
csv_file: A file object.
class_whitelist: If provided, boxes corresponding to (integer) class
labels not in this set are skipped.
Returns:
boxes: A dictionary mapping each unique image key (string) to a list of
boxes, given as coordinates [y1, x1, y2, x2].
        labels: A dictionary mapping each unique image key (string) to a list
            of integer class labels, matching the corresponding box in
            `boxes`.
        scores: A dictionary mapping each unique image key (string) to a list
            of score values, matching the corresponding label in `labels`.
            If scores are not provided in the csv, they default to 1.0.
"""
start = time.time()
entries = defaultdict(list)
boxes = defaultdict(list)
labels = defaultdict(list)
scores = defaultdict(list)
reader = csv.reader(csv_file)
for row in reader:
        assert len(row) in [7, 8], f'Wrong number of columns: {row}'
image_key = make_image_key(row[0], row[1])
x1, y1, x2, y2 = [float(n) for n in row[2:6]]
action_id = int(row[6])
if class_whitelist and action_id not in class_whitelist:
continue
score = 1.0
if len(row) == 8:
score = float(row[7])
entries[image_key].append((score, action_id, y1, x1, y2, x2))
for image_key in entries:
# Evaluation API assumes boxes with descending scores
entry = sorted(entries[image_key], key=lambda tup: -tup[0])
boxes[image_key] = [x[2:] for x in entry]
labels[image_key] = [x[1] for x in entry]
scores[image_key] = [x[0] for x in entry]
print_time('read file ' + csv_file.name, start)
return boxes, labels, scores
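
def _demo_read_csv():
    """Minimal, hypothetical usage sketch: parse one synthetic 8-column
    AVA-style row from an in-memory buffer (values are illustrative only)."""
    import io

    class _NamedBuffer(io.StringIO):
        # read_csv logs `csv_file.name`, so give the buffer a name.
        name = 'demo.csv'

    buf = _NamedBuffer('vid001,0902,0.1,0.2,0.8,0.9,12,0.95\n')
    return read_csv(buf)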
def read_exclusions(exclusions_file):
"""Reads a CSV file of excluded timestamps.
Args:
exclusions_file: A file object containing a csv of video-id,timestamp.
Returns:
A set of strings containing excluded image keys, e.g.
"aaaaaaaaaaa,0904",
or an empty set if exclusions file is None.
"""
excluded = set()
if exclusions_file:
reader = csv.reader(exclusions_file)
for row in reader:
            assert len(row) == 2, f'Expected only 2 columns, got: {row}'
excluded.add(make_image_key(row[0], row[1]))
return excluded
def read_labelmap(labelmap_file):
"""Reads a labelmap without the dependency on protocol buffers.
Args:
labelmap_file: A file object containing a label map protocol buffer.
Returns:
labelmap: The label map in the form used by the
object_detection_evaluation
module - a list of {"id": integer, "name": classname } dicts.
class_ids: A set containing all of the valid class id integers.
"""
labelmap = []
class_ids = set()
name = ''
class_id = ''
for line in labelmap_file:
if line.startswith(' name:'):
name = line.split('"')[1]
elif line.startswith(' id:') or line.startswith(' label_id:'):
class_id = int(line.strip().split(' ')[-1])
labelmap.append({'id': class_id, 'name': name})
class_ids.add(class_id)
return labelmap, class_ids
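
def _demo_read_labelmap():
    """Minimal, hypothetical usage sketch: parse a one-entry pbtxt-style
    label map from an in-memory buffer (illustrative content only)."""
    import io
    text = ('item {\n'
            '  name: "stand"\n'
            '  id: 1\n'
            '}\n')
    # Returns ([{'id': 1, 'name': 'stand'}], {1}).
    return read_labelmap(io.StringIO(text))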
# Seems there is at most 100 detections for each image
def ava_eval(result_file,
result_type,
label_file,
ann_file,
exclude_file,
verbose=True,
custom_classes=None):
assert result_type in ['mAP']
start = time.time()
categories, class_whitelist = read_labelmap(open(label_file))
if custom_classes is not None:
custom_classes = custom_classes[1:]
assert set(custom_classes).issubset(set(class_whitelist))
class_whitelist = custom_classes
categories = [cat for cat in categories if cat['id'] in custom_classes]
# loading gt, do not need gt score
gt_boxes, gt_labels, _ = read_csv(open(ann_file), class_whitelist)
if verbose:
        print_time('Reading GT results', start)
if exclude_file is not None:
excluded_keys = read_exclusions(open(exclude_file))
else:
excluded_keys = list()
start = time.time()
boxes, labels, scores = read_csv(open(result_file), class_whitelist)
if verbose:
print_time('Reading detection results', start)
# Evaluation for mAP
pascal_evaluator = det_eval.PascalDetectionEvaluator(categories)
start = time.time()
for image_key in gt_boxes:
        if image_key in excluded_keys:
            # Excluded timestamps are skipped regardless of verbosity.
            if verbose:
                logging.info(
                    'Found excluded timestamp in ground truth: %s. '
                    'It will be ignored.', image_key)
            continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array(gt_boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
np.array(gt_labels[image_key], dtype=int)
})
if verbose:
print_time('Convert groundtruth', start)
start = time.time()
for image_key in boxes:
        if image_key in excluded_keys:
            if verbose:
                logging.info(
                    'Found excluded timestamp in detections: %s. '
                    'It will be ignored.', image_key)
            continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
np.array(scores[image_key], dtype=float)
})
if verbose:
        print_time('Convert detections', start)
start = time.time()
metrics = pascal_evaluator.evaluate()
if verbose:
print_time('run_evaluator', start)
for display_name in metrics:
print(f'{display_name}=\t{metrics[display_name]}')
return {
display_name: metrics[display_name]
for display_name in metrics if 'ByCategory' not in display_name
}
| 8,215
| 33.666667
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/eval_hooks.py
|
import os
import os.path as osp
import warnings
from math import inf
import torch.distributed as dist
from torch.nn.modules.batchnorm import _BatchNorm
from torch.utils.data import DataLoader
try:
from mmcv.runner import EvalHook as BasicEvalHook
from mmcv.runner import DistEvalHook as BasicDistEvalHook
from_mmcv = True
class EvalHook(BasicEvalHook):
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self, *args, save_best='auto', **kwargs):
super().__init__(*args, save_best=save_best, **kwargs)
class DistEvalHook(BasicDistEvalHook):
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self, *args, save_best='auto', **kwargs):
super().__init__(*args, save_best=save_best, **kwargs)
except (ImportError, ModuleNotFoundError):
    warnings.warn('DeprecationWarning: EvalHook and DistEvalHook in mmaction2 '
                  'will be deprecated; please install mmcv from the master '
                  'branch.')
from_mmcv = False
if not from_mmcv:
from mmcv.runner import Hook
class EvalHook(Hook): # noqa: F811
"""Non-Distributed evaluation hook.
        Notes:
            If new arguments are added for EvalHook, tools/test.py and
            tools/eval_metric.py may be affected.
This hook will regularly perform evaluation in a given interval when
performing in non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the
resuming epoch. If None, whether to evaluate is merely decided
by ``interval``. Default: None.
interval (int): Evaluation interval. Default: 1.
            by_epoch (bool): Determine whether to perform evaluation by
                epoch or by iteration. If True, it will be performed by
                epoch; otherwise, by iteration. Default: True.
            save_best (str | None, optional): If a metric is specified, it
                would track the best checkpoint during evaluation. The
                information about the best checkpoint would be saved in
                best.json. Options are the evaluation metrics of the test
                dataset, e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and
VideoDataset). ``AR@AN``, ``auc`` for action localization
dataset. (ActivityNetDataset). ``mAP@0.5IOU`` for
spatio-temporal action detection dataset (AVADataset).
If ``save_best`` is ``auto``, the first key of the returned
``OrderedDict`` result will be used. Default: 'auto'.
            rule (str | None, optional): Comparison rule for best score.
                If set to None, it will infer a reasonable rule. Keys such
                as 'acc' and 'top' infer the 'greater' rule; keys containing
                'loss' infer the 'less' rule. Options are 'greater', 'less',
                None. Default: None.
**eval_kwargs: Evaluation arguments fed into the evaluate function
of the dataset.
"""
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
init_value_map = {'greater': -inf, 'less': inf}
greater_keys = [
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP@', 'Recall@'
]
less_keys = ['loss']
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
save_best='auto',
rule=None,
**eval_kwargs):
if 'key_indicator' in eval_kwargs:
raise RuntimeError(
'"key_indicator" is deprecated, '
'you need to use "save_best" instead. '
'See https://github.com/open-mmlab/mmaction2/pull/395 '
'for more info')
if not isinstance(dataloader, DataLoader):
raise TypeError(f'dataloader must be a pytorch DataLoader, '
f'but got {type(dataloader)}')
if interval <= 0:
raise ValueError(
f'interval must be positive, but got {interval}')
assert isinstance(by_epoch, bool)
if start is not None and start < 0:
warnings.warn(
f'The evaluation start epoch {start} is smaller than 0, '
f'use 0 instead', UserWarning)
start = 0
self.dataloader = dataloader
self.interval = interval
self.start = start
self.by_epoch = by_epoch
assert isinstance(save_best, str) or save_best is None
self.save_best = save_best
self.eval_kwargs = eval_kwargs
self.initial_flag = True
if self.save_best is not None:
self.best_ckpt_path = None
self._init_rule(rule, self.save_best)
def _init_rule(self, rule, key_indicator):
"""Initialize rule, key_indicator, comparison_func, and best score.
Args:
rule (str | None): Comparison rule for best score.
key_indicator (str | None): Key indicator to determine the
comparison rule.
"""
if rule not in self.rule_map and rule is not None:
raise KeyError(f'rule must be greater, less or None, '
f'but got {rule}.')
if rule is None:
if key_indicator != 'auto':
if any(key in key_indicator for key in self.greater_keys):
rule = 'greater'
elif any(key in key_indicator for key in self.less_keys):
rule = 'less'
else:
raise ValueError(
f'Cannot infer the rule for key '
f'{key_indicator}, thus a specific rule '
f'must be specified.')
self.rule = rule
self.key_indicator = key_indicator
if self.rule is not None:
self.compare_func = self.rule_map[self.rule]
def before_run(self, runner):
if self.save_best is not None:
if runner.meta is None:
                    warnings.warn('runner.meta is None. Creating an empty one.')
runner.meta = dict()
runner.meta.setdefault('hook_msgs', dict())
def before_train_iter(self, runner):
"""Evaluate the model only at the start of training by
iteration."""
if self.by_epoch:
return
if not self.initial_flag:
return
if self.start is not None and runner.iter >= self.start:
self.after_train_iter(runner)
self.initial_flag = False
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
if not self.by_epoch:
return
if not self.initial_flag:
return
if self.start is not None and runner.epoch >= self.start:
self.after_train_epoch(runner)
self.initial_flag = False
def after_train_iter(self, runner):
"""Called after every training iter to evaluate the results."""
if not self.by_epoch:
self._do_evaluate(runner)
def after_train_epoch(self, runner):
"""Called after every training epoch to evaluate the results."""
if self.by_epoch:
self._do_evaluate(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self.evaluation_flag(runner):
return
from mmaction.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
def evaluation_flag(self, runner):
"""Judge whether to perform_evaluation.
Returns:
bool: The flag indicating whether to perform evaluation.
"""
if self.by_epoch:
current = runner.epoch
check_time = self.every_n_epochs
else:
current = runner.iter
check_time = self.every_n_iters
if self.start is None:
if not check_time(runner, self.interval):
# No evaluation during the interval.
return False
elif (current + 1) < self.start:
# No evaluation if start is larger than the current time.
return False
else:
# Evaluation only at epochs/iters 3, 5, 7...
# if start==3 and interval==2
if (current + 1 - self.start) % self.interval:
return False
return True
def _save_ckpt(self, runner, key_score):
if self.by_epoch:
current = f'epoch_{runner.epoch + 1}'
cur_type, cur_time = 'epoch', runner.epoch + 1
else:
current = f'iter_{runner.iter + 1}'
cur_type, cur_time = 'iter', runner.iter + 1
best_score = runner.meta['hook_msgs'].get(
'best_score', self.init_value_map[self.rule])
if self.compare_func(key_score, best_score):
best_score = key_score
runner.meta['hook_msgs']['best_score'] = best_score
if self.best_ckpt_path and osp.isfile(self.best_ckpt_path):
os.remove(self.best_ckpt_path)
best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
runner.save_checkpoint(
runner.work_dir, best_ckpt_name, create_symlink=False)
self.best_ckpt_path = osp.join(runner.work_dir, best_ckpt_name)
runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
runner.logger.info(
f'Now best checkpoint is saved as {best_ckpt_name}.')
runner.logger.info(
f'Best {self.key_indicator} is {best_score:0.4f} '
f'at {cur_time} {cur_type}.')
def evaluate(self, runner, results):
"""Evaluate the results.
Args:
                runner (:obj:`mmcv.Runner`): The underlying training runner.
results (list): Output results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
if self.save_best is not None:
if self.key_indicator == 'auto':
# infer from eval_results
self._init_rule(self.rule, list(eval_res.keys())[0])
return eval_res[self.key_indicator]
return None
class DistEvalHook(EvalHook): # noqa: F811
"""Distributed evaluation hook.
This hook will regularly perform evaluation in a given interval when
performing in distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
start (int | None, optional): Evaluation starting epoch. It enables
evaluation before the training starts if ``start`` <= the
resuming epoch. If None, whether to evaluate is merely decided
by ``interval``. Default: None.
interval (int): Evaluation interval. Default: 1.
            by_epoch (bool): Determine whether to perform evaluation by
                epoch or by iteration. If True, it will be performed by
                epoch; otherwise, by iteration. Default: True.
            save_best (str | None, optional): If a metric is specified, it
                would track the best checkpoint during evaluation. The
                information about the best checkpoint would be saved in
                best.json. Options are the evaluation metrics of the test
                dataset, e.g.,
``top1_acc``, ``top5_acc``, ``mean_class_accuracy``,
``mean_average_precision``, ``mmit_mean_average_precision``
for action recognition dataset (RawframeDataset and
VideoDataset). ``AR@AN``, ``auc`` for action localization
dataset (ActivityNetDataset). ``mAP@0.5IOU`` for
spatio-temporal action detection dataset (AVADataset).
If ``save_best`` is ``auto``, the first key of the returned
``OrderedDict`` result will be used. Default: 'auto'.
            rule (str | None, optional): Comparison rule for best score. If
                set to None, it will infer a reasonable rule. Keys such as
                'acc' and 'top' infer the 'greater' rule; keys containing
                'loss' infer the 'less' rule. Options are 'greater', 'less',
                None. Default: None.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
            broadcast_bn_buffer (bool): Whether to broadcast the buffers
                (running_mean and running_var) of rank 0 to other ranks
                before evaluation. Default: True.
**eval_kwargs: Evaluation arguments fed into the evaluate function
of the dataset.
"""
def __init__(self,
dataloader,
start=None,
interval=1,
by_epoch=True,
save_best='auto',
rule=None,
broadcast_bn_buffer=True,
tmpdir=None,
gpu_collect=False,
**eval_kwargs):
super().__init__(
dataloader,
start=start,
interval=interval,
by_epoch=by_epoch,
save_best=save_best,
rule=rule,
**eval_kwargs)
self.broadcast_bn_buffer = broadcast_bn_buffer
self.tmpdir = tmpdir
self.gpu_collect = gpu_collect
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for _, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self.evaluation_flag(runner):
return
from mmaction.apis import multi_gpu_test
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
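
# A typical (hypothetical) registration sketch, assuming a built mmcv
# `runner` and a validation `val_dataloader` already exist:
#
#     eval_hook = EvalHook(val_dataloader, interval=1, save_best='auto')
#     runner.register_hook(eval_hook)
#
# In a distributed run, DistEvalHook(val_dataloader, gpu_collect=True) would
# be registered instead so that results are collected across ranks.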
| 16,695
| 41.700767
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/accuracy.py
|
import numpy as np
def confusion_matrix(y_pred, y_real, normalize=None):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the true
(rows), predicted (columns) conditions or all the population.
If None, confusion matrix will not be normalized. Options are
"true", "pred", "all", None. Default: None.
Returns:
np.ndarray: Confusion matrix.
"""
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
if isinstance(y_pred, list):
y_pred = np.array(y_pred)
if not isinstance(y_pred, np.ndarray):
raise TypeError(
f'y_pred must be list or np.ndarray, but got {type(y_pred)}')
if not y_pred.dtype == np.int64:
raise TypeError(
f'y_pred dtype must be np.int64, but got {y_pred.dtype}')
if isinstance(y_real, list):
y_real = np.array(y_real)
if not isinstance(y_real, np.ndarray):
raise TypeError(
f'y_real must be list or np.ndarray, but got {type(y_real)}')
if not y_real.dtype == np.int64:
raise TypeError(
f'y_real dtype must be np.int64, but got {y_real.dtype}')
label_set = np.unique(np.concatenate((y_pred, y_real)))
num_labels = len(label_set)
max_label = label_set[-1]
label_map = np.zeros(max_label + 1, dtype=np.int64)
for i, label in enumerate(label_set):
label_map[label] = i
y_pred_mapped = label_map[y_pred]
y_real_mapped = label_map[y_real]
confusion_mat = np.bincount(
num_labels * y_real_mapped + y_pred_mapped,
minlength=num_labels**2).reshape(num_labels, num_labels)
with np.errstate(all='ignore'):
if normalize == 'true':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=1, keepdims=True))
elif normalize == 'pred':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=0, keepdims=True))
elif normalize == 'all':
confusion_mat = (confusion_mat / confusion_mat.sum())
confusion_mat = np.nan_to_num(confusion_mat)
return confusion_mat
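
def _demo_confusion_matrix():
    """Minimal, hypothetical usage sketch with synthetic labels: three
    classes over four samples, one class-2 sample misclassified as 1."""
    y_pred = np.array([0, 1, 1, 2], dtype=np.int64)
    y_real = np.array([0, 1, 2, 2], dtype=np.int64)
    # Returns a 3 x 3 count matrix (rows: truth, columns: prediction).
    return confusion_matrix(y_pred, y_real)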
def mean_class_accuracy(scores, labels):
"""Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
        float: Mean class accuracy.
"""
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
mean_class_acc = np.mean(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
return mean_class_acc
def top_k_accuracy(scores, labels, topk=(1, )):
"""Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
"""
res = []
labels = np.array(labels)[:, np.newaxis]
for k in topk:
max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]
match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)
topk_acc_score = match_array.sum() / match_array.shape[0]
res.append(topk_acc_score)
return res
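
def _demo_top_k_accuracy():
    """Minimal, hypothetical usage sketch: two samples, three classes,
    synthetic scores."""
    scores = [np.array([0.1, 0.7, 0.2]), np.array([0.5, 0.3, 0.2])]
    labels = [1, 2]
    # Returns [top-1 accuracy, top-2 accuracy] -> [0.5, 0.5] here.
    return top_k_accuracy(scores, labels, topk=(1, 2))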
def mmit_mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition. Used for reporting
MMIT style mAP on Multi-Moments in Times. The difference is that this
method calculates average-precision for each sample and averages them among
samples.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
        float: The MMIT-style mean average precision.
"""
results = []
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
return np.mean(results)
def mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
        float: The mean average precision.
"""
results = []
scores = np.stack(scores).T
labels = np.stack(labels).T
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
results = [x for x in results if not np.isnan(x)]
if results == []:
return np.nan
return np.mean(results)
def binary_precision_recall_curve(y_score, y_true):
"""Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
        thresholds (np.ndarray): Different thresholds at which precision and
            recall are tested.
"""
assert isinstance(y_score, np.ndarray)
assert isinstance(y_true, np.ndarray)
assert y_score.shape == y_true.shape
# make y_true a boolean vector
y_true = (y_true == 1)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# There may be ties in values, therefore find the `distinct_value_inds`
distinct_value_inds = np.where(np.diff(y_score))[0]
threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_inds]
fps = 1 + threshold_inds - tps
thresholds = y_score[threshold_inds]
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def pairwise_temporal_iou(candidate_segments,
target_segments,
calculate_overlap_self=False):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
``[init, end]/[m x 2:=[init, end]]``.
target_segments (np.ndarray): 2-dim array in format
``[n x 2:=[init, end]]``.
        calculate_overlap_self (bool): Whether to calculate overlap_self
            (intersection / candidate_length) or not. Default: False.
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
t_overlap_self (np.ndarray, optional): 1-dim array [n] /
2-dim array [n x m] with overlap_self, returns when
calculate_overlap_self is True.
"""
candidate_segments_ndim = candidate_segments.ndim
if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:
raise ValueError('Dimension of arguments is incorrect')
if candidate_segments_ndim == 1:
candidate_segments = candidate_segments[np.newaxis, :]
n, m = target_segments.shape[0], candidate_segments.shape[0]
t_iou = np.empty((n, m), dtype=np.float32)
if calculate_overlap_self:
t_overlap_self = np.empty((n, m), dtype=np.float32)
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
t_iou[:, i] = (segments_intersection.astype(float) / segments_union)
if calculate_overlap_self:
candidate_length = candidate_segment[1] - candidate_segment[0]
t_overlap_self[:, i] = (
segments_intersection.astype(float) / candidate_length)
if candidate_segments_ndim == 1:
t_iou = np.squeeze(t_iou, axis=1)
if calculate_overlap_self:
if candidate_segments_ndim == 1:
t_overlap_self = np.squeeze(t_overlap_self, axis=1)
return t_iou, t_overlap_self
return t_iou
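
def _demo_pairwise_temporal_iou():
    """Minimal, hypothetical usage sketch: one candidate segment scored
    against two target segments (synthetic boundaries)."""
    candidate = np.array([2.0, 6.0])
    targets = np.array([[0.0, 4.0], [5.0, 9.0]])
    # Returns the IoU against each target, here ~[0.33, 0.14].
    return pairwise_temporal_iou(candidate, targets)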
def average_recall_at_avg_proposals(ground_truth,
proposals,
total_num_proposals,
max_avg_proposals=None,
temporal_iou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
at the j-th average number (percentile) of average number of
proposals per video. The average_recall is recall averaged
over a list of temporal_iou threshold (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under ``AR@AN`` curve.
"""
total_num_videos = len(ground_truth)
if not max_avg_proposals:
max_avg_proposals = float(total_num_proposals) / total_num_videos
ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)
# For each video, compute temporal_iou scores among the retrieved proposals
score_list = []
total_num_retrieved_proposals = 0
for video_id in ground_truth:
# Get proposals for this video.
proposals_video_id = proposals[video_id]
this_video_proposals = proposals_video_id[:, :2]
# Sort proposals by score.
sort_idx = proposals_video_id[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(
np.float32)
# Get ground-truth instances associated to this video.
ground_truth_video_id = ground_truth[video_id]
this_video_ground_truth = ground_truth_video_id[:, :2].astype(
np.float32)
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_list.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:
num_retrieved_proposals, :]
# Compute temporal_iou scores.
t_iou = pairwise_temporal_iou(this_video_proposals,
this_video_ground_truth)
score_list.append(t_iou)
    # Given that video lengths vary widely, we compute the number of
    # proposals as a ratio of the total proposals retrieved, i.e. the
    # average recall at a percentage of proposals retrieved per video.
    # Computes average recall.
pcn_list = np.arange(1, 101) / 100.0 * (
max_avg_proposals * float(total_num_videos) /
total_num_retrieved_proposals)
matches = np.empty((total_num_videos, pcn_list.shape[0]))
positives = np.empty(total_num_videos)
recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
# Iterates over each temporal_iou threshold.
for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_list):
# Total positives per video.
positives[i] = score.shape[0]
            # Find proposals that satisfy the minimum temporal_iou threshold.
true_positives_temporal_iou = score >= temporal_iou
# Get number of proposals as a percentage of total retrieved.
            pcn_proposals = np.minimum(
                (score.shape[1] * pcn_list).astype(np.int64), score.shape[1])
for j, num_retrieved_proposals in enumerate(pcn_proposals):
# Compute the number of matches
# for each percentage of the proposals
matches[i, j] = np.count_nonzero(
(true_positives_temporal_iou[:, :num_retrieved_proposals]
).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_list * (
float(total_num_retrieved_proposals) / total_num_videos)
# Get AUC
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
return recall, avg_recall, proposals_per_video, auc
def get_weighted_score(score_list, coeff_list):
"""Get weighted score with given scores and coefficients.
Given n predictions by different classifier: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
score_list (list[list[np.ndarray]]): List of list of scores, with shape
n(number of predictions) X num_samples X num_classes
coeff_list (list[float]): List of coefficients, with shape n.
Returns:
list[np.ndarray]: List of weighted scores.
"""
assert len(score_list) == len(coeff_list)
num_samples = len(score_list[0])
for i in range(1, len(score_list)):
assert len(score_list[i]) == num_samples
scores = np.array(score_list) # (num_coeff, num_samples, num_classes)
coeff = np.array(coeff_list) # (num_coeff, )
weighted_scores = list(np.dot(scores.T, coeff).T)
return weighted_scores
def softmax(x, dim=1):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
return e_x / e_x.sum(axis=dim, keepdims=True)
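
def _demo_score_fusion():
    """Minimal, hypothetical usage sketch: fuse two classifiers' scores
    (two samples, three classes, synthetic values) with weights 0.6/0.4,
    then normalize the fused scores with softmax."""
    s1 = [np.array([1.0, 2.0, 0.5]), np.array([0.2, 0.1, 3.0])]
    s2 = [np.array([0.5, 1.5, 1.0]), np.array([0.3, 0.2, 2.0])]
    fused = get_weighted_score([s1, s2], [0.6, 0.4])
    # Each row of the result sums to 1.
    return softmax(np.array(fused), dim=1)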
def interpolated_precision_recall(precision, recall):
"""Interpolated AP - VOCdevkit from VOC 2011.
Args:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
Returns:
float: Average precision score.
"""
mprecision = np.hstack([[0], precision, [0]])
mrecall = np.hstack([[0], recall, [1]])
for i in range(len(mprecision) - 1)[::-1]:
mprecision[i] = max(mprecision[i], mprecision[i + 1])
idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1
ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx])
return ap
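
def _demo_interpolated_precision_recall():
    """Minimal, hypothetical usage sketch: a tiny cumulative
    precision/recall curve with synthetic values (three detections, two
    of them true positives)."""
    precision = np.array([1.0, 0.5, 0.67])
    recall = np.array([0.33, 0.33, 0.67])
    return interpolated_precision_recall(precision, recall)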
def average_precision_at_temporal_iou(ground_truth,
prediction,
temporal_iou_thresholds=(np.linspace(
0.5, 0.95, 10))):
"""Compute average precision (in detection task) between ground truth and
predicted data frames. If multiple predictions match the same predicted
segment, only the one with highest score is matched as true positive. This
code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (dict): Dict containing the ground truth instances.
Key: 'video_id'
Value (np.ndarray): 1D array of 't-start' and 't-end'.
prediction (np.ndarray): 2D array containing the information of
proposal instances, including 'video_id', 'class_id', 't-start',
't-end' and 'score'.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
np.ndarray: 1D array of average precision score.
"""
ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)
if len(prediction) < 1:
return ap
num_gts = 0.
lock_gt = dict()
for key in ground_truth:
lock_gt[key] = np.ones(
(len(temporal_iou_thresholds), len(ground_truth[key]))) * -1
num_gts += len(ground_truth[key])
# Sort predictions by decreasing score order.
prediction = np.array(prediction)
scores = prediction[:, 4].astype(float)
sort_idx = np.argsort(scores)[::-1]
prediction = prediction[sort_idx]
# Initialize true positive and false positive vectors.
tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
    # Assign true positives to ground truth instances.
for idx, this_pred in enumerate(prediction):
# Check if there is at least one ground truth in the video.
if this_pred[0] in ground_truth:
this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
else:
fp[:, idx] = 1
continue
t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)
# We would like to retrieve the predictions with highest t_iou score.
t_iou_sorted_idx = t_iou.argsort()[::-1]
for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):
for jdx in t_iou_sorted_idx:
if t_iou[jdx] < t_iou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[this_pred[0]][t_idx, jdx] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[this_pred[0]][t_idx, jdx] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
recall_cumsum = tp_cumsum / num_gts
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(temporal_iou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
return ap
| 20,710
| 38.449524
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/__init__.py
|
from .accuracy import (average_precision_at_temporal_iou,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, interpolated_precision_recall,
mean_average_precision, mean_class_accuracy,
mmit_mean_average_precision, pairwise_temporal_iou,
softmax, top_k_accuracy)
from .eval_detection import ActivityNetLocalization
from .eval_hooks import DistEvalHook, EvalHook
__all__ = [
'DistEvalHook', 'EvalHook', 'top_k_accuracy', 'mean_class_accuracy',
'confusion_matrix', 'mean_average_precision', 'get_weighted_score',
'average_recall_at_avg_proposals', 'pairwise_temporal_iou',
'average_precision_at_temporal_iou', 'ActivityNetLocalization', 'softmax',
'interpolated_precision_recall', 'mmit_mean_average_precision'
]
| 866
| 50
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/per_image_evaluation.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Evaluate Object Detection result on a single image.
Annotate each detected result as true positive or false positive according to
a predefined IOU ratio. Non-Maximum Suppression is used by default. Multi-class
detection is supported by default. Based on the settings, per-image evaluation
is either performed on boxes or on object masks.
"""
import numpy as np
from . import np_box_list, np_box_ops
class PerImageEvaluation:
"""Evaluate detection result of a single image."""
def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5):
"""Initialized PerImageEvaluation by evaluation parameters.
Args:
num_groundtruth_classes: Number of ground truth object classes
matching_iou_threshold: A ratio of area intersection to union,
which is the threshold to consider whether a detection is true
positive or not
"""
self.matching_iou_threshold = matching_iou_threshold
self.num_groundtruth_classes = num_groundtruth_classes
def compute_object_detection_metrics(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
detected_masks=None,
groundtruth_masks=None):
"""Evaluates detections as being tp, fp or ignored from a single image.
        The evaluation is done in two stages:
        1. All detections are matched to non group-of boxes.
        2. Detections that are determined as false positives are matched
           against group-of boxes and ignored if matched.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
            detected_class_labels: An integer numpy array of shape [N, 1],
                representing the class labels of the detected N object
                instances.
groundtruth_boxes: A float numpy array of shape [M, 4],
representing M regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
detected_masks: (optional) A uint8 numpy array of shape
[N, height, width]. If not None, the metrics will be computed
based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape
[M, height, width].
Returns:
scores: A list of C float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
tp_fp_labels: A list of C boolean numpy arrays. Each numpy array
is of shape [K, 1], representing K True/False positive label of
object instances detected with class label c
"""
(
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks,
) = self._remove_invalid_boxes(
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks,
)
scores, tp_fp_labels = self._compute_tp_fp(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks,
)
return scores, tp_fp_labels
def _compute_tp_fp(self,
detected_boxes,
detected_scores,
detected_class_labels,
groundtruth_boxes,
groundtruth_class_labels,
detected_masks=None,
groundtruth_masks=None):
"""Labels true/false positives of detections of an image across all
classes.
Args:
detected_boxes: A float numpy array of shape [N, 4], representing N
regions of detected object regions.
Each row is of the format [y_min, x_min, y_max, x_max]
detected_scores: A float numpy array of shape [N, 1], representing
the confidence scores of the detected N object instances.
            detected_class_labels: An integer numpy array of shape [N, 1],
                representing the class labels of the detected N object
                instances.
groundtruth_boxes: A float numpy array of shape [M, 4],
representing M regions of object instances in ground truth
groundtruth_class_labels: An integer numpy array of shape [M, 1],
representing M class labels of object instances in ground truth
detected_masks: (optional) A np.uint8 numpy array of shape
[N, height, width]. If not None, the scores will be computed
based on masks.
groundtruth_masks: (optional) A np.uint8 numpy array of shape
[M, height, width].
Returns:
result_scores: A list of float numpy arrays. Each numpy array is of
shape [K, 1], representing K scores detected with object class
label c
result_tp_fp_labels: A list of boolean numpy array. Each numpy
array is of shape [K, 1], representing K True/False positive
label of object instances detected with class label c
Raises:
ValueError: If detected masks is not None but groundtruth masks are
None, or the other way around.
"""
if detected_masks is not None and groundtruth_masks is None:
raise ValueError(
'Detected masks is available but groundtruth masks is not.')
if detected_masks is None and groundtruth_masks is not None:
raise ValueError(
'Groundtruth masks is available but detected masks is not.')
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
(gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class) = self._get_ith_class_arrays(
detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes, groundtruth_masks,
groundtruth_class_labels, i)
scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
detected_boxes=detected_boxes_at_ith_class,
detected_scores=detected_scores_at_ith_class,
groundtruth_boxes=gt_boxes_at_ith_class,
detected_masks=detected_masks_at_ith_class,
groundtruth_masks=gt_masks_at_ith_class,
)
result_scores.append(scores)
result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
@staticmethod
def _get_overlaps_and_scores_box_mode(detected_boxes, detected_scores,
groundtruth_boxes):
"""Computes overlaps and scores between detected and groudntruth boxes.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected
box coordinates
detected_scores: A 1-d numpy array of length N representing
classification score
groundtruth_boxes: A numpy array of shape [M, 4] representing
ground truth box coordinates
Returns:
iou: A float numpy array of size [num_detected_boxes,
num_gt_boxes]. If gt_non_group_of_boxlist.num_boxes() == 0 it
will be None.
ioa: A float numpy array of size [num_detected_boxes,
num_gt_boxes]. If gt_group_of_boxlist.num_boxes() == 0 it will
be None.
scores: The score of the detected boxlist.
num_boxes: Number of non-maximum suppressed detected boxes.
"""
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
gt_non_group_of_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_ops.iou(detected_boxlist.get(),
gt_non_group_of_boxlist.get())
scores = detected_boxlist.get_field('scores')
num_boxes = detected_boxlist.num_boxes()
return iou, None, scores, num_boxes
def _compute_tp_fp_for_single_class(self,
detected_boxes,
detected_scores,
groundtruth_boxes,
detected_masks=None,
groundtruth_masks=None):
"""Labels boxes detected with the same class from the same image as
tp/fp.
Args:
detected_boxes: A numpy array of shape [N, 4] representing detected
box coordinates
detected_scores: A 1-d numpy array of length N representing
classification score
groundtruth_boxes: A numpy array of shape [M, 4] representing
groundtruth box coordinates
detected_masks: (optional) A uint8 numpy array of shape
[N, height, width]. If not None, the scores will be computed
based on masks.
groundtruth_masks: (optional) A uint8 numpy array of shape
[M, height, width].
Returns:
Two arrays of the same size, containing all boxes that were
evaluated as being true positives or false positives.
scores: A numpy array representing the detection scores.
tp_fp_labels: a boolean numpy array indicating whether a detection
is a true positive.
"""
if detected_boxes.size == 0:
return np.array([], dtype=float), np.array([], dtype=bool)
(iou, _, scores,
num_detected_boxes) = self._get_overlaps_and_scores_box_mode(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
groundtruth_boxes=groundtruth_boxes)
if groundtruth_boxes.size == 0:
return scores, np.zeros(num_detected_boxes, dtype=bool)
tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool)
# The evaluation is done in two stages:
# 1. All detections are matched to non group-of boxes.
# 2. Detections that are determined as false positives are matched
# against group-of boxes and ignored if matched.
# Tp-fp evaluation for non-group of boxes (if any).
if iou.shape[1] > 0:
max_overlap_gt_ids = np.argmax(iou, axis=1)
is_gt_box_detected = np.zeros(iou.shape[1], dtype=bool)
for i in range(num_detected_boxes):
gt_id = max_overlap_gt_ids[i]
if iou[i, gt_id] >= self.matching_iou_threshold:
if not is_gt_box_detected[gt_id]:
tp_fp_labels[i] = True
is_gt_box_detected[gt_id] = True
return scores, tp_fp_labels
@staticmethod
def _get_ith_class_arrays(detected_boxes, detected_scores, detected_masks,
detected_class_labels, groundtruth_boxes,
groundtruth_masks, groundtruth_class_labels,
class_index):
"""Returns numpy arrays belonging to class with index `class_index`.
Args:
detected_boxes: A numpy array containing detected boxes.
detected_scores: A numpy array containing detected scores.
detected_masks: A numpy array containing detected masks.
detected_class_labels: A numpy array containing detected class
labels.
groundtruth_boxes: A numpy array containing groundtruth boxes.
groundtruth_masks: A numpy array containing groundtruth masks.
groundtruth_class_labels: A numpy array containing groundtruth
class labels.
class_index: An integer index.
Returns:
gt_boxes_at_ith_class: A numpy array containing groundtruth boxes
labeled as ith class.
gt_masks_at_ith_class: A numpy array containing groundtruth masks
labeled as ith class.
detected_boxes_at_ith_class: A numpy array containing detected
boxes corresponding to the ith class.
detected_scores_at_ith_class: A numpy array containing detected
scores corresponding to the ith class.
detected_masks_at_ith_class: A numpy array containing detected
masks corresponding to the ith class.
"""
selected_groundtruth = groundtruth_class_labels == class_index
gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth]
if groundtruth_masks is not None:
gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth]
else:
gt_masks_at_ith_class = None
selected_detections = detected_class_labels == class_index
detected_boxes_at_ith_class = detected_boxes[selected_detections]
detected_scores_at_ith_class = detected_scores[selected_detections]
if detected_masks is not None:
detected_masks_at_ith_class = detected_masks[selected_detections]
else:
detected_masks_at_ith_class = None
return (gt_boxes_at_ith_class, gt_masks_at_ith_class,
detected_boxes_at_ith_class, detected_scores_at_ith_class,
detected_masks_at_ith_class)
@staticmethod
def _remove_invalid_boxes(detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Removes entries with invalid boxes.
A box is invalid if either its xmax is smaller than its xmin, or its
ymax is smaller than its ymin.
Args:
detected_boxes: A float numpy array of size [num_boxes, 4]
containing box coordinates in [ymin, xmin, ymax, xmax] format.
detected_scores: A float numpy array of size [num_boxes].
            detected_class_labels: An int32 numpy array of size [num_boxes].
detected_masks: A uint8 numpy array of size
[num_boxes, height, width].
Returns:
valid_detected_boxes: A float numpy array of size
[num_valid_boxes, 4] containing box coordinates in
[ymin, xmin, ymax, xmax] format.
valid_detected_scores: A float numpy array of size
[num_valid_boxes].
            valid_detected_class_labels: An int32 numpy array of size
[num_valid_boxes].
valid_detected_masks: A uint8 numpy array of size
[num_valid_boxes, height, width].
"""
valid_indices = np.logical_and(
detected_boxes[:, 0] < detected_boxes[:, 2],
detected_boxes[:, 1] < detected_boxes[:, 3])
detected_boxes = detected_boxes[valid_indices]
detected_scores = detected_scores[valid_indices]
detected_class_labels = detected_class_labels[valid_indices]
if detected_masks is not None:
detected_masks = detected_masks[valid_indices]
return [
detected_boxes, detected_scores, detected_class_labels,
detected_masks
]
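# A minimal illustrative sketch of the validity rule above (assumed values,
# not part of the original module): only boxes with ymin < ymax and
# xmin < xmax survive the filter.
def _remove_invalid_boxes_example():
    import numpy as np
    boxes = np.array([[0., 0., 1., 1.],    # valid
                      [1., 1., 0., 2.]])   # invalid: ymax < ymin
    valid = np.logical_and(boxes[:, 0] < boxes[:, 2],
                           boxes[:, 1] < boxes[:, 3])
    return boxes[valid]  # only the first box remains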
| 16,948
| 46.211699
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/standard_fields.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
"""
class InputDataFields:
"""Names for the input tensors.
Holds the standard data field names to use for identifying input tensors.
This should be used by the decoder to identify keys for the returned
tensor_dict containing input tensors. And it should be used by the model to
identify the tensors it needs.
Attributes:
image: image.
original_image: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_label_types: box-level label types (e.g. explicit
negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of
the same class, forming a connected group, where instances are
heavily occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_label_scores: groundtruth label scores.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
        true_image_shape: true shape of each image after resizing, since
            resized images may be padded with zeros.
"""
image = 'image'
original_image = 'original_image'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_label_scores = 'groundtruth_label_scores'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
true_image_shape = 'true_image_shape'
class DetectionResultFields:
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the
image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection
box.
detection_keypoints: contains detection keypoints for each detection
box.
num_detections: number of detections in the batch.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
num_detections = 'num_detections'
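# Illustrative sketch (assumed values, not part of the original module): the
# field names above serve as dictionary keys when exchanging data with the
# evaluators, e.g. a groundtruth dict for a single image.
def _fields_usage_example():
    import numpy as np
    return {
        InputDataFields.groundtruth_boxes:
        np.array([[0., 0., 10., 10.]], dtype=np.float32),
        InputDataFields.groundtruth_classes: np.array([1], dtype=int),
    }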
| 5,313
| 44.810345
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/np_box_list.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Numpy BoxList classes and functions."""
import numpy as np
class BoxList:
"""Box collection.
BoxList represents a list of bounding boxes as numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within
a given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError(
'Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
            field: a string parameter used to specify a related field to be
accessed.
field_data: a numpy array of [N, ...] representing the data
associated with the field.
Raises:
            ValueError: if the field already exists or the dimension of the
                field data does not match the number of boxes.
"""
if self.has_field(field):
            raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes(
):
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box
collection.
Args:
            field: a string parameter used to specify a related field to be
accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError(f'field {field} does not exist')
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
@staticmethod
def _is_valid_boxes(data):
"""Check whether data fulfills the format of N*[ymin, xmin, ymax,
xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater
than ymin, and all xmax of boxes are equal or greater than xmin.
"""
if len(data) != 0:
for v in data:
if v[0] > v[2] or v[1] > v[3]:
return False
return True
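# Illustrative usage sketch (assumed values, not part of the original
# module): construct a BoxList, attach a score field, and read it back.
def _box_list_example():
    boxes = np.array([[0., 0., 1., 1.], [0.5, 0.5, 2., 2.]],
                     dtype=np.float32)
    box_list = BoxList(boxes)
    box_list.add_field('scores', np.array([0.9, 0.4]))
    return box_list.num_boxes(), box_list.get_field('scores')  # 2, [0.9 0.4]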
| 4,923
| 34.171429
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/metrics.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
import numpy as np
def compute_precision_recall(scores, labels, num_gt):
"""Compute precision and recall.
Args:
scores: A float numpy array representing detection score
labels: A boolean numpy array representing true/false positive labels
num_gt: Number of ground truth instances
Raises:
ValueError: if the input is not of the correct format
Returns:
precision: Fraction of positive instances over detected ones. This
value is None if no ground truth labels are present.
recall: Fraction of detected positive instance over all positive
instances. This value is None if no ground truth labels are
present.
"""
    if (not isinstance(labels, np.ndarray) or labels.dtype != bool
            or len(labels.shape) != 1):
raise ValueError('labels must be single dimension bool numpy array')
if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
raise ValueError('scores must be single dimension numpy array')
if num_gt < np.sum(labels):
        raise ValueError(
            'Number of true positives must not exceed num_gt.')
if len(scores) != len(labels):
raise ValueError('scores and labels must be of the same size.')
if num_gt == 0:
return None, None
sorted_indices = np.argsort(scores)
sorted_indices = sorted_indices[::-1]
labels = labels.astype(int)
true_positive_labels = labels[sorted_indices]
false_positive_labels = 1 - true_positive_labels
cum_true_positives = np.cumsum(true_positive_labels)
cum_false_positives = np.cumsum(false_positive_labels)
precision = cum_true_positives.astype(float) / (
cum_true_positives + cum_false_positives)
recall = cum_true_positives.astype(float) / num_gt
return precision, recall
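# Illustrative sketch (assumed values, not part of the original module):
# three detections sorted by score, two of them true positives, num_gt = 2.
def _precision_recall_example():
    scores = np.array([0.9, 0.8, 0.3])
    labels = np.array([True, False, True])
    precision, recall = compute_precision_recall(scores, labels, num_gt=2)
    # precision -> [1.0, 0.5, 0.667], recall -> [0.5, 0.5, 1.0]
    return precision, recall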
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
    Precision is modified to ensure that it does not decrease as recall
    decreases.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
        average_precision: The area under the precision recall curve. NaN
            if precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError('If precision is None, recall must also be None')
        return np.nan
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError('precision and recall must be numpy array')
    if precision.dtype != float or recall.dtype != float:
raise ValueError('input must be float numpy array.')
if len(precision) != len(recall):
raise ValueError('precision and recall must be of the same size.')
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError('Precision must be in the range of [0, 1].')
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError('recall must be in the range of [0, 1].')
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError('recall must be a non-decreasing array')
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
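# Illustrative sketch (assumed values): a two-point precision/recall curve.
# The interpolation above gives AP = 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
def _average_precision_example():
    precision = np.array([1.0, 0.5])
    recall = np.array([0.5, 1.0])
    return compute_average_precision(precision, recall)  # 0.75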
def compute_cor_loc(num_gt_imgs_per_class,
num_images_correctly_detected_per_class):
"""Compute CorLoc according to the definition in the following paper.
https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf
Returns nans if there are no ground truth images for a class.
Args:
num_gt_imgs_per_class: 1D array, representing number of images
containing at least one object instance of a particular class
        num_images_correctly_detected_per_class: 1D array, representing the
            number of images in which at least one object instance of a
            particular class is correctly detected
Returns:
corloc_per_class: A float numpy array represents the corloc score of
each class
"""
# Divide by zero expected for classes with no gt examples.
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(
num_gt_imgs_per_class == 0, np.nan,
num_images_correctly_detected_per_class / num_gt_imgs_per_class)
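# Illustrative sketch (assumed values): class 0 is correctly localized in 1
# of its 2 groundtruth images; class 1 has no groundtruth images -> nan.
def _cor_loc_example():
    num_gt_imgs = np.array([2, 0])
    num_correct = np.array([1, 0])
    return compute_cor_loc(num_gt_imgs, num_correct)  # [0.5, nan]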
| 5,689
| 38.79021
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/object_detection_evaluation.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of
an object detection dataset, and computes frequently used detection metrics such
as Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
import collections
import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import numpy as np
from . import metrics, per_image_evaluation, standard_fields
class DetectionEvaluator(metaclass=ABCMeta):
    """Interface for object detection evaluation classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this
category.
'name': (required) string representing category name e.g.,
'cat', 'dog'.
"""
self._categories = categories
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required
for evaluations.
"""
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required
for evaluation.
"""
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this
category.
'name': (required) string representing category name e.g.,
'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching
groundtruth boxes to detection boxes.
evaluate_corlocs: (optional) boolean which determines if corloc
scores are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None,
no prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the
mean average precision is computed directly from the scores and
tp_fp_labels of all classes.
evaluate_masks: If False, evaluation will be performed based on
boxes. If True, mask evaluation will be performed instead.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32
numpy array of shape [num_boxes, 4] containing `num_boxes`
groundtruth boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer
numpy array of shape [num_boxes] containing 1-indexed
groundtruth classes for the boxes.
standard_fields.InputDataFields.groundtruth_instance_masks:
Optional numpy array of shape [num_boxes, height, width]
with values in {0, 1}.
Raises:
ValueError: On adding groundtruth for an image more than once. Will
also raise error if instance masks are not in groundtruth
dictionary.
"""
if image_id in self._image_ids:
raise ValueError(
'Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks
not in groundtruth_dict):
raise ValueError(
'Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_masks=groundtruth_masks,
)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32
numpy array of shape [num_boxes, 4] containing `num_boxes`
detection boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32
numpy array of shape [num_boxes] containing detection
scores for the boxes.
standard_fields.DetectionResultFields.detection_classes:
integer numpy array of shape [num_boxes] containing
1-indexed detection classes for the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8
numpy array of shape [num_boxes, height, width] containing
`num_boxes` masks of values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[
standard_fields.DetectionResultFields.detection_classes] -
self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks
not in detections_dict):
raise ValueError(
'Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks,
)
@staticmethod
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category
id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this
category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories,
but keyed by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'Precision/mAP@<matching_iou_threshold>IOU': mean average
precision at the specified IOU threshold
2. per_category_ap: category specific results with keys of the form
'PerformanceByCategory/mAP@<matching_iou_threshold>IOU/category'
"""
(per_class_ap, mean_ap, _, _, per_class_corloc,
mean_corloc) = self._evaluation.evaluate()
metric = f'mAP@{self._matching_iou_threshold}IOU'
pascal_metrics = {self._metric_prefix + metric: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(
self._matching_iou_threshold)] = mean_corloc
category_index = self.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
display_name = (
self._metric_prefix +
'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold,
category_index[idx + self._label_id_offset]['name'],
))
pascal_metrics[display_name] = per_class_ap[idx]
                # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold,
category_index[idx +
self._label_id_offset]['name'],
))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
)
self._image_ids.clear()
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
use_weighted_mean_ap=False,
)
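# Illustrative end-to-end sketch (assumed values, not part of the original
# module): one image, one groundtruth box, one perfectly matching detection.
def _pascal_evaluator_example():
    evaluator = PascalDetectionEvaluator(
        categories=[{'id': 1, 'name': 'person'}])
    evaluator.add_single_ground_truth_image_info(
        image_id='img0',
        groundtruth_dict={
            standard_fields.InputDataFields.groundtruth_boxes:
            np.array([[10., 10., 50., 50.]], dtype=np.float32),
            standard_fields.InputDataFields.groundtruth_classes:
            np.array([1], dtype=int),
        })
    evaluator.add_single_detected_image_info(
        image_id='img0',
        detections_dict={
            standard_fields.DetectionResultFields.detection_boxes:
            np.array([[10., 10., 50., 50.]], dtype=np.float32),
            standard_fields.DetectionResultFields.detection_scores:
            np.array([0.9], dtype=np.float32),
            standard_fields.DetectionResultFields.detection_classes:
            np.array([1], dtype=int),
        })
    return evaluator.evaluate()  # {'mAP@0.5IOU': 1.0, ...}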
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics',
[
'average_precisions',
'mean_ap',
'precisions',
'recalls',
'corlocs',
'mean_corloc',
],
)
class ObjectDetectionEvaluation:
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
use_weighted_mean_ap=False,
label_id_offset=0):
if num_groundtruth_classes < 1:
raise ValueError(
'Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_evaluation.PerImageEvaluation(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
)
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(
self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = []
self.recalls_per_class = []
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_masks: uint8 numpy array of shape
[num_boxes, height, width] containing `num_boxes` groundtruth
masks. The mask values range from 0 to 1.
"""
if image_key in self.groundtruth_boxes:
            warnings.warn('image %s has already been added to the ground '
                          'truth database.' % image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
self._update_ground_truth_statistics(groundtruth_class_labels)
def add_single_detected_image_info(self,
image_key,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes]
containing detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape
[num_boxes, height, width] containing `num_boxes` detection
masks with values ranging between 0 and 1.
Raises:
ValueError: if the number of boxes, scores and class labels differ
in length.
"""
if len(detected_boxes) != len(detected_scores) or len(
detected_boxes) != len(detected_class_labels):
            raise ValueError(
                'detected_boxes, detected_scores and '
                'detected_class_labels should all have same lengths. Got '
                '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                                  len(detected_class_labels)))
if image_key in self.detection_keys:
            warnings.warn('image %s has already been added to the '
                          'detection result database.' % image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
# Masks are popped instead of look up. The reason is that we do not
# want to keep all masks in memory which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(image_key)
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
(
scores,
tp_fp_labels,
) = self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks,
)
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
def _update_ground_truth_statistics(self, groundtruth_class_labels):
"""Update grouth truth statitistics.
Args:
groundtruth_class_labels: An integer numpy array of length M,
representing M class labels of object instances in ground truth
"""
        count = defaultdict(int)
for label in groundtruth_class_labels:
count[label] += 1
for k in count:
self.num_gt_instances_per_class[k] += count[k]
self.num_gt_imgs_per_class[k] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
A named tuple with the following fields -
average_precision: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions: List of precisions, each precision is a float numpy
array
recalls: List of recalls, each recall is a float numpy array
corloc: numpy float array
                mean_corloc: Mean CorLoc score over all classes, float scalar
"""
if (self.num_gt_instances_per_class == 0).any():
logging.info(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=bool)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(
self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels,
self.num_gt_instances_per_class[class_index])
self.precisions_per_class.append(precision)
self.recalls_per_class.append(recall)
average_precision = metrics.compute_average_precision(
precision, recall)
self.average_precision_per_class[class_index] = average_precision
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
mean_ap = metrics.compute_average_precision(precision, recall)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(
self.average_precision_per_class,
mean_ap,
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class,
mean_corloc,
)
| 24,757
| 42.057391
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/evaluation/ava_evaluation/np_box_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
        a numpy array with shape [N] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
        a numpy array with shape [N, M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = (
np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) -
intersect)
return intersect / union
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
areas = np.expand_dims(area(boxes2), axis=0)
return intersect / areas
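# Illustrative sketch (assumed values): a 2x2 box overlapping a shifted 2x2
# box by a 1x1 square, plus a disjoint box.
def _iou_ioa_example():
    boxes1 = np.array([[0., 0., 2., 2.]])
    boxes2 = np.array([[1., 1., 3., 3.], [4., 4., 5., 5.]])
    # iou: intersection 1 over union 4 + 4 - 1 = 7 -> [[1/7, 0]]
    # ioa: intersection 1 over area of each box in boxes2 -> [[1/4, 0]]
    return iou(boxes1, boxes2), ioa(boxes1, boxes2)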
| 3,462
| 33.979798
| 80
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/bbox/bbox_target.py
|
import torch
import torch.nn.functional as F
def bbox_target(pos_bboxes_list, neg_bboxes_list, gt_labels, cfg):
"""Generate classification targets for bboxes.
Args:
pos_bboxes_list (list[Tensor]): Positive bboxes list.
neg_bboxes_list (list[Tensor]): Negative bboxes list.
gt_labels (list[Tensor]): Groundtruth classification label list.
cfg (Config): RCNN config.
Returns:
(Tensor, Tensor): Label and label_weight for bboxes.
"""
labels, label_weights = [], []
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
assert len(pos_bboxes_list) == len(neg_bboxes_list) == len(gt_labels)
length = len(pos_bboxes_list)
for i in range(length):
pos_bboxes = pos_bboxes_list[i]
neg_bboxes = neg_bboxes_list[i]
gt_label = gt_labels[i]
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
label = F.pad(gt_label, (0, 0, 0, num_neg))
label_weight = pos_bboxes.new_zeros(num_samples)
label_weight[:num_pos] = pos_weight
label_weight[-num_neg:] = 1.
labels.append(label)
label_weights.append(label_weight)
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
return labels, label_weights
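# Illustrative sketch (assumed values, not part of the original module): one
# image with two positive and one negative proposal over 3 action classes.
def _bbox_target_example():
    from types import SimpleNamespace
    pos_bboxes = [torch.zeros(2, 4)]
    neg_bboxes = [torch.zeros(1, 4)]
    gt_labels = [torch.tensor([[1., 0., 1.], [0., 1., 0.]])]
    cfg = SimpleNamespace(pos_weight=-1)  # <= 0 falls back to weight 1.0
    labels, label_weights = bbox_target(pos_bboxes, neg_bboxes, gt_labels,
                                        cfg)
    # labels: shape (3, 3), a zero row appended for the negative proposal
    # label_weights: tensor([1., 1., 1.])
    return labels, label_weights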
| 1,334
| 30.785714
| 73
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/bbox/__init__.py
|
from .assigners import MaxIoUAssignerAVA
from .bbox_target import bbox_target
from .transforms import bbox2result
__all__ = ['MaxIoUAssignerAVA', 'bbox_target', 'bbox2result']
| 177
| 28.666667
| 61
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/bbox/transforms.py
|
import numpy as np
def bbox2result(bboxes, labels, num_classes, thr=0.01):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 4)
labels (Tensor): shape (n, #num_classes)
num_classes (int): class number, including background class
thr (float): The score threshold used when converting predictions to
detection results
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
# We only handle multilabel now
assert labels.shape[-1] > 1
scores = labels # rename for clarification
thr = (thr, ) * num_classes if isinstance(thr, float) else thr
assert scores.shape[1] == num_classes
assert len(thr) == num_classes
result = []
for i in range(num_classes - 1):
where = scores[:, i + 1] > thr[i + 1]
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result
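# Illustrative sketch (assumed values, not part of the original module): two
# boxes scored over 3 classes (background + 2 actions), thresholded at 0.5.
def _bbox2result_example():
    import torch
    bboxes = torch.tensor([[0., 0., 1., 1.], [1., 1., 2., 2.]])
    scores = torch.tensor([[0.1, 0.9, 0.2], [0.2, 0.3, 0.8]])
    # one (n, 5) array per non-background class: [x1, y1, x2, y2, score]
    return bbox2result(bboxes, scores, num_classes=3, thr=0.5)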
| 1,167
| 30.567568
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/bbox/assigners/__init__.py
|
from .max_iou_assigner_ava import MaxIoUAssignerAVA
__all__ = ['MaxIoUAssignerAVA']
| 85
| 20.5
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/bbox/assigners/max_iou_assigner_ava.py
|
import torch
from mmaction.utils import import_module_error_class
try:
from mmdet.core.bbox import AssignResult, MaxIoUAssigner
from mmdet.core.bbox.builder import BBOX_ASSIGNERS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
if mmdet_imported:
@BBOX_ASSIGNERS.register_module()
class MaxIoUAssignerAVA(MaxIoUAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
        Each proposal will be assigned `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float | tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each
gt). Default: 0.
gt_max_assign_all (bool): Whether to assign all bboxes with the
same highest overlap with some gt to that gt. Default: True.
"""
        # The function is overridden to handle the case where gt_label is
        # not an int
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor, optional): Labels of k gt_bboxes, shape
(k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
                # Low-quality matching will overwrite the assigned_gt_inds
                # assigned in Step 3. Thus, the assigned gt might not be the
                # best one for prediction.
                # For example, if bbox A has 0.9 and 0.8 iou with GT bbox
                # 1 & 2, GT bbox 1 will be assigned as the best target for
                # bbox A in step 3. However, if GT bbox 2's
                # gt_argmax_overlaps = A, bbox A's assigned_gt_inds will be
                # overwritten to be GT bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
if gt_labels is not None:
# consider multi-class case (AVA)
assert len(gt_labels[0]) > 1
assigned_labels = assigned_gt_inds.new_zeros(
(num_bboxes, len(gt_labels[0])), dtype=torch.float32)
# If not assigned, labels will be all 0
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
else:
# define an empty class, so that can be imported
@import_module_error_class('mmdet')
class MaxIoUAssignerAVA:
pass
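# Illustrative sketch of the thresholding in steps 2-3 above (assumed
# values, plain torch, no mmdet required): one gt, three proposals,
# pos_iou_thr = neg_iou_thr = 0.5.
def _iou_assign_example():
    overlaps = torch.tensor([[0.2, 0.4, 0.8]])  # (num_gts, num_bboxes)
    max_overlaps, argmax_overlaps = overlaps.max(dim=0)
    assigned_gt_inds = overlaps.new_full((3, ), -1, dtype=torch.long)
    assigned_gt_inds[(max_overlaps >= 0)
                     & (max_overlaps < 0.5)] = 0  # negatives
    pos_inds = max_overlaps >= 0.5
    assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1  # positives
    return assigned_gt_inds  # tensor([0, 0, 1])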
| 6,032
| 42.402878
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/runner/omnisource_runner.py
|
# Copyright (c) Open-MMLab. All rights reserved.
import time
import warnings
import mmcv
from mmcv.runner import EpochBasedRunner, Hook
from mmcv.runner.utils import get_host_info
def cycle(iterable):
iterator = iter(iterable)
while True:
try:
yield next(iterator)
except StopIteration:
iterator = iter(iterable)
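# Illustrative sketch: cycle() restarts an exhausted iterable, so the
# auxiliary dataloaders below can be drawn from indefinitely.
def _cycle_example():
    it = cycle([1, 2])
    return [next(it) for _ in range(5)]  # [1, 2, 1, 2, 1]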
class OmniSourceDistSamplerSeedHook(Hook):
def before_epoch(self, runner):
for data_loader in runner.data_loaders:
if hasattr(data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch'):
                # batch sampler in pytorch wraps the sampler as its attribute.
data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
class OmniSourceRunner(EpochBasedRunner):
"""OmniSource Epoch-based Runner.
    This runner trains models epoch by epoch; the epoch length is defined by
    data_loaders[0], the main dataloader.
"""
def run_iter(self, data_batch, train_mode, source, **kwargs):
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer,
**kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
# Since we have multiple sources, we add a suffix to log_var names,
# so that we can differentiate them.
if 'log_vars' in outputs:
log_vars = outputs['log_vars']
log_vars = {k + source: v for k, v in log_vars.items()}
self.log_buffer.update(log_vars, outputs['num_samples'])
self.outputs = outputs
def train(self, data_loaders, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loaders = data_loaders
self.main_loader = self.data_loaders[0]
# Add aliasing
self.data_loader = self.main_loader
self.aux_loaders = self.data_loaders[1:]
self.aux_iters = [cycle(loader) for loader in self.aux_loaders]
auxiliary_iter_times = [1] * len(self.aux_loaders)
use_aux_per_niter = 1
if 'train_ratio' in kwargs:
train_ratio = kwargs.pop('train_ratio')
use_aux_per_niter = train_ratio[0]
auxiliary_iter_times = train_ratio[1:]
self._max_iters = self._max_epochs * len(self.main_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.main_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, source='')
self.call_hook('after_train_iter')
if self._iter % use_aux_per_niter != 0:
self._iter += 1
continue
for idx, n_times in enumerate(auxiliary_iter_times):
for _ in range(n_times):
data_batch = next(self.aux_iters[idx])
self.call_hook('before_train_iter')
self.run_iter(
data_batch, train_mode=True, source=f'/aux{idx}')
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
    # Since validation is handled by a hook, this method is left unimplemented.
def val(self, data_loader, **kwargs):
raise NotImplementedError
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training.
`data_loaders[0]` is the main data_loader, which contains
target datasets and determines the epoch length.
`data_loaders[1:]` are auxiliary data loaders, which contain
auxiliary web datasets.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g, [('train', 2)] means running 2
epochs for training iteratively. Note that val epoch is not
supported for this runner for simplicity.
max_epochs (int | None): The max epochs that training lasts,
deprecated now. Default: None.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(workflow) == 1 and workflow[0][0] == 'train'
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
mode, epochs = workflow[0]
self._max_iters = self._max_epochs * len(data_loaders[0])
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
while self.epoch < self._max_epochs:
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
f'mode in workflow must be a str, but got {mode}')
for _ in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(data_loaders, **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
| 6,589
| 39.429448
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/runner/__init__.py
|
from .omnisource_runner import OmniSourceDistSamplerSeedHook, OmniSourceRunner
__all__ = ['OmniSourceRunner', 'OmniSourceDistSamplerSeedHook']
| 144
| 35.25
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/hooks/__init__.py
|
from .output import OutputHook
__all__ = ['OutputHook']
| 57
| 13.5
| 30
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/hooks/output.py
|
import functools
import warnings
import torch
class OutputHook:
"""Output feature map of some layers.
Args:
module (nn.Module): The whole module to get layers.
outputs (tuple[str] | list[str]): Layer name to output. Default: None.
        as_tensor (bool): Whether to return a tensor or a numpy array.
            Default: False.
"""
def __init__(self, module, outputs=None, as_tensor=False):
self.outputs = outputs
self.as_tensor = as_tensor
self.layer_outputs = {}
self.handles = []
self.register(module)
def register(self, module):
def hook_wrapper(name):
def hook(model, input, output):
if not isinstance(output, torch.Tensor):
warnings.warn(f'Directly return the output from {name}, '
f'since it is not a tensor')
self.layer_outputs[name] = output
elif self.as_tensor:
self.layer_outputs[name] = output
else:
self.layer_outputs[name] = output.detach().cpu().numpy()
return hook
if isinstance(self.outputs, (list, tuple)):
for name in self.outputs:
try:
layer = rgetattr(module, name)
h = layer.register_forward_hook(hook_wrapper(name))
except AttributeError:
raise AttributeError(f'Module {name} not found')
self.handles.append(h)
def remove(self):
for h in self.handles:
h.remove()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.remove()
# using wonder's beautiful simplification:
# https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects
def rgetattr(obj, attr, *args):
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
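# Illustrative usage sketch (assumed toy model, not part of the original
# module): capture a submodule's output during a forward pass.
def _output_hook_example():
    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU())
    with OutputHook(model, outputs=['0'], as_tensor=True) as hook:
        model(torch.randn(2, 4))
        return hook.layer_outputs['0'].shape  # torch.Size([2, 8])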
| 2,040
| 29.014706
| 84
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/optimizer/topk_optimizer_constructor.py
|
import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
@OPTIMIZER_BUILDERS.register_module()
class TopkOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
This constructor builds optimizer in different ways from the default one.
1. Parameters of the first conv layer have default lr and weight decay.
2. Parameters of BN layers have default lr and zero weight decay.
3. If the field "fc_lr5" in paramwise_cfg is set to True, the parameters
of the last fc layer in cls_head have 5x lr multiplier and 10x weight
decay multiplier.
4. Weights of other layers have default lr and weight decay, and biases
have a 2x lr multiplier and zero weight decay.
"""
def add_params(self, params, model):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
train_topk_only = self.paramwise_cfg['train_topk_only']
# Batchnorm parameters.
bn_params = []
# Non-batchnorm parameters.
non_bn_params = []
predictor = []
for name, param in model.named_parameters():
if 'predictor' in name:
predictor.append(param)
elif train_topk_only:
continue # frozen weights other than predictor
elif "bn" in name:
bn_params.append(param)
else:
non_bn_params.append(param)
params.append({
'params': predictor,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': bn_params,
'lr': self.base_lr * 0.01,
'weight_decay': 0.0
})
params.append({
'params': non_bn_params,
'lr': self.base_lr * 0.01,
'weight_decay': self.base_wd
})
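# A minimal illustrative sketch (assumed toy model): with
# train_topk_only=True, only parameters whose names contain 'predictor'
# end up in a non-empty parameter group.
def _topk_constructor_example():
    model = torch.nn.Module()
    model.predictor = torch.nn.Linear(4, 2)  # the token predictor
    model.backbone = torch.nn.Linear(4, 4)   # frozen during top-k training
    constructor = TopkOptimizerConstructor(
        optimizer_cfg=dict(type='SGD', lr=0.01, weight_decay=1e-4),
        paramwise_cfg=dict(train_topk_only=True))
    params = []
    constructor.add_params(params, model)
    return [len(group['params']) for group in params]  # [2, 0, 0]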
| 2,226
| 32.238806
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/optimizer/tsm_optimizer_constructor.py
|
import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd
@OPTIMIZER_BUILDERS.register_module()
class TSMOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
This constructor builds optimizer in different ways from the default one.
1. Parameters of the first conv layer have default lr and weight decay.
2. Parameters of BN layers have default lr and zero weight decay.
3. If the field "fc_lr5" in paramwise_cfg is set to True, the parameters
of the last fc layer in cls_head have 5x lr multiplier and 10x weight
decay multiplier.
4. Weights of other layers have default lr and weight decay, and biases
have a 2x lr multiplier and zero weight decay.
"""
def add_params(self, params, model):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
        # Use fc_lr5 to determine whether to apply higher lr/wd multipliers
        # to the last fc layer's weights and bias.
fc_lr5 = self.paramwise_cfg['fc_lr5']
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
conv_cnt = 0
for m in model.modules():
if isinstance(m, _ConvNd):
m_params = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(m_params[0])
if len(m_params) == 2:
first_conv_bias.append(m_params[1])
else:
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m, torch.nn.Linear):
m_params = list(m.parameters())
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m,
(_BatchNorm, SyncBatchNorm, torch.nn.GroupNorm)):
for param in list(m.parameters()):
if param.requires_grad:
bn.append(param)
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError(f'New atomic module type: {type(m)}. '
'Need to give it a learning policy')
# pop the cls_head fc layer params
last_fc_weight = normal_weight.pop()
last_fc_bias = normal_bias.pop()
if fc_lr5:
lr5_weight.append(last_fc_weight)
lr10_bias.append(last_fc_bias)
else:
normal_weight.append(last_fc_weight)
normal_bias.append(last_fc_bias)
params.append({
'params': first_conv_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': first_conv_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({
'params': normal_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': normal_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0})
params.append({
'params': lr5_weight,
'lr': self.base_lr * 5,
'weight_decay': self.base_wd
})
params.append({
'params': lr10_bias,
'lr': self.base_lr * 10,
'weight_decay': 0
})
| 4,074
| 36.045455
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/core/optimizer/copy_of_sgd.py
|
from mmcv.runner import OPTIMIZERS
from torch.optim import SGD
@OPTIMIZERS.register_module()
class CopyOfSGD(SGD):
"""A clone of torch.optim.SGD.
A customized optimizer could be defined like CopyOfSGD. You may derive from
built-in optimizers in torch.optim, or directly implement a new optimizer.
"""
STTS-main/VideoSwin/mmaction/core/optimizer/__init__.py
from .copy_of_sgd import CopyOfSGD
from .tsm_optimizer_constructor import TSMOptimizerConstructor
from .topk_optimizer_constructor import TopkOptimizerConstructor
__all__ = ['CopyOfSGD', 'TSMOptimizerConstructor', 'TopkOptimizerConstructor']
STTS-main/VideoSwin/mmaction/models/__init__.py
from .backbones import (C3D, X3D, MobileNetV2, MobileNetV2TSM, ResNet,
ResNet2Plus1d, ResNet3d, ResNet3dCSN, ResNet3dLayer,
ResNet3dSlowFast, ResNet3dSlowOnly, ResNetAudio,
ResNetTIN, ResNetTSM, TANet)
from .builder import (BACKBONES, DETECTORS, HEADS, LOCALIZERS, LOSSES, NECKS,
RECOGNIZERS, build_backbone, build_detector, build_head,
build_localizer, build_loss, build_model, build_neck,
build_recognizer)
from .common import LFB, TAM, Conv2plus1d, ConvAudio
from .heads import (ACRNHead, AudioTSNHead, AVARoIHead, BaseHead, BBoxHeadAVA,
FBOHead, I3DHead, LFBInferHead, SlowFastHead, TPNHead,
TRNHead, TSMHead, TSNHead, X3DHead)
from .localizers import BMN, PEM, TEM
from .losses import (BCELossWithLogits, BinaryLogisticRegressionLoss, BMNLoss,
CrossEntropyLoss, HVULoss, NLLLoss, OHEMHingeLoss,
SSNLoss)
from .necks import TPN
from .recognizers import (AudioRecognizer, BaseRecognizer, Recognizer2D,
Recognizer3D)
from .roi_extractors import SingleRoIExtractor3D
__all__ = [
'BACKBONES', 'HEADS', 'RECOGNIZERS', 'build_recognizer', 'build_head',
'build_backbone', 'Recognizer2D', 'Recognizer3D', 'C3D', 'ResNet',
'ResNet3d', 'ResNet2Plus1d', 'I3DHead', 'TSNHead', 'TSMHead', 'BaseHead',
'BaseRecognizer', 'LOSSES', 'CrossEntropyLoss', 'NLLLoss', 'HVULoss',
'ResNetTSM', 'ResNet3dSlowFast', 'SlowFastHead', 'Conv2plus1d',
'ResNet3dSlowOnly', 'BCELossWithLogits', 'LOCALIZERS', 'build_localizer',
'PEM', 'TAM', 'TEM', 'BinaryLogisticRegressionLoss', 'BMN', 'BMNLoss',
'build_model', 'OHEMHingeLoss', 'SSNLoss', 'ResNet3dCSN', 'ResNetTIN',
'TPN', 'TPNHead', 'build_loss', 'build_neck', 'AudioRecognizer',
'AudioTSNHead', 'X3D', 'X3DHead', 'ResNet3dLayer', 'DETECTORS',
'SingleRoIExtractor3D', 'BBoxHeadAVA', 'ResNetAudio', 'build_detector',
'ConvAudio', 'AVARoIHead', 'MobileNetV2', 'MobileNetV2TSM', 'TANet', 'LFB',
'FBOHead', 'LFBInferHead', 'TRNHead', 'NECKS', 'ACRNHead'
]
STTS-main/VideoSwin/mmaction/models/builder.py
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
from mmaction.utils import import_module_error_func
MODELS = Registry('models', parent=MMCV_MODELS)
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
RECOGNIZERS = MODELS
LOSSES = MODELS
LOCALIZERS = MODELS
try:
from mmdet.models.builder import DETECTORS, build_detector
except (ImportError, ModuleNotFoundError):
    # Fall back to the shared registry and a stub build function so that
    # imports still succeed when mmdet is not installed.
DETECTORS = MODELS
@import_module_error_func('mmdet')
def build_detector(cfg, train_cfg, test_cfg):
pass
def build_backbone(cfg):
"""Build backbone."""
return BACKBONES.build(cfg)
def build_head(cfg):
"""Build head."""
return HEADS.build(cfg)
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
"""Build recognizer."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
            'train_cfg and test_cfg are deprecated, '
'please specify them in model. Details see this '
'PR: https://github.com/open-mmlab/mmaction2/pull/629',
UserWarning)
assert cfg.get(
'train_cfg'
) is None or train_cfg is None, 'train_cfg specified in both outer field and model field' # noqa: E501
assert cfg.get(
'test_cfg'
) is None or test_cfg is None, 'test_cfg specified in both outer field and model field ' # noqa: E501
return RECOGNIZERS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_loss(cfg):
"""Build loss."""
return LOSSES.build(cfg)
def build_localizer(cfg):
"""Build localizer."""
return LOCALIZERS.build(cfg)
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model."""
args = cfg.copy()
obj_type = args.pop('type')
if obj_type in LOCALIZERS:
return build_localizer(cfg)
if obj_type in RECOGNIZERS:
return build_recognizer(cfg, train_cfg, test_cfg)
if obj_type in DETECTORS:
if train_cfg is not None or test_cfg is not None:
warnings.warn(
                'train_cfg and test_cfg are deprecated, '
'please specify them in model. Details see this '
'PR: https://github.com/open-mmlab/mmaction2/pull/629',
UserWarning)
return build_detector(cfg, train_cfg, test_cfg)
model_in_mmdet = ['FastRCNN']
if obj_type in model_in_mmdet:
raise ImportError(
'Please install mmdet for spatial temporal detection tasks.')
raise ValueError(f'{obj_type} is not registered in '
'LOCALIZERS, RECOGNIZERS or DETECTORS')
def build_neck(cfg):
"""Build neck."""
return NECKS.build(cfg)
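build_model dispatches on which registry contains the config's type name. A minimal sketch, assuming the referenced components are registered; the config values are illustrative, not a recommended setup:
from mmaction.models import build_model

# Dispatched to build_recognizer because 'Recognizer3D' is in RECOGNIZERS.
cfg = dict(
    type='Recognizer3D',
    backbone=dict(type='ResNet3d', depth=50, pretrained=None),
    cls_head=dict(type='I3DHead', num_classes=400, in_channels=2048))
model = build_model(cfg)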
STTS-main/VideoSwin/mmaction/models/localizers/base.py
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
from .. import builder
class BaseLocalizer(nn.Module, metaclass=ABCMeta):
"""Base class for localizers.
    All localizers should subclass it. All subclasses should overwrite:
    - ``forward_train``, which supports forward computation during training.
    - ``forward_test``, which supports forward computation during testing.
"""
def __init__(self, backbone, cls_head, train_cfg=None, test_cfg=None):
super().__init__()
self.backbone = builder.build_backbone(backbone)
self.cls_head = builder.build_head(cls_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights()
def init_weights(self):
"""Weight initialization for model."""
self.backbone.init_weights()
self.cls_head.init_weights()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
x = self.backbone(imgs)
return x
@abstractmethod
def forward_train(self, imgs, labels):
"""Defines the computation performed at training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at testing."""
def forward(self, imgs, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(imgs, **kwargs)
return self.forward_test(imgs, **kwargs)
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self.forward(**data_batch)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
results = self.forward(return_loss=False, **data_batch)
outputs = dict(results=results)
return outputs
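A small illustration of _parse_losses, assuming BaseLocalizer is importable: every entry whose key contains 'loss' is summed into the total, and list values are mean-reduced first.
import torch
losses = dict(
    loss_action=torch.tensor(1.2),
    loss_start=torch.tensor(0.4),
    loss_end=[torch.tensor(0.1), torch.tensor(0.3)])  # list -> 0.1 + 0.3
loss, log_vars = BaseLocalizer._parse_losses(losses)
# loss == 1.2 + 0.4 + 0.4 == 2.0; log_vars['loss'] holds the same total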
STTS-main/VideoSwin/mmaction/models/localizers/bsn.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...localization import temporal_iop
from ..builder import LOCALIZERS, build_loss
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class TEM(BaseLocalizer):
"""Temporal Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
    Args:
        temporal_dim (int): Total frames selected for each video.
        boundary_ratio (float): Ratio for determining video boundaries.
        tem_feat_dim (int): Feature dimension.
tem_hidden_dim (int): Hidden layer dimension.
tem_match_threshold (float): Temporal evaluation match threshold.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BinaryLogisticRegressionLoss')``.
loss_weight (float): Weight term for action_loss. Default: 2.
output_dim (int): Output dimension. Default: 3.
conv1_ratio (float): Ratio of conv1 layer output. Default: 1.0.
conv2_ratio (float): Ratio of conv2 layer output. Default: 1.0.
conv3_ratio (float): Ratio of conv3 layer output. Default: 0.01.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
tem_feat_dim,
tem_hidden_dim,
tem_match_threshold,
loss_cls=dict(type='BinaryLogisticRegressionLoss'),
loss_weight=2,
output_dim=3,
conv1_ratio=1,
conv2_ratio=1,
conv3_ratio=0.01):
super(BaseLocalizer, self).__init__()
self.temporal_dim = temporal_dim
self.boundary_ratio = boundary_ratio
self.feat_dim = tem_feat_dim
self.c_hidden = tem_hidden_dim
self.match_threshold = tem_match_threshold
self.output_dim = output_dim
self.loss_cls = build_loss(loss_cls)
self.loss_weight = loss_weight
self.conv1_ratio = conv1_ratio
self.conv2_ratio = conv2_ratio
self.conv3_ratio = conv3_ratio
self.conv1 = nn.Conv1d(
in_channels=self.feat_dim,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv2 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.c_hidden,
kernel_size=3,
stride=1,
padding=1,
groups=1)
self.conv3 = nn.Conv1d(
in_channels=self.c_hidden,
out_channels=self.output_dim,
kernel_size=1,
stride=1,
padding=0)
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors()
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
            tmin_offset (float): Offset for the minimum value of temporal
                anchor. Default: 0.
            tmax_offset (float): Offset for the maximum value of temporal
                anchor. Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.temporal_dim
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.temporal_dim):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = F.relu(self.conv1_ratio * self.conv1(x))
x = F.relu(self.conv2_ratio * self.conv2(x))
x = torch.sigmoid(self.conv3_ratio * self.conv3(x))
return x
def forward_train(self, raw_feature, label_action, label_start, label_end):
"""Define the computation performed at every call when training."""
tem_output = self._forward(raw_feature)
score_action = tem_output[:, 0, :]
score_start = tem_output[:, 1, :]
score_end = tem_output[:, 2, :]
loss_action = self.loss_cls(score_action, label_action,
self.match_threshold)
loss_start_small = self.loss_cls(score_start, label_start,
self.match_threshold)
loss_end_small = self.loss_cls(score_end, label_end,
self.match_threshold)
loss_dict = {
'loss_action': loss_action * self.loss_weight,
'loss_start': loss_start_small,
'loss_end': loss_end_small
}
return loss_dict
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
tem_output = self._forward(raw_feature).cpu().numpy()
batch_action = tem_output[:, 0, :]
batch_start = tem_output[:, 1, :]
batch_end = tem_output[:, 2, :]
video_meta_list = [dict(x) for x in video_meta]
video_results = []
for batch_idx, _ in enumerate(batch_action):
video_name = video_meta_list[batch_idx]['video_name']
video_action = batch_action[batch_idx]
video_start = batch_start[batch_idx]
video_end = batch_end[batch_idx]
video_result = np.stack((video_action, video_start, video_end,
self.anchors_tmins, self.anchors_tmaxs),
axis=1)
video_results.append((video_name, video_result))
return video_results
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_action_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_tmins = every_gt_bbox[:, 0].cpu().numpy()
gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy()
gt_lens = gt_tmaxs - gt_tmins
gt_len_pad = np.maximum(1. / self.temporal_dim,
self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_action = []
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_action.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax, gt_tmins,
gt_tmaxs)))
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_action_list.append(match_score_action)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_action_list = torch.Tensor(match_score_action_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_action_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_action, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_action = label_action.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_action, label_start,
label_end)
return self.forward_test(raw_feature, video_meta)
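A worked example of the anchor layout produced by _temporal_anchors, assuming temporal_dim=4 with the default offsets; each anchor covers one temporal_gap of the normalized timeline:
temporal_gap = 1.0 / 4
anchors_tmins = [temporal_gap * i for i in range(4)]        # [0.0, 0.25, 0.5, 0.75]
anchors_tmaxs = [temporal_gap * (i + 1) for i in range(4)]  # [0.25, 0.5, 0.75, 1.0]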
@LOCALIZERS.register_module()
class PEM(BaseLocalizer):
"""Proposals Evaluation Model for Boundary Sensetive Network.
Please refer `BSN: Boundary Sensitive Network for Temporal Action
Proposal Generation <http://arxiv.org/abs/1806.02964>`_.
Code reference
https://github.com/wzmsltw/BSN-boundary-sensitive-network
Args:
pem_feat_dim (int): Feature dimension.
pem_hidden_dim (int): Hidden layer dimension.
        pem_u_ratio_m (float): Ratio for medium score proposals to balance
            data.
        pem_u_ratio_l (float): Ratio for low score proposals to balance data.
pem_high_temporal_iou_threshold (float): High IoU threshold.
pem_low_temporal_iou_threshold (float): Low IoU threshold.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
fc1_ratio (float): Ratio for fc1 layer output. Default: 0.1.
fc2_ratio (float): Ratio for fc2 layer output. Default: 0.1.
output_dim (int): Output dimension. Default: 1.
"""
def __init__(self,
pem_feat_dim,
pem_hidden_dim,
pem_u_ratio_m,
pem_u_ratio_l,
pem_high_temporal_iou_threshold,
pem_low_temporal_iou_threshold,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
fc1_ratio=0.1,
fc2_ratio=0.1,
output_dim=1):
super(BaseLocalizer, self).__init__()
self.feat_dim = pem_feat_dim
self.hidden_dim = pem_hidden_dim
self.u_ratio_m = pem_u_ratio_m
self.u_ratio_l = pem_u_ratio_l
self.pem_high_temporal_iou_threshold = pem_high_temporal_iou_threshold
self.pem_low_temporal_iou_threshold = pem_low_temporal_iou_threshold
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.fc1_ratio = fc1_ratio
self.fc2_ratio = fc2_ratio
self.output_dim = output_dim
self.fc1 = nn.Linear(
in_features=self.feat_dim, out_features=self.hidden_dim, bias=True)
self.fc2 = nn.Linear(
in_features=self.hidden_dim,
out_features=self.output_dim,
bias=True)
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = torch.cat(list(x))
x = F.relu(self.fc1_ratio * self.fc1(x))
x = torch.sigmoid(self.fc2_ratio * self.fc2(x))
return x
def forward_train(self, bsp_feature, reference_temporal_iou):
"""Define the computation performed at every call when training."""
pem_output = self._forward(bsp_feature)
reference_temporal_iou = torch.cat(list(reference_temporal_iou))
device = pem_output.device
reference_temporal_iou = reference_temporal_iou.to(device)
anchors_temporal_iou = pem_output.view(-1)
u_hmask = (reference_temporal_iou >
self.pem_high_temporal_iou_threshold).float()
u_mmask = (
(reference_temporal_iou <= self.pem_high_temporal_iou_threshold)
& (reference_temporal_iou > self.pem_low_temporal_iou_threshold)
).float()
u_lmask = (reference_temporal_iou <=
self.pem_low_temporal_iou_threshold).float()
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = self.u_ratio_m * num_h / (num_m)
r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0]
u_smmask = torch.rand(u_hmask.size()[0], device=device)
u_smmask = u_smmask * u_mmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = self.u_ratio_l * num_h / (num_l)
r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0]
u_slmask = torch.rand(u_hmask.size()[0], device=device)
u_slmask = u_slmask * u_lmask
u_slmask = (u_slmask > (1. - r_l)).float()
temporal_iou_weights = u_hmask + u_smmask + u_slmask
temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou,
reference_temporal_iou)
temporal_iou_loss = torch.sum(
temporal_iou_loss *
temporal_iou_weights) / torch.sum(temporal_iou_weights)
loss_dict = dict(temporal_iou_loss=temporal_iou_loss)
return loss_dict
def forward_test(self, bsp_feature, tmin, tmax, tmin_score, tmax_score,
video_meta):
"""Define the computation performed at every call when testing."""
pem_output = self._forward(bsp_feature).view(-1).cpu().numpy().reshape(
-1, 1)
tmin = tmin.view(-1).cpu().numpy().reshape(-1, 1)
tmax = tmax.view(-1).cpu().numpy().reshape(-1, 1)
tmin_score = tmin_score.view(-1).cpu().numpy().reshape(-1, 1)
tmax_score = tmax_score.view(-1).cpu().numpy().reshape(-1, 1)
score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1)
result = np.concatenate(
(tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1)
result = result.reshape(-1, 6)
video_info = dict(video_meta[0])
proposal_list = post_processing(result, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward(self,
bsp_feature,
reference_temporal_iou=None,
tmin=None,
tmax=None,
tmin_score=None,
tmax_score=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
return self.forward_train(bsp_feature, reference_temporal_iou)
return self.forward_test(bsp_feature, tmin, tmax, tmin_score,
tmax_score, video_meta)
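A toy numeric check of the scoring in PEM.forward_test: the final confidence of a proposal is the product of the PEM output and the two boundary scores (all values below are made up).
import numpy as np
pem_output = np.array([[0.9], [0.4]])
tmin_score = np.array([[0.8], [0.5]])
tmax_score = np.array([[0.7], [0.6]])
score = pem_output * tmin_score * tmax_score  # [[0.504], [0.12]]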
STTS-main/VideoSwin/mmaction/models/localizers/ssn.py
import torch
import torch.nn as nn
from .. import builder
from ..builder import LOCALIZERS
from .base import BaseLocalizer
@LOCALIZERS.register_module()
class SSN(BaseLocalizer):
"""Temporal Action Detection with Structured Segment Networks.
Args:
backbone (dict): Config for building backbone.
cls_head (dict): Config for building classification head.
in_channels (int): Number of channels for input data.
Default: 3.
spatial_type (str): Type of spatial pooling.
Default: 'avg'.
dropout_ratio (float): Ratio of dropout.
Default: 0.5.
loss_cls (dict): Config for building loss.
Default: ``dict(type='SSNLoss')``.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head,
in_channels=3,
spatial_type='avg',
dropout_ratio=0.5,
loss_cls=dict(type='SSNLoss'),
train_cfg=None,
test_cfg=None):
super().__init__(backbone, cls_head, train_cfg, test_cfg)
self.is_test_prepared = False
self.in_channels = in_channels
self.spatial_type = spatial_type
if self.spatial_type == 'avg':
self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
elif self.spatial_type == 'max':
self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
else:
self.pool = None
self.dropout_ratio = dropout_ratio
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.loss_cls = builder.build_loss(loss_cls)
def forward_train(self, imgs, proposal_scale_factor, proposal_type,
proposal_labels, reg_targets, **kwargs):
"""Define the computation performed at every call when training."""
imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
x = self.extract_feat(imgs)
if self.pool:
x = self.pool(x)
if self.dropout is not None:
x = self.dropout(x)
activity_scores, completeness_scores, bbox_preds = self.cls_head(
(x, proposal_scale_factor))
loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
proposal_type, proposal_labels, reg_targets,
self.train_cfg)
loss_dict = dict(**loss)
return loss_dict
def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
proposal_tick_list, reg_norm_consts, **kwargs):
"""Define the computation performed at every call when testing."""
num_crops = imgs.shape[0]
imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
num_ticks = imgs.shape[1]
output = []
minibatch_size = self.test_cfg.ssn.sampler.batch_size
for idx in range(0, num_ticks, minibatch_size):
chunk = imgs[:, idx:idx +
minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
x = self.extract_feat(chunk)
if self.pool:
x = self.pool(x)
# Merge crop to save memory.
x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
output.append(x)
output = torch.cat(output, dim=0)
relative_proposal_list = relative_proposal_list.squeeze(0)
proposal_tick_list = proposal_tick_list.squeeze(0)
scale_factor_list = scale_factor_list.squeeze(0)
reg_norm_consts = reg_norm_consts.squeeze(0)
if not self.is_test_prepared:
self.is_test_prepared = self.cls_head.prepare_test_fc(
self.cls_head.consensus.num_multipliers)
(output, activity_scores, completeness_scores,
bbox_preds) = self.cls_head(
(output, proposal_tick_list, scale_factor_list), test_mode=True)
relative_proposal_list = relative_proposal_list.cpu().numpy()
activity_scores = activity_scores.cpu().numpy()
completeness_scores = completeness_scores.cpu().numpy()
if bbox_preds is not None:
bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
bbox_preds[:, :, 0] = (
bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
reg_norm_consts[0, 0])
bbox_preds[:, :, 1] = (
bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
reg_norm_consts[0, 1])
bbox_preds = bbox_preds.cpu().numpy()
result = [
dict(
relative_proposal_list=relative_proposal_list,
activity_scores=activity_scores,
completeness_scores=completeness_scores,
bbox_preds=bbox_preds)
]
return result
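A shape walk-through, with assumed sizes, of the crop-merging step in SSN.forward_test: features extracted from the spatial crops of the same ticks are averaged crop-wise to save memory.
import torch
num_crops, num_ticks, feat_dim = 3, 4, 8
x = torch.randn(num_crops * num_ticks, feat_dim)
merged = x.reshape((num_crops, num_ticks, -1)).mean(dim=0)  # -> (4, 8)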
STTS-main/VideoSwin/mmaction/models/localizers/__init__.py
from .base import BaseLocalizer
from .bmn import BMN
from .bsn import PEM, TEM
from .ssn import SSN
__all__ = ['PEM', 'TEM', 'BMN', 'SSN', 'BaseLocalizer']
STTS-main/VideoSwin/mmaction/models/localizers/bmn.py
import math
import numpy as np
import torch
import torch.nn as nn
from ...localization import temporal_iop, temporal_iou
from ..builder import LOCALIZERS, build_loss
from .base import BaseLocalizer
from .utils import post_processing
@LOCALIZERS.register_module()
class BMN(BaseLocalizer):
"""Boundary Matching Network for temporal action proposal generation.
    Please refer to `BMN: Boundary-Matching Network for Temporal Action
    Proposal Generation <https://arxiv.org/abs/1907.09702>`_.
Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network
Args:
temporal_dim (int): Total frames selected for each video.
boundary_ratio (float): Ratio for determining video boundaries.
num_samples (int): Number of samples for each proposal.
num_samples_per_bin (int): Number of bin samples for each sample.
feat_dim (int): Feature dimension.
soft_nms_alpha (float): Soft NMS alpha.
soft_nms_low_threshold (float): Soft NMS low threshold.
soft_nms_high_threshold (float): Soft NMS high threshold.
post_process_top_k (int): Top k proposals in post process.
feature_extraction_interval (int):
Interval used in feature extraction. Default: 16.
loss_cls (dict): Config for building loss.
Default: ``dict(type='BMNLoss')``.
hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256.
hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128.
hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512.
"""
def __init__(self,
temporal_dim,
boundary_ratio,
num_samples,
num_samples_per_bin,
feat_dim,
soft_nms_alpha,
soft_nms_low_threshold,
soft_nms_high_threshold,
post_process_top_k,
feature_extraction_interval=16,
loss_cls=dict(type='BMNLoss'),
hidden_dim_1d=256,
hidden_dim_2d=128,
hidden_dim_3d=512):
super(BaseLocalizer, self).__init__()
self.tscale = temporal_dim
self.boundary_ratio = boundary_ratio
self.num_samples = num_samples
self.num_samples_per_bin = num_samples_per_bin
self.feat_dim = feat_dim
self.soft_nms_alpha = soft_nms_alpha
self.soft_nms_low_threshold = soft_nms_low_threshold
self.soft_nms_high_threshold = soft_nms_high_threshold
self.post_process_top_k = post_process_top_k
self.feature_extraction_interval = feature_extraction_interval
self.loss_cls = build_loss(loss_cls)
self.hidden_dim_1d = hidden_dim_1d
self.hidden_dim_2d = hidden_dim_2d
self.hidden_dim_3d = hidden_dim_3d
self._get_interp1d_mask()
# Base Module
self.x_1d_b = nn.Sequential(
nn.Conv1d(
self.feat_dim,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True))
# Temporal Evaluation Module
self.x_1d_s = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
self.x_1d_e = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1,
groups=4), nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid())
# Proposal Evaluation Module
self.x_1d_p = nn.Sequential(
nn.Conv1d(
self.hidden_dim_1d,
self.hidden_dim_1d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True))
self.x_3d_p = nn.Sequential(
nn.Conv3d(
self.hidden_dim_1d,
self.hidden_dim_3d,
kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True))
self.x_2d_p = nn.Sequential(
nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(
self.hidden_dim_2d,
self.hidden_dim_2d,
kernel_size=3,
padding=1), nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid())
self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors(
-0.5, 1.5)
self.match_map = self._match_map()
self.bm_mask = self._get_bm_mask()
def _match_map(self):
"""Generate match map."""
temporal_gap = 1. / self.tscale
match_map = []
for idx in range(self.tscale):
match_window = []
tmin = temporal_gap * idx
for jdx in range(1, self.tscale + 1):
tmax = tmin + temporal_gap * jdx
match_window.append([tmin, tmax])
match_map.append(match_window)
match_map = np.array(match_map)
match_map = np.transpose(match_map, [1, 0, 2])
match_map = np.reshape(match_map, [-1, 2])
return match_map
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.):
"""Generate temporal anchors.
Args:
            tmin_offset (float): Offset for the minimum value of temporal
                anchor. Default: 0.
            tmax_offset (float): Offset for the maximum value of temporal
                anchor. Default: 1.
Returns:
tuple[Sequence[float]]: The minimum and maximum values of temporal
anchors.
"""
temporal_gap = 1. / self.tscale
anchors_tmins = []
anchors_tmaxs = []
for i in range(self.tscale):
anchors_tmins.append(temporal_gap * (i + tmin_offset))
anchors_tmaxs.append(temporal_gap * (i + tmax_offset))
return anchors_tmins, anchors_tmaxs
def _forward(self, x):
"""Define the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x.shape [batch_size, self.feat_dim, self.tscale]
base_feature = self.x_1d_b(x)
# base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale]
start = self.x_1d_s(base_feature).squeeze(1)
# start.shape [batch_size, self.tscale]
end = self.x_1d_e(base_feature).squeeze(1)
# end.shape [batch_size, self.tscale]
confidence_map = self.x_1d_p(base_feature)
# [batch_size, self.hidden_dim_1d, self.tscale]
confidence_map = self._boundary_matching_layer(confidence_map)
        # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale] # noqa
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
# [batch_size, self.hidden_dim_3d, self.tscale, self.tscale]
confidence_map = self.x_2d_p(confidence_map)
# [batch_size, 2, self.tscale, self.tscale]
return confidence_map, start, end
def _boundary_matching_layer(self, x):
"""Generate matching layer."""
input_size = x.size()
out = torch.matmul(x,
self.sample_mask).reshape(input_size[0],
input_size[1],
self.num_samples,
self.tscale, self.tscale)
return out
def forward_test(self, raw_feature, video_meta):
"""Define the computation performed at every call when testing."""
confidence_map, start, end = self._forward(raw_feature)
start_scores = start[0].cpu().numpy()
end_scores = end[0].cpu().numpy()
cls_confidence = (confidence_map[0][1]).cpu().numpy()
reg_confidence = (confidence_map[0][0]).cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,0]
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1 # [0,0,0...,0,1]
for idx in range(1, self.tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
# iterate through all combinations of start_index and end_index
new_proposals = []
for idx in range(self.tscale):
for jdx in range(self.tscale):
start_index = jdx
end_index = start_index + idx + 1
if end_index < self.tscale and start_bins[
start_index] == 1 and end_bins[end_index] == 1:
tmin = start_index / self.tscale
tmax = end_index / self.tscale
tmin_score = start_scores[start_index]
tmax_score = end_scores[end_index]
cls_score = cls_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = tmin_score * tmax_score * cls_score * reg_score
new_proposals.append([
tmin, tmax, tmin_score, tmax_score, cls_score,
reg_score, score
])
new_proposals = np.stack(new_proposals)
video_info = dict(video_meta[0])
proposal_list = post_processing(new_proposals, video_info,
self.soft_nms_alpha,
self.soft_nms_low_threshold,
self.soft_nms_high_threshold,
self.post_process_top_k,
self.feature_extraction_interval)
output = [
dict(
video_name=video_info['video_name'],
proposal_list=proposal_list)
]
return output
def forward_train(self, raw_feature, label_confidence, label_start,
label_end):
"""Define the computation performed at every call when training."""
confidence_map, start, end = self._forward(raw_feature)
loss = self.loss_cls(confidence_map, start, end, label_confidence,
label_start, label_end,
self.bm_mask.to(raw_feature.device))
loss_dict = dict(loss=loss[0])
return loss_dict
def generate_labels(self, gt_bbox):
"""Generate training labels."""
match_score_confidence_list = []
match_score_start_list = []
match_score_end_list = []
for every_gt_bbox in gt_bbox:
gt_iou_map = []
for start, end in every_gt_bbox:
if isinstance(start, torch.Tensor):
start = start.numpy()
if isinstance(end, torch.Tensor):
end = end.numpy()
current_gt_iou_map = temporal_iou(self.match_map[:, 0],
self.match_map[:, 1], start,
end)
current_gt_iou_map = np.reshape(current_gt_iou_map,
[self.tscale, self.tscale])
gt_iou_map.append(current_gt_iou_map)
gt_iou_map = np.array(gt_iou_map).astype(np.float32)
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_tmins = every_gt_bbox[:, 0]
gt_tmaxs = every_gt_bbox[:, 1]
gt_len_pad = 3 * (1. / self.tscale)
gt_start_bboxs = np.stack(
(gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1)
match_score_start = []
match_score_end = []
for anchor_tmin, anchor_tmax in zip(self.anchors_tmins,
self.anchors_tmaxs):
match_score_start.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_start_bboxs[:, 0], gt_start_bboxs[:,
1])))
match_score_end.append(
np.max(
temporal_iop(anchor_tmin, anchor_tmax,
gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_confidence_list.append(gt_iou_map)
match_score_start_list.append(match_score_start)
match_score_end_list.append(match_score_end)
match_score_confidence_list = torch.Tensor(match_score_confidence_list)
match_score_start_list = torch.Tensor(match_score_start_list)
match_score_end_list = torch.Tensor(match_score_end_list)
return (match_score_confidence_list, match_score_start_list,
match_score_end_list)
def forward(self,
raw_feature,
gt_bbox=None,
video_meta=None,
return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
label_confidence, label_start, label_end = (
self.generate_labels(gt_bbox))
device = raw_feature.device
label_confidence = label_confidence.to(device)
label_start = label_start.to(device)
label_end = label_end.to(device)
return self.forward_train(raw_feature, label_confidence,
label_start, label_end)
return self.forward_test(raw_feature, video_meta)
@staticmethod
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples,
num_samples_per_bin):
"""Generate sample mask for a boundary-matching pair."""
plen = float(seg_tmax - seg_tmin)
plen_sample = plen / (num_samples * num_samples_per_bin - 1.0)
total_samples = [
seg_tmin + plen_sample * i
for i in range(num_samples * num_samples_per_bin)
]
p_mask = []
for idx in range(num_samples):
bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) *
num_samples_per_bin]
bin_vector = np.zeros(tscale)
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if 0 <= int(sample_down) <= (tscale - 1):
bin_vector[int(sample_down)] += 1 - sample_decimal
if 0 <= int(sample_upper) <= (tscale - 1):
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_samples_per_bin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask
def _get_interp1d_mask(self):
"""Generate sample mask for each point in Boundary-Matching Map."""
mask_mat = []
for start_index in range(self.tscale):
mask_mat_vector = []
for duration_index in range(self.tscale):
if start_index + duration_index < self.tscale:
p_tmin = start_index
p_tmax = start_index + duration_index
center_len = float(p_tmax - p_tmin) + 1
sample_tmin = p_tmin - (center_len * self.boundary_ratio)
sample_tmax = p_tmax + (center_len * self.boundary_ratio)
p_mask = self._get_interp1d_bin_mask(
sample_tmin, sample_tmax, self.tscale,
self.num_samples, self.num_samples_per_bin)
else:
p_mask = np.zeros([self.tscale, self.num_samples])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(
torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
def _get_bm_mask(self):
"""Generate Boundary-Matching Mask."""
bm_mask = []
for idx in range(self.tscale):
mask_vector = [1] * (self.tscale - idx) + [0] * idx
bm_mask.append(mask_vector)
bm_mask = torch.tensor(bm_mask, dtype=torch.float)
return bm_mask
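A tiny standalone reconstruction of the proposal grid built by _match_map, assuming tscale=3: after the transpose and reshape, rows enumerate (tmin, tmax) pairs grouped by duration first, then by start position.
import numpy as np
tscale, gap = 3, 1.0 / 3
match_map = np.array([[[gap * i, gap * (i + j + 1)] for j in range(tscale)]
                      for i in range(tscale)])
match_map = np.transpose(match_map, [1, 0, 2]).reshape(-1, 2)
# durations cycle through 1*gap, 2*gap, 3*gap; the first row is [0.0, 1/3]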
STTS-main/VideoSwin/mmaction/models/localizers/utils/post_processing.py
from mmaction.localization import soft_nms
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k,
feature_extraction_interval):
"""Post process for temporal proposals generation.
Args:
result (np.ndarray): Proposals generated by network.
video_info (dict): Meta data of video. Required keys are
'duration_frame', 'duration_second'.
soft_nms_alpha (float): Alpha value of Gaussian decaying function.
soft_nms_low_threshold (float): Low threshold for soft nms.
soft_nms_high_threshold (float): High threshold for soft nms.
post_process_top_k (int): Top k values to be considered.
feature_extraction_interval (int): Interval used in feature extraction.
Returns:
list[dict]: The updated proposals, e.g.
[{'score': 0.9, 'segment': [0, 1]},
{'score': 0.8, 'segment': [0, 2]},
...].
"""
if len(result) > 1:
result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k)
result = result[result[:, -1].argsort()[::-1]]
video_duration = float(
video_info['duration_frame'] // feature_extraction_interval *
feature_extraction_interval
) / video_info['duration_frame'] * video_info['duration_second']
proposal_list = []
for j in range(min(post_process_top_k, len(result))):
proposal = {}
proposal['score'] = float(result[j, -1])
proposal['segment'] = [
max(0, result[j, 0]) * video_duration,
min(1, result[j, 1]) * video_duration
]
proposal_list.append(proposal)
return proposal_list
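An assumed-shape usage sketch: each row of result is a proposal whose first two columns are the normalized (tmin, tmax) and whose last column is the score; video_info supplies the duration metadata used to map segments back to seconds. All values are illustrative.
import numpy as np
result = np.array([[0.00, 0.50, 0.9, 0.8, 0.7, 0.6, 0.95],
                   [0.20, 0.90, 0.7, 0.6, 0.5, 0.4, 0.30]])
video_info = dict(video_name='v_demo', duration_frame=1600, duration_second=64.)
proposals = post_processing(result, video_info, soft_nms_alpha=0.4,
                            soft_nms_low_threshold=0.5,
                            soft_nms_high_threshold=0.9, post_process_top_k=100,
                            feature_extraction_interval=16)
# -> [{'score': ..., 'segment': [start_second, end_second]}, ...]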
STTS-main/VideoSwin/mmaction/models/localizers/utils/__init__.py
from .post_processing import post_processing
__all__ = ['post_processing']
STTS-main/VideoSwin/mmaction/models/recognizers/base.py
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import auto_fp16
from .. import builder
class BaseRecognizer(nn.Module, metaclass=ABCMeta):
"""Base class for recognizers.
All recognizers should subclass it.
    All subclasses should overwrite:
    - ``forward_train``, which supports forward computation during training.
    - ``forward_test``, which supports forward computation during testing.
Args:
backbone (dict): Backbone modules to extract feature.
cls_head (dict | None): Classification head to process feature.
Default: None.
neck (dict | None): Neck for feature fusion. Default: None.
train_cfg (dict | None): Config for training. Default: None.
test_cfg (dict | None): Config for testing. Default: None.
"""
def __init__(self,
backbone,
cls_head=None,
neck=None,
train_cfg=None,
test_cfg=None):
super().__init__()
# record the source of the backbone
self.backbone_from = 'mmaction2'
if backbone['type'].startswith('mmcls.'):
try:
import mmcls.models.builder as mmcls_builder
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install mmcls to use this backbone.')
backbone['type'] = backbone['type'][6:]
self.backbone = mmcls_builder.build_backbone(backbone)
self.backbone_from = 'mmcls'
elif backbone['type'].startswith('torchvision.'):
try:
import torchvision.models
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install torchvision to use this '
'backbone.')
backbone_type = backbone.pop('type')[12:]
self.backbone = torchvision.models.__dict__[backbone_type](
**backbone)
# disable the classifier
self.backbone.classifier = nn.Identity()
self.backbone.fc = nn.Identity()
self.backbone_from = 'torchvision'
elif backbone['type'].startswith('timm.'):
try:
import timm
except (ImportError, ModuleNotFoundError):
raise ImportError('Please install timm to use this '
'backbone.')
backbone_type = backbone.pop('type')[5:]
# disable the classifier
backbone['num_classes'] = 0
self.backbone = timm.create_model(backbone_type, **backbone)
self.backbone_from = 'timm'
else:
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.cls_head = builder.build_head(cls_head) if cls_head else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# aux_info is the list of tensor names beyond 'imgs' and 'label' which
# will be used in train_step and val_step, data_batch should contain
# these tensors
self.aux_info = []
if train_cfg is not None and 'aux_info' in train_cfg:
self.aux_info = train_cfg['aux_info']
# max_testing_views should be int
self.max_testing_views = None
if test_cfg is not None and 'max_testing_views' in test_cfg:
self.max_testing_views = test_cfg['max_testing_views']
assert isinstance(self.max_testing_views, int)
if test_cfg is not None and 'feature_extraction' in test_cfg:
self.feature_extraction = test_cfg['feature_extraction']
else:
self.feature_extraction = False
# mini-batch blending, e.g. mixup, cutmix, etc.
self.blending = None
if train_cfg is not None and 'blending' in train_cfg:
from mmcv.utils import build_from_cfg
from mmaction.datasets.builder import BLENDINGS
self.blending = build_from_cfg(train_cfg['blending'], BLENDINGS)
self.init_weights()
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the recognizer has a neck"""
return hasattr(self, 'neck') and self.neck is not None
@property
def with_cls_head(self):
"""bool: whether the recognizer has a cls_head"""
return hasattr(self, 'cls_head') and self.cls_head is not None
def init_weights(self):
"""Initialize the model network weights."""
if self.backbone_from in ['mmcls', 'mmaction2']:
self.backbone.init_weights()
elif self.backbone_from in ['torchvision', 'timm']:
warnings.warn('We do not initialize weights for backbones in '
f'{self.backbone_from}, since the weights for '
                          f'backbones in {self.backbone_from} are initialized '
                          'in their __init__ functions.')
else:
raise NotImplementedError('Unsupported backbone source '
f'{self.backbone_from}!')
if self.with_cls_head:
self.cls_head.init_weights()
if self.with_neck:
self.neck.init_weights()
@auto_fp16()
def extract_feat(self, imgs):
"""Extract features through a backbone.
Args:
imgs (torch.Tensor): The input images.
Returns:
torch.tensor: The extracted features.
"""
if (hasattr(self.backbone, 'features')
and self.backbone_from == 'torchvision'):
x = self.backbone.features(imgs)
elif self.backbone_from == 'timm':
x = self.backbone.forward_features(imgs)
else:
x = self.backbone(imgs)
return x
def average_clip(self, cls_score, num_segs=1):
"""Averaging class score over multiple clips.
        Uses the averaging type ('score', 'prob' or None, which is defined
        in test_cfg) to compute the final averaged class score. Only called
        in test mode.
Args:
cls_score (torch.Tensor): Class score to be averaged.
num_segs (int): Number of clips for each input sample.
Returns:
torch.Tensor: Averaged class score.
"""
if 'average_clips' not in self.test_cfg.keys():
            raise KeyError(
                '"average_clips" must be defined in test_cfg\'s keys')
average_clips = self.test_cfg['average_clips']
if average_clips not in ['score', 'prob', None]:
raise ValueError(f'{average_clips} is not supported. '
f'Currently supported ones are '
f'["score", "prob", None]')
if average_clips is None:
return cls_score
batch_size = cls_score.shape[0]
cls_score = cls_score.view(batch_size // num_segs, num_segs, -1)
if average_clips == 'prob':
cls_score = F.softmax(cls_score, dim=2).mean(dim=1)
elif average_clips == 'score':
cls_score = cls_score.mean(dim=1)
return cls_score
@abstractmethod
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
@abstractmethod
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
@abstractmethod
def forward_gradcam(self, imgs):
"""Defines the computation performed at every all when using gradcam
utils."""
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def forward(self, imgs, label=None, return_loss=True, **kwargs):
"""Define the computation performed at every call."""
if kwargs.get('gradcam', False):
del kwargs['gradcam']
return self.forward_gradcam(imgs, **kwargs)
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
if self.blending is not None:
imgs, label = self.blending(imgs, label)
return self.forward_train(imgs, label, **kwargs)
return self.forward_test(imgs, **kwargs)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
assert item in data_batch
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
imgs = data_batch['imgs']
label = data_batch['label']
aux_info = {}
for item in self.aux_info:
aux_info[item] = data_batch[item]
losses = self(imgs, label, return_loss=True, **aux_info)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
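A numeric illustration of average_clip with average_clips='score': the clip scores of one sample are reshaped to (batch, num_segs, num_classes) and averaged over the clip axis.
import torch
cls_score = torch.tensor([[2.0, 0.0], [0.0, 2.0]])  # two clips, one sample
avg = cls_score.view(1, 2, -1).mean(dim=1)  # tensor([[1.0, 1.0]])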
STTS-main/VideoSwin/mmaction/models/recognizers/recognizer3d.py
import torch
from torch import nn
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
def copyParams(module_src, module_dest):
    """Copy all named parameters from ``module_src`` to ``module_dest``.
    The two modules are assumed to share the same parameter names.
    """
    params_src = module_src.named_parameters()
    params_dest = module_dest.named_parameters()
    dict_dest = dict(params_dest)
    for name, param in params_src:
        dict_dest[name].data.copy_(param.data)
@RECOGNIZERS.register_module()
class Recognizer3D(BaseRecognizer):
"""3D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
assert self.with_cls_head
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
losses = dict()
x = self.extract_feat(imgs)
if self.with_neck:
x, loss_aux = self.neck(x, labels.squeeze())
losses.update(loss_aux)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
num_segs = imgs.shape[1]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
if self.max_testing_views is not None:
total_views = imgs.shape[0]
assert num_segs == total_views, (
'max_testing_views is only compatible '
'with batch_size == 1')
view_ptr = 0
feats = []
while view_ptr < total_views:
batch_imgs = imgs[view_ptr:view_ptr + self.max_testing_views]
x = self.extract_feat(batch_imgs)
if self.with_neck:
x, _ = self.neck(x)
feats.append(x)
view_ptr += self.max_testing_views
# should consider the case that feat is a tuple
if isinstance(feats[0], tuple):
len_tuple = len(feats[0])
feat = [
torch.cat([x[i] for x in feats]) for i in range(len_tuple)
]
feat = tuple(feat)
else:
feat = torch.cat(feats)
else:
feat = self.extract_feat(imgs)
if self.with_neck:
feat, _ = self.neck(feat)
if self.feature_extraction:
# perform spatio-temporal pooling
avg_pool = nn.AdaptiveAvgPool3d(1)
if isinstance(feat, tuple):
feat = [avg_pool(x) for x in feat]
# concat them
feat = torch.cat(feat, axis=1)
else:
feat = avg_pool(feat)
# squeeze dimensions
feat = feat.reshape((batches, num_segs, -1))
# temporal average pooling
feat = feat.mean(axis=1)
return feat
# should have cls_head if not extracting features
assert self.with_cls_head
cls_score = self.cls_head(feat)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs, softmax=False):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
assert self.with_cls_head
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
x = self.extract_feat(imgs)
if self.with_neck:
x, _ = self.neck(x)
outs = self.cls_head(x)
if softmax:
outs = nn.functional.softmax(outs)
return (outs, )
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
assert self.with_cls_head
return self._do_test(imgs)
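A sketch, with made-up tensors, of the view-chunked inference in _do_test: when max_testing_views is set (batch size must be 1), views are processed in chunks and the per-chunk features are concatenated, giving the same features as one large forward pass.
import torch
total_views, max_testing_views = 10, 4
views = torch.randn(total_views, 8)
feats = [views[ptr:ptr + max_testing_views]
         for ptr in range(0, total_views, max_testing_views)]
feat = torch.cat(feats)  # identical content to processing all views at once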
STTS-main/VideoSwin/mmaction/models/recognizers/recognizer2d.py
import torch
from torch import nn
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class Recognizer2D(BaseRecognizer):
"""2D recognizer model framework."""
def forward_train(self, imgs, labels, **kwargs):
"""Defines the computation performed at every call when training."""
assert self.with_cls_head
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
losses = dict()
x = self.extract_feat(imgs)
if self.backbone_from in ['torchvision', 'timm']:
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
# apply adaptive avg pooling
x = nn.AdaptiveAvgPool2d(1)(x)
x = x.reshape((x.shape[0], -1))
x = x.reshape(x.shape + (1, 1))
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, loss_aux = self.neck(x, labels.squeeze())
x = x.squeeze(2)
num_segs = 1
losses.update(loss_aux)
cls_score = self.cls_head(x, num_segs)
gt_labels = labels.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs)
losses.update(loss_cls)
return losses
def _do_test(self, imgs):
"""Defines the computation performed at every call when evaluation,
testing and gradcam."""
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
if self.backbone_from in ['torchvision', 'timm']:
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
# apply adaptive avg pooling
x = nn.AdaptiveAvgPool2d(1)(x)
x = x.reshape((x.shape[0], -1))
x = x.reshape(x.shape + (1, 1))
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
x = x.squeeze(2)
num_segs = 1
if self.feature_extraction:
# perform spatial pooling
avg_pool = nn.AdaptiveAvgPool2d(1)
x = avg_pool(x)
# squeeze dimensions
x = x.reshape((batches, num_segs, -1))
# temporal average pooling
x = x.mean(axis=1)
return x
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
# should have cls_head if not extracting features
cls_score = self.cls_head(x, num_segs)
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def _do_fcn_test(self, imgs):
# [N, num_crops * num_segs, C, H, W] ->
# [N * num_crops * num_segs, C, H, W]
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = self.test_cfg.get('num_segs', self.backbone.num_segments)
if self.test_cfg.get('flip', False):
imgs = torch.flip(imgs, [-1])
x = self.extract_feat(imgs)
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
else:
x = x.reshape((-1, num_segs) +
x.shape[1:]).transpose(1, 2).contiguous()
# When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes]
# When using `TSMHead`, shape is [batch_size * num_crops, num_classes]
# `num_crops` is calculated by:
# 1) `twice_sample` in `SampleFrames`
# 2) `num_sample_positions` in `DenseSampleFrames`
# 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline`
# 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1`
cls_score = self.cls_head(x, fcn_test=True)
assert cls_score.size()[0] % batches == 0
# calculate num_crops automatically
cls_score = self.average_clip(cls_score,
cls_score.size()[0] // batches)
return cls_score
def forward_test(self, imgs):
"""Defines the computation performed at every call when evaluation and
testing."""
if self.test_cfg.get('fcn_test', False):
# If specified, spatially fully-convolutional testing is performed
assert not self.feature_extraction
assert self.with_cls_head
return self._do_fcn_test(imgs).cpu().numpy()
return self._do_test(imgs).cpu().numpy()
def forward_dummy(self, imgs, softmax=False):
"""Used for computing network FLOPs.
See ``tools/analysis/get_flops.py``.
Args:
imgs (torch.Tensor): Input images.
Returns:
Tensor: Class score.
"""
assert self.with_cls_head
batches = imgs.shape[0]
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
num_segs = imgs.shape[0] // batches
x = self.extract_feat(imgs)
if self.with_neck:
x = [
each.reshape((-1, num_segs) +
each.shape[1:]).transpose(1, 2).contiguous()
for each in x
]
x, _ = self.neck(x)
x = x.squeeze(2)
num_segs = 1
outs = self.cls_head(x, num_segs)
if softmax:
            outs = nn.functional.softmax(outs, dim=1)
return (outs, )
def forward_gradcam(self, imgs):
"""Defines the computation performed at every call when using gradcam
utils."""
assert self.with_cls_head
return self._do_test(imgs)
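# Shape bookkeeping sketch for `_do_test` (illustrative numbers, not from the
# repo): with batch_size=2, num_clips=5 and ThreeCrop in the test pipeline,
# `imgs` arrives as [2, 15, C, H, W] and is flattened to [30, C, H, W] for
# the backbone; `average_clip` then folds the 15 crops per video back into a
# single [2, num_classes] score.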
| 6,572
| 34.33871
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/recognizers/__init__.py
|
from .audio_recognizer import AudioRecognizer
from .base import BaseRecognizer
from .recognizer2d import Recognizer2D
from .recognizer3d import Recognizer3D
__all__ = ['BaseRecognizer', 'Recognizer2D', 'Recognizer3D', 'AudioRecognizer']
| 238
| 33.142857
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/recognizers/audio_recognizer.py
|
from ..builder import RECOGNIZERS
from .base import BaseRecognizer
@RECOGNIZERS.register_module()
class AudioRecognizer(BaseRecognizer):
"""Audio recognizer model framework."""
def forward(self, audios, label=None, return_loss=True):
"""Define the computation performed at every call."""
if return_loss:
if label is None:
raise ValueError('Label should not be None.')
return self.forward_train(audios, label)
return self.forward_test(audios)
def forward_train(self, audios, labels):
"""Defines the computation performed at every call when training."""
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
gt_labels = labels.squeeze()
loss = self.cls_head.loss(cls_score, gt_labels)
return loss
def forward_test(self, audios):
"""Defines the computation performed at every call when evaluation and
testing."""
num_segs = audios.shape[1]
audios = audios.reshape((-1, ) + audios.shape[2:])
x = self.extract_feat(audios)
cls_score = self.cls_head(x)
cls_score = self.average_clip(cls_score, num_segs)
return cls_score.cpu().numpy()
def forward_gradcam(self, audios):
raise NotImplementedError
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
audios = data_batch['audios']
label = data_batch['label']
losses = self(audios, label)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(next(iter(data_batch.values()))))
return outputs
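# Usage sketch (hypothetical tensors): `train_step` consumes a dataloader
# batch dict and returns the dict expected by the mmcv runner, e.g.
#
#   outputs = recognizer.train_step(dict(audios=audios, label=label), None)
#   outputs['loss'].backward()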
| 3,632
| 34.617647
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/common/tam.py
|
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, kaiming_init, normal_init
class TAM(nn.Module):
"""Temporal Adaptive Module(TAM) for TANet.
This module is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
Args:
in_channels (int): Channel num of input features.
num_segments (int): Number of frame segments.
alpha (int): ```alpha``` in the paper and is the ratio of the
intermediate channel number to the initial channel number in the
global branch. Default: 2.
adaptive_kernel_size (int): ```K``` in the paper and is the size of the
adaptive kernel size in the global branch. Default: 3.
beta (int): ```beta``` in the paper and is set to control the model
complexity in the local branch. Default: 4.
conv1d_kernel_size (int): Size of the convolution kernel of Conv1d in
the local branch. Default: 3.
adaptive_convolution_stride (int): The first dimension of strides in
the adaptive convolution of ```Temporal Adaptive Aggregation```.
Default: 1.
adaptive_convolution_padding (int): The first dimension of paddings in
the adaptive convolution of ```Temporal Adaptive Aggregation```.
Default: 1.
init_std (float): Std value for initiation of `nn.Linear`. Default:
0.001.
"""
def __init__(self,
in_channels,
num_segments,
alpha=2,
adaptive_kernel_size=3,
beta=4,
conv1d_kernel_size=3,
adaptive_convolution_stride=1,
adaptive_convolution_padding=1,
init_std=0.001):
super().__init__()
assert beta > 0 and alpha > 0
self.in_channels = in_channels
self.num_segments = num_segments
self.alpha = alpha
self.adaptive_kernel_size = adaptive_kernel_size
self.beta = beta
self.conv1d_kernel_size = conv1d_kernel_size
self.adaptive_convolution_stride = adaptive_convolution_stride
self.adaptive_convolution_padding = adaptive_convolution_padding
self.init_std = init_std
self.G = nn.Sequential(
nn.Linear(num_segments, num_segments * alpha, bias=False),
nn.BatchNorm1d(num_segments * alpha), nn.ReLU(inplace=True),
nn.Linear(num_segments * alpha, adaptive_kernel_size, bias=False),
nn.Softmax(-1))
self.L = nn.Sequential(
nn.Conv1d(
in_channels,
in_channels // beta,
conv1d_kernel_size,
stride=1,
padding=conv1d_kernel_size // 2,
bias=False), nn.BatchNorm1d(in_channels // beta),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels // beta, in_channels, 1, bias=False),
nn.Sigmoid())
self.init_weights()
def init_weights(self):
"""Initiate the parameters from scratch."""
for m in self.modules():
if isinstance(m, nn.Conv1d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm1d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# [n, c, h, w]
n, c, h, w = x.size()
num_segments = self.num_segments
num_batches = n // num_segments
assert c == self.in_channels
# [num_batches, c, num_segments, h, w]
x = x.view(num_batches, num_segments, c, h, w)
x = x.permute(0, 2, 1, 3, 4).contiguous()
# [num_batches * c, num_segments, 1, 1]
theta_out = F.adaptive_avg_pool2d(
x.view(-1, num_segments, h, w), (1, 1))
# [num_batches * c, 1, adaptive_kernel_size, 1]
conv_kernel = self.G(theta_out.view(-1, num_segments)).view(
num_batches * c, 1, -1, 1)
# [num_batches, c, num_segments, 1, 1]
local_activation = self.L(theta_out.view(-1, c, num_segments)).view(
num_batches, c, num_segments, 1, 1)
# [num_batches, c, num_segments, h, w]
new_x = x * local_activation
# [1, num_batches * c, num_segments, h * w]
y = F.conv2d(
new_x.view(1, num_batches * c, num_segments, h * w),
conv_kernel,
bias=None,
stride=(self.adaptive_convolution_stride, 1),
padding=(self.adaptive_convolution_padding, 0),
groups=num_batches * c)
# [n, c, h, w]
y = y.view(num_batches, c, num_segments, h, w)
y = y.permute(0, 2, 1, 3, 4).contiguous().view(n, c, h, w)
return y
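# Minimal usage sketch (shapes are illustrative, not from the repo): TAM
# consumes [n, c, h, w] features where n = num_batches * num_segments and
# returns a tensor of the same shape.
if __name__ == '__main__':
    import torch
    tam = TAM(in_channels=64, num_segments=8)
    x = torch.randn(2 * 8, 64, 14, 14)  # 2 clips of 8 segments each
    y = tam(x)
    assert y.shape == x.shape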
| 5,051
| 36.422222
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/common/conv_audio.py
|
import torch
import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, ConvModule, constant_init, kaiming_init
from torch.nn.modules.utils import _pair
@CONV_LAYERS.register_module()
class ConvAudio(nn.Module):
"""Conv2d module for AudioResNet backbone.
    This module is proposed in `Audiovisual SlowFast Networks for Video
    Recognition <https://arxiv.org/abs/2001.08740>`_.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int | tuple[int]): Same as nn.Conv2d.
        op (str): Operation to merge the outputs of the freq
            and time branches. Choices are 'sum' and 'concat'.
Default: 'concat'.
stride (int | tuple[int]): Same as nn.Conv2d.
padding (int | tuple[int]): Same as nn.Conv2d.
dilation (int | tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
op='concat',
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False):
super().__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
assert op in ['concat', 'sum']
self.op = op
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.output_padding = (0, 0)
self.transposed = False
self.conv_1 = ConvModule(
in_channels,
out_channels,
kernel_size=(kernel_size[0], 1),
stride=stride,
padding=(kernel_size[0] // 2, 0),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.conv_2 = ConvModule(
in_channels,
out_channels,
kernel_size=(1, kernel_size[1]),
stride=stride,
padding=(0, kernel_size[1] // 2),
bias=bias,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'))
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x_1 = self.conv_1(x)
x_2 = self.conv_2(x)
if self.op == 'concat':
out = torch.cat([x_1, x_2], 1)
else:
out = x_1 + x_2
return out
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_1.conv)
kaiming_init(self.conv_2.conv)
constant_init(self.conv_1.bn, 1, bias=0)
constant_init(self.conv_2.bn, 1, bias=0)
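# Minimal usage sketch (spectrogram shape is illustrative): with op='concat',
# the frequency-branch and time-branch outputs are stacked channel-wise, so
# the output has 2 * out_channels channels.
if __name__ == '__main__':
    conv = ConvAudio(in_channels=1, out_channels=32, kernel_size=9)
    spec = torch.randn(4, 1, 128, 80)  # [N, C, freq, time]
    assert conv(spec).shape == (4, 64, 128, 80)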
| 3,225
| 29.72381
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/common/lfb.py
|
import io
import os.path as osp
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
try:
import lmdb
lmdb_imported = True
except (ImportError, ModuleNotFoundError):
lmdb_imported = False
class LFB:
"""Long-Term Feature Bank (LFB).
LFB is proposed in `Long-Term Feature Banks for Detailed Video
Understanding <https://arxiv.org/abs/1812.05038>`_
    The ROI features of videos are stored in the feature bank. The feature
    bank is generated by running inference with an LFB infer config.
Formally, LFB is a Dict whose keys are video IDs and its values are also
Dicts whose keys are timestamps in seconds. Example of LFB:
.. code-block:: Python
{
'0f39OWEqJ24': {
901: tensor([[ 1.2760, 1.1965, ..., 0.0061, -0.0639],
[-0.6320, 0.3794, ..., -1.2768, 0.5684],
[ 0.2535, 1.0049, ..., 0.4906, 1.2555],
[-0.5838, 0.8549, ..., -2.1736, 0.4162]]),
...
1705: tensor([[-1.0169, -1.1293, ..., 0.6793, -2.0540],
[ 1.2436, -0.4555, ..., 0.2281, -0.8219],
[ 0.2815, -0.0547, ..., -0.4199, 0.5157]]),
...
},
'xmqSaQPzL1E': {
...
},
...
}
Args:
lfb_prefix_path (str): The storage path of lfb.
max_num_sampled_feat (int): The max number of sampled features.
Default: 5.
window_size (int): Window size of sampling long term feature.
Default: 60.
lfb_channels (int): Number of the channels of the features stored
in LFB. Default: 2048.
        dataset_modes (tuple[str] | str): Load LFB of datasets with different
            modes, such as training, validation and testing datasets. If you
            don't do cross validation during training, just load the training
            dataset, i.e. set `dataset_modes = ('train', )`.
            Default: ('train', 'val').
device (str): Where to load lfb. Choices are 'gpu', 'cpu' and 'lmdb'.
A 1.65GB half-precision ava lfb (including training and validation)
occupies about 2GB GPU memory. Default: 'gpu'.
lmdb_map_size (int): Map size of lmdb. Default: 4e9.
construct_lmdb (bool): Whether to construct lmdb. If you have
constructed lmdb of lfb, you can set to False to skip the
construction. Default: True.
"""
def __init__(self,
lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=2048,
dataset_modes=('train', 'val'),
device='gpu',
lmdb_map_size=4e9,
construct_lmdb=True):
if not osp.exists(lfb_prefix_path):
raise ValueError(
f'lfb prefix path {lfb_prefix_path} does not exist!')
self.lfb_prefix_path = lfb_prefix_path
self.max_num_sampled_feat = max_num_sampled_feat
self.window_size = window_size
self.lfb_channels = lfb_channels
if not isinstance(dataset_modes, tuple):
assert isinstance(dataset_modes, str)
dataset_modes = (dataset_modes, )
self.dataset_modes = dataset_modes
self.device = device
rank, world_size = get_dist_info()
# Loading LFB
if self.device == 'gpu':
self.load_lfb(f'cuda:{rank}')
elif self.device == 'cpu':
if world_size > 1:
warnings.warn(
'If distributed training is used with multi-GPUs, lfb '
'will be loaded multiple times on RAM. In this case, '
"'lmdb' is recomended.", UserWarning)
self.load_lfb('cpu')
elif self.device == 'lmdb':
assert lmdb_imported, (
'Please install `lmdb` to load lfb on lmdb!')
self.lmdb_map_size = lmdb_map_size
self.construct_lmdb = construct_lmdb
self.lfb_lmdb_path = osp.normpath(
osp.join(self.lfb_prefix_path, 'lmdb'))
if rank == 0 and self.construct_lmdb:
print('Constructing LFB lmdb...')
self.load_lfb_on_lmdb()
# Synchronizes all processes to make sure lfb lmdb exist.
if world_size > 1:
dist.barrier()
self.lmdb_env = lmdb.open(self.lfb_lmdb_path, readonly=True)
else:
raise ValueError("Device must be 'gpu', 'cpu' or 'lmdb', ",
f'but get {self.device}.')
def load_lfb(self, map_location):
self.lfb = {}
for dataset_mode in self.dataset_modes:
lfb_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))
print(f'Loading LFB from {lfb_path}...')
self.lfb.update(torch.load(lfb_path, map_location=map_location))
print(f'LFB has been loaded on {map_location}.')
def load_lfb_on_lmdb(self):
lfb = {}
for dataset_mode in self.dataset_modes:
lfb_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))
lfb.update(torch.load(lfb_path, map_location='cpu'))
lmdb_env = lmdb.open(self.lfb_lmdb_path, map_size=self.lmdb_map_size)
for key, value in lfb.items():
txn = lmdb_env.begin(write=True)
buff = io.BytesIO()
torch.save(value, buff)
buff.seek(0)
txn.put(key.encode(), buff.read())
txn.commit()
buff.close()
print(f'LFB lmdb has been constructed on {self.lfb_lmdb_path}!')
def sample_long_term_features(self, video_id, timestamp):
if self.device == 'lmdb':
with self.lmdb_env.begin(write=False) as txn:
buf = txn.get(video_id.encode())
video_features = torch.load(io.BytesIO(buf))
else:
video_features = self.lfb[video_id]
# Sample long term features.
window_size, K = self.window_size, self.max_num_sampled_feat
start = timestamp - (window_size // 2)
lt_feats = torch.zeros(window_size * K, self.lfb_channels)
for idx, sec in enumerate(range(start, start + window_size)):
if sec in video_features:
# `num_feat` is the number of roi features in this second.
num_feat = len(video_features[sec])
num_feat_sampled = min(num_feat, K)
# Sample some roi features randomly.
random_lfb_indices = np.random.choice(
range(num_feat), num_feat_sampled, replace=False)
for k, rand_idx in enumerate(random_lfb_indices):
lt_feats[idx * K + k] = video_features[sec][rand_idx]
# [window_size * max_num_sampled_feat, lfb_channels]
return lt_feats
def __getitem__(self, img_key):
"""Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb`
is a instance of class LFB."""
video_id, timestamp = img_key.split(',')
return self.sample_long_term_features(video_id, int(timestamp))
def __len__(self):
"""The number of videos whose ROI features are stored in LFB."""
return len(self.lfb)
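# Usage sketch (paths are hypothetical): LFB is indexed with a
# 'video_id,timestamp' image key and returns a fixed-size feature tensor:
#
#   lfb = LFB('data/ava/lfb', dataset_modes=('train', ), device='cpu')
#   feats = lfb['0f39OWEqJ24,0902']
#   # feats.shape == (window_size * max_num_sampled_feat, lfb_channels)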
| 7,493
| 38.650794
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/common/conv2plus1d.py
|
import torch.nn as nn
from mmcv.cnn import CONV_LAYERS, build_norm_layer, constant_init, kaiming_init
from torch.nn.modules.utils import _triple
@CONV_LAYERS.register_module()
class Conv2plus1d(nn.Module):
"""(2+1)d Conv module for R(2+1)d backbone.
https://arxiv.org/pdf/1711.11248.pdf.
Args:
in_channels (int): Same as nn.Conv3d.
out_channels (int): Same as nn.Conv3d.
kernel_size (int | tuple[int]): Same as nn.Conv3d.
stride (int | tuple[int]): Same as nn.Conv3d.
padding (int | tuple[int]): Same as nn.Conv3d.
dilation (int | tuple[int]): Same as nn.Conv3d.
groups (int): Same as nn.Conv3d.
bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
norm_cfg=dict(type='BN3d')):
super().__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
assert len(kernel_size) == len(stride) == len(padding) == 3
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.norm_cfg = norm_cfg
self.output_padding = (0, 0, 0)
self.transposed = False
        # The number of middle channels is chosen to match the parameter
        # count of the full 3D conv, following the R(2+1)D paper
        # (https://arxiv.org/pdf/1711.11248.pdf):
        #   M_i = floor((t * d^2 * N_{i-1} * N_i) / (d^2 * N_{i-1} + t * N_i))
        # where d and t are the spatial and temporal kernel sizes, and
        # N_{i-1}, N_i are the input and output planes.
mid_channels = 3 * (
in_channels * out_channels * kernel_size[1] * kernel_size[2])
mid_channels /= (
in_channels * kernel_size[1] * kernel_size[2] + 3 * out_channels)
mid_channels = int(mid_channels)
self.conv_s = nn.Conv3d(
in_channels,
mid_channels,
kernel_size=(1, kernel_size[1], kernel_size[2]),
stride=(1, stride[1], stride[2]),
padding=(0, padding[1], padding[2]),
bias=bias)
_, self.bn_s = build_norm_layer(self.norm_cfg, mid_channels)
self.relu = nn.ReLU(inplace=True)
self.conv_t = nn.Conv3d(
mid_channels,
out_channels,
kernel_size=(kernel_size[0], 1, 1),
stride=(stride[0], 1, 1),
padding=(padding[0], 0, 0),
bias=bias)
self.init_weights()
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.conv_s(x)
x = self.bn_s(x)
x = self.relu(x)
x = self.conv_t(x)
return x
def init_weights(self):
"""Initiate the parameters from scratch."""
kaiming_init(self.conv_s)
kaiming_init(self.conv_t)
constant_init(self.bn_s, 1, bias=0)
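# Minimal usage sketch (sizes are illustrative): a 3x3x3 3D convolution is
# factorized into a 1x3x3 spatial conv followed by a 3x1x1 temporal conv,
# with `mid_channels` chosen to roughly preserve the parameter count.
if __name__ == '__main__':
    import torch
    conv = Conv2plus1d(3, 64, kernel_size=3, stride=1, padding=1)
    x = torch.randn(1, 3, 8, 32, 32)  # [N, C, T, H, W]
    assert conv(x).shape == (1, 64, 8, 32, 32)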
| 3,453
| 31.895238
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/common/__init__.py
|
from .conv2plus1d import Conv2plus1d
from .conv_audio import ConvAudio
from .lfb import LFB
from .tam import TAM
__all__ = ['Conv2plus1d', 'ConvAudio', 'LFB', 'TAM']
| 167
| 23
| 52
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/necks/tpn.py
|
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init
from ..builder import NECKS, build_loss
class Identity(nn.Module):
"""Identity mapping."""
def forward(self, x):
return x
class DownSample(nn.Module):
"""DownSample modules.
It uses convolution and maxpooling to downsample the input feature,
and specifies downsample position to determine `pool-conv` or `conv-pool`.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output feature.
kernel_size (int | tuple[int]): Same as :class:`ConvModule`.
Default: (3, 1, 1).
stride (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 1, 1).
padding (int | tuple[int]): Same as :class:`ConvModule`.
Default: (1, 0, 0).
groups (int): Same as :class:`ConvModule`. Default: 1.
bias (bool | str): Same as :class:`ConvModule`. Default: False.
conv_cfg (dict | None): Same as :class:`ConvModule`.
Default: dict(type='Conv3d').
norm_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
act_cfg (dict | None): Same as :class:`ConvModule`. Default: None.
downsample_position (str): Type of downsample position. Options are
'before' and 'after'. Default: 'after'.
downsample_scale (int | tuple[int]): downsample scale for maxpooling.
It will be used for kernel size and stride of maxpooling.
Default: (1, 2, 2).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=(3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
groups=1,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=None,
act_cfg=None,
downsample_position='after',
downsample_scale=(1, 2, 2)):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=groups,
bias=bias,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert downsample_position in ['before', 'after']
self.downsample_position = downsample_position
self.pool = nn.MaxPool3d(
downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)
def forward(self, x):
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
else:
x = self.conv(x)
x = self.pool(x)
return x
class LevelFusion(nn.Module):
"""Level Fusion module.
    This module is used to aggregate hierarchical features that are dynamic
    in visual tempo and consistent in spatial semantics. The top/bottom
    features of the top-down/bottom-up flows can be combined to achieve two
    additional options, namely 'Cascade Flow' or 'Parallel Flow': applying a
    bottom-up flow after a top-down flow leads to the cascade flow, while
    applying them simultaneously results in the parallel flow.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
mid_channels (tuple[int]): Channel numbers of middle features tuple.
out_channels (int): Channel numbers of output features.
downsample_scales (tuple[int | tuple[int]]): downsample scales for
each :class:`DownSample` module. Default: ((1, 1, 1), (1, 1, 1)).
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
downsample_scales=((1, 1, 1), (1, 1, 1))):
super().__init__()
num_stages = len(in_channels)
self.downsamples = nn.ModuleList()
for i in range(num_stages):
downsample = DownSample(
in_channels[i],
mid_channels[i],
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
bias=False,
padding=(0, 0, 0),
groups=32,
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
downsample_position='before',
downsample_scale=downsample_scales[i])
self.downsamples.append(downsample)
self.fusion_conv = ConvModule(
sum(mid_channels),
out_channels,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True))
def forward(self, x):
out = [self.downsamples[i](feature) for i, feature in enumerate(x)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out
class SpatialModulation(nn.Module):
"""Spatial Semantic Modulation.
This module is used to align spatial semantics of features in the
multi-depth pyramid. For each but the top-level feature, a stack
    of convolutions with level-specific stride is applied to it, matching
its spatial shape and receptive field with the top one.
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel numbers of output features tuple.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.spatial_modulation = nn.ModuleList()
for channel in in_channels:
downsample_scale = out_channels // channel
downsample_factor = int(np.log2(downsample_scale))
op = nn.ModuleList()
if downsample_factor < 1:
op = Identity()
else:
for factor in range(downsample_factor):
in_factor = 2**factor
out_factor = 2**(factor + 1)
op.append(
ConvModule(
channel * in_factor,
channel * out_factor, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)))
self.spatial_modulation.append(op)
def forward(self, x):
out = []
for i, _ in enumerate(x):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = x[i]
for op in self.spatial_modulation[i]:
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](x[i]))
return out
class AuxHead(nn.Module):
"""Auxiliary Head.
This auxiliary head is appended to receive stronger supervision,
leading to enhanced semantics.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
loss_weight (float): weight of loss for the auxiliary head.
Default: 0.5.
        loss_cls (dict): Config for building loss.
Default: ``dict(type='CrossEntropyLoss')``.
"""
def __init__(self,
in_channels,
out_channels,
loss_weight=0.5,
loss_cls=dict(type='CrossEntropyLoss')):
super().__init__()
self.conv = ConvModule(
in_channels,
in_channels * 2, (1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc = nn.Linear(in_channels * 2, out_channels)
self.loss_cls = build_loss(loss_cls)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
normal_init(m, std=0.01)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
def forward(self, x, target=None):
losses = dict()
if target is None:
return losses
x = self.conv(x)
x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(x)
x = self.fc(x)
if target.shape == torch.Size([]):
target = target.unsqueeze(0)
losses['loss_aux'] = self.loss_weight * self.loss_cls(x, target)
return losses
class TemporalModulation(nn.Module):
"""Temporal Rate Modulation.
The module is used to equip TPN with a similar flexibility for temporal
tempo modulation as in the input-level frame pyramid.
Args:
in_channels (int): Channel number of input features.
out_channels (int): Channel number of output features.
downsample_scale (int): Downsample scale for maxpooling. Default: 8.
"""
def __init__(self, in_channels, out_channels, downsample_scale=8):
super().__init__()
self.conv = ConvModule(
in_channels,
out_channels, (3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
bias=False,
groups=32,
conv_cfg=dict(type='Conv3d'),
act_cfg=None)
self.pool = nn.MaxPool3d((downsample_scale, 1, 1),
(downsample_scale, 1, 1), (0, 0, 0),
ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
@NECKS.register_module()
class TPN(nn.Module):
"""TPN neck.
This module is proposed in `Temporal Pyramid Network for Action Recognition
<https://arxiv.org/pdf/2004.03548.pdf>`_
Args:
in_channels (tuple[int]): Channel numbers of input features tuple.
out_channels (int): Channel number of output feature.
spatial_modulation_cfg (dict | None): Config for spatial modulation
layers. Required keys are `in_channels` and `out_channels`.
Default: None.
temporal_modulation_cfg (dict | None): Config for temporal modulation
layers. Default: None.
        upsample_cfg (dict | None): Config for upsample layers. The keys are
            the same as those in :class:`nn.Upsample`. Default: None.
downsample_cfg (dict | None): Config for downsample layers.
Default: None.
level_fusion_cfg (dict | None): Config for level fusion layers.
Required keys are 'in_channels', 'mid_channels', 'out_channels'.
Default: None.
aux_head_cfg (dict | None): Config for aux head layers.
Required keys are 'out_channels'. Default: None.
flow_type (str): Flow type to combine the features. Options are
'cascade' and 'parallel'. Default: 'cascade'.
"""
def __init__(self,
in_channels,
out_channels,
spatial_modulation_cfg=None,
temporal_modulation_cfg=None,
upsample_cfg=None,
downsample_cfg=None,
level_fusion_cfg=None,
aux_head_cfg=None,
flow_type='cascade'):
super().__init__()
assert isinstance(in_channels, tuple)
assert isinstance(out_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_tpn_stages = len(in_channels)
assert spatial_modulation_cfg is None or isinstance(
spatial_modulation_cfg, dict)
assert temporal_modulation_cfg is None or isinstance(
temporal_modulation_cfg, dict)
assert upsample_cfg is None or isinstance(upsample_cfg, dict)
assert downsample_cfg is None or isinstance(downsample_cfg, dict)
assert aux_head_cfg is None or isinstance(aux_head_cfg, dict)
assert level_fusion_cfg is None or isinstance(level_fusion_cfg, dict)
if flow_type not in ['cascade', 'parallel']:
raise ValueError(
f"flow type in TPN should be 'cascade' or 'parallel', "
f'but got {flow_type} instead.')
self.flow_type = flow_type
self.temporal_modulation_ops = nn.ModuleList()
self.upsample_ops = nn.ModuleList()
self.downsample_ops = nn.ModuleList()
self.level_fusion_1 = LevelFusion(**level_fusion_cfg)
self.spatial_modulation = SpatialModulation(**spatial_modulation_cfg)
for i in range(self.num_tpn_stages):
if temporal_modulation_cfg is not None:
downsample_scale = temporal_modulation_cfg[
'downsample_scales'][i]
temporal_modulation = TemporalModulation(
in_channels[-1], out_channels, downsample_scale)
self.temporal_modulation_ops.append(temporal_modulation)
if i < self.num_tpn_stages - 1:
if upsample_cfg is not None:
upsample = nn.Upsample(**upsample_cfg)
self.upsample_ops.append(upsample)
if downsample_cfg is not None:
downsample = DownSample(out_channels, out_channels,
**downsample_cfg)
self.downsample_ops.append(downsample)
out_dims = level_fusion_cfg['out_channels']
# two pyramids
self.level_fusion_2 = LevelFusion(**level_fusion_cfg)
self.pyramid_fusion = ConvModule(
out_dims * 2,
2048,
1,
stride=1,
padding=0,
bias=False,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True))
if aux_head_cfg is not None:
self.aux_head = AuxHead(self.in_channels[-2], **aux_head_cfg)
else:
self.aux_head = None
self.init_weights()
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
if self.aux_head is not None:
self.aux_head.init_weights()
def forward(self, x, target=None):
loss_aux = dict()
# Auxiliary loss
if self.aux_head is not None:
loss_aux = self.aux_head(x[-2], target)
# Spatial Modulation
spatial_modulation_outs = self.spatial_modulation(x)
# Temporal Modulation
temporal_modulation_outs = []
for i, temporal_modulation in enumerate(self.temporal_modulation_ops):
temporal_modulation_outs.append(
temporal_modulation(spatial_modulation_outs[i]))
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.upsample_ops) != 0:
for i in range(self.num_tpn_stages - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i])
# Get top-down outs
top_down_outs = self.level_fusion_1(outs)
# Build bottom-up flow using downsample operation
if self.flow_type == 'parallel':
outs = [out.clone() for out in temporal_modulation_outs]
if len(self.downsample_ops) != 0:
for i in range(self.num_tpn_stages - 1):
outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i])
# Get bottom-up outs
            bottom_up_outs = self.level_fusion_2(outs)
# fuse two pyramid outs
outs = self.pyramid_fusion(
            torch.cat([top_down_outs, bottom_up_outs], 1))
return outs, loss_aux
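# Minimal usage sketch (channel sizes and scales are illustrative, loosely
# following the mmaction TPN configs; check the repo configs for the real
# values). The neck consumes a tuple of backbone stage features.
if __name__ == '__main__':
    neck = TPN(
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(
            in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024),
            mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))))
    feats = (torch.randn(2, 1024, 8, 16, 16), torch.randn(2, 2048, 8, 8, 8))
    outs, loss_aux = neck(feats)  # outs: [2, 2048, 1, 8, 8]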
| 16,411
| 35.552339
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/necks/__init__.py
|
from .tpn import TPN
__all__ = ['TPN']
| 40
| 9.25
| 20
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/roi_extractors/__init__.py
|
from .single_straight3d import SingleRoIExtractor3D
__all__ = ['SingleRoIExtractor3D']
| 88
| 21.25
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/roi_extractors/single_straight3d.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.utils import import_module_error_class
try:
from mmcv.ops import RoIAlign, RoIPool
except (ImportError, ModuleNotFoundError):
@import_module_error_class('mmcv-full')
class RoIAlign(nn.Module):
pass
@import_module_error_class('mmcv-full')
class RoIPool(nn.Module):
pass
try:
from mmdet.models import ROI_EXTRACTORS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class SingleRoIExtractor3D(nn.Module):
"""Extract RoI features from a single level feature map.
Args:
roi_layer_type (str): Specify the RoI layer type. Default: 'RoIAlign'.
featmap_stride (int): Strides of input feature maps. Default: 16.
        output_size (int | tuple): Output size as an int or (Height, Width).
            Default: 16.
        sampling_ratio (int): Number of input samples to take for each
            output sample. 0 to take samples densely.
            Default: 0.
pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
Default: 'avg'.
aligned (bool): if False, use the legacy implementation in
MMDetection. If True, align the results more perfectly.
Default: True.
        with_temporal_pool (bool): if True, pool the temporal dim according
            to `temporal_pool_mode`. Default: True.
        temporal_pool_mode (str): Temporal pooling mode. Choices are 'avg'
            and 'max'. Default: 'avg'.
with_global (bool): if True, concatenate the RoI feature with global
feature. Default: False.
Note that sampling_ratio, pool_mode, aligned only apply when roi_layer_type
is set as RoIAlign.
"""
def __init__(self,
roi_layer_type='RoIAlign',
featmap_stride=16,
output_size=16,
sampling_ratio=0,
pool_mode='avg',
aligned=True,
with_temporal_pool=True,
temporal_pool_mode='avg',
with_global=False):
super().__init__()
self.roi_layer_type = roi_layer_type
assert self.roi_layer_type in ['RoIPool', 'RoIAlign']
self.featmap_stride = featmap_stride
self.spatial_scale = 1. / self.featmap_stride
self.output_size = output_size
self.sampling_ratio = sampling_ratio
self.pool_mode = pool_mode
self.aligned = aligned
self.with_temporal_pool = with_temporal_pool
self.temporal_pool_mode = temporal_pool_mode
self.with_global = with_global
if self.roi_layer_type == 'RoIPool':
self.roi_layer = RoIPool(self.output_size, self.spatial_scale)
else:
self.roi_layer = RoIAlign(
self.output_size,
self.spatial_scale,
sampling_ratio=self.sampling_ratio,
pool_mode=self.pool_mode,
aligned=self.aligned)
self.global_pool = nn.AdaptiveAvgPool2d(self.output_size)
def init_weights(self):
pass
# The shape of feat is N, C, T, H, W
def forward(self, feat, rois):
if not isinstance(feat, tuple):
feat = (feat, )
if len(feat) >= 2:
maxT = max([x.shape[2] for x in feat])
max_shape = (maxT, ) + feat[0].shape[3:]
# resize each feat to the largest shape (w. nearest)
feat = [F.interpolate(x, max_shape).contiguous() for x in feat]
if self.with_temporal_pool:
if self.temporal_pool_mode == 'avg':
feat = [torch.mean(x, 2, keepdim=True) for x in feat]
elif self.temporal_pool_mode == 'max':
feat = [torch.max(x, 2, keepdim=True)[0] for x in feat]
else:
raise NotImplementedError
feat = torch.cat(feat, axis=1).contiguous()
roi_feats = []
for t in range(feat.size(2)):
frame_feat = feat[:, :, t].contiguous()
roi_feat = self.roi_layer(frame_feat, rois)
if self.with_global:
global_feat = self.global_pool(frame_feat.contiguous())
inds = rois[:, 0].type(torch.int64)
global_feat = global_feat[inds]
roi_feat = torch.cat([roi_feat, global_feat], dim=1)
roi_feat = roi_feat.contiguous()
roi_feats.append(roi_feat)
return torch.stack(roi_feats, dim=2), feat
if mmdet_imported:
ROI_EXTRACTORS.register_module()(SingleRoIExtractor3D)
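# Minimal usage sketch (shapes are illustrative; requires mmcv-full for the
# RoIAlign op). `rois` rows are [batch_index, x1, y1, x2, y2] in the input
# image coordinate frame; with temporal pooling the T axis collapses to 1.
if __name__ == '__main__':
    extractor = SingleRoIExtractor3D(output_size=8)
    feat = torch.randn(2, 16, 4, 14, 14)  # [N, C, T, H, W]
    rois = torch.tensor([[0., 0., 0., 64., 64.], [1., 16., 16., 128., 128.]])
    roi_feats, _ = extractor(feat, rois)
    assert roi_feats.shape == (2, 16, 1, 8, 8)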
| 4,474
| 33.689922
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/base.py
|
from abc import ABCMeta, abstractmethod
import torch.nn as nn
class BaseWeightedLoss(nn.Module, metaclass=ABCMeta):
"""Base class for loss.
    All subclasses should overwrite the ``_forward()`` method which returns
    the normal loss without loss weights.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__()
self.loss_weight = loss_weight
@abstractmethod
def _forward(self, *args, **kwargs):
pass
def forward(self, *args, **kwargs):
"""Defines the computation performed at every call.
Args:
*args: The positional arguments for the corresponding
loss.
**kwargs: The keyword arguments for the corresponding
loss.
Returns:
torch.Tensor: The calculated loss.
"""
ret = self._forward(*args, **kwargs)
if isinstance(ret, dict):
for k in ret:
if 'loss' in k:
ret[k] *= self.loss_weight
else:
ret *= self.loss_weight
return ret
| 1,181
| 25.266667
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/margin_loss.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
def batched_index_select(input, dim, index):
for i in range(1, len(input.shape)):
if i != dim:
index = index.unsqueeze(i)
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
@LOSSES.register_module()
class MarginLoss(nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
    def __init__(self, margin=0.5, alpha1=2, alpha2=0.5, loss_weight=1.):
        super().__init__()
        self.alpha1 = alpha1
        self.alpha2 = alpha2
        self.margin = margin
        # `loss_weight` is accepted for config compatibility but is unused.
def forward(self, cls_score, labels, bottom_outputs):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
base_loss = F.cross_entropy(cls_score, labels)
if bottom_outputs is None:
loss = base_loss
else:
labels = labels.unsqueeze(1)
outputs = F.softmax(cls_score, dim=-1)
bottom_outputs = F.softmax(bottom_outputs, dim=-1)
topk_prob = batched_index_select(outputs, dim=1, index=labels)
bottom_prob = batched_index_select(bottom_outputs, dim=1, index=labels)
margin_loss = bottom_prob - topk_prob + self.margin
margin_loss = F.relu(margin_loss).mean()
loss = base_loss * self.alpha1 + margin_loss * self.alpha2
return loss
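# Minimal usage sketch (shapes are illustrative): `batched_index_select`
# gathers the ground-truth class probability per sample, and the margin term
# pushes the kept-token prediction above the bottom-token prediction.
if __name__ == '__main__':
    scores = torch.randn(4, 10)   # predictions from kept tokens
    bottom = torch.randn(4, 10)   # predictions from dropped tokens
    labels = torch.randint(0, 10, (4, ))
    gt_prob = batched_index_select(
        F.softmax(scores, -1), dim=1, index=labels.unsqueeze(1))
    assert gt_prob.shape == (4, 1)
    loss = MarginLoss()(scores, labels, bottom)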
| 2,004
| 34.175439
| 91
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/distill_loss.py
|
import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
@LOSSES.register_module()
class DistillationLoss(nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
    def __init__(self, distill_type='hard', alpha=0.5, tau=1.0, loss_weight=0.):
        super().__init__()
        assert distill_type in ['none', 'soft', 'hard']
        self.distillation_type = distill_type
        self.alpha = alpha
        self.tau = tau
        # `loss_weight` is accepted for config compatibility but is unused.
def forward(self, cls_score, labels, teacher_outputs):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
if cls_score.size() == labels.size():
# calculate loss for soft label
lsm = F.log_softmax(cls_score, 1)
loss_cls = -(labels * lsm).sum(1)
base_loss = loss_cls.mean()
else:
# calculate loss for hard label
base_loss = F.cross_entropy(cls_score, labels)
if self.distillation_type == 'none':
return base_loss
if self.distillation_type == 'soft':
T = self.tau
distillation_loss = F.kl_div(
F.log_softmax(cls_score / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / cls_score.numel()
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(cls_score, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
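# Minimal usage sketch (shapes are illustrative): soft distillation matches
# temperature-scaled student/teacher distributions via KL divergence.
if __name__ == '__main__':
    import torch
    student = torch.randn(4, 10)
    teacher = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4, ))
    criterion = DistillationLoss(distill_type='soft', alpha=0.5, tau=2.0)
    loss = criterion(student, labels, teacher)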
| 2,144
| 34.75
| 91
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/ohem_hinge_loss.py
|
import torch
class OHEMHingeLoss(torch.autograd.Function):
"""This class is the core implementation for the completeness loss in
paper.
It compute class-wise hinge loss and performs online hard example mining
(OHEM).
"""
@staticmethod
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size):
"""Calculate OHEM hinge loss.
Args:
pred (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
is_positive (int): Set to 1 when proposals are positive and
set to -1 when proposals are incomplete.
ohem_ratio (float): Ratio of hard examples.
group_size (int): Number of proposals sampled per video.
Returns:
torch.Tensor: Returned class-wise hinge loss.
"""
num_samples = pred.size(0)
if num_samples != len(labels):
raise ValueError(f'Number of samples should be equal to that '
f'of labels, but got {num_samples} samples and '
f'{len(labels)} labels.')
losses = torch.zeros(num_samples, device=pred.device)
slopes = torch.zeros(num_samples, device=pred.device)
for i in range(num_samples):
losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1])
slopes[i] = -is_positive if losses[i] != 0 else 0
losses = losses.view(-1, group_size).contiguous()
sorted_losses, indices = torch.sort(losses, dim=1, descending=True)
keep_length = int(group_size * ohem_ratio)
loss = torch.zeros(1, device=pred.device)
for i in range(losses.size(0)):
loss += sorted_losses[i, :keep_length].sum()
ctx.loss_index = indices[:, :keep_length]
ctx.labels = labels
ctx.slopes = slopes
ctx.shape = pred.size()
ctx.group_size = group_size
ctx.num_groups = losses.size(0)
return loss
@staticmethod
def backward(ctx, grad_output):
labels = ctx.labels
slopes = ctx.slopes
grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device)
for group in range(ctx.num_groups):
for idx in ctx.loss_index[group]:
loc = idx + group * ctx.group_size
grad_in[loc, labels[loc] - 1] = (
slopes[loc] * grad_output.data[0])
        # `torch.autograd.Variable` is deprecated; return the tensor directly.
        return grad_in, None, None, None, None
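# Minimal usage sketch (numbers are illustrative): 8 proposals in groups of
# 4; with ohem_ratio=0.5 only the 2 hardest examples per group contribute.
# Note that `labels` are 1-indexed in this implementation.
if __name__ == '__main__':
    pred = torch.randn(8, 5, requires_grad=True)
    labels = torch.randint(1, 6, (8, ))
    loss = OHEMHingeLoss.apply(pred, labels, 1, 0.5, 4)
    loss.backward()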
| 2,497
| 37.430769
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/bmn_loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .binary_logistic_regression_loss import binary_logistic_regression_loss
@LOSSES.register_module()
class BMNLoss(nn.Module):
"""BMN Loss.
From paper https://arxiv.org/abs/1907.09702,
code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
It will calculate loss for BMN Model. This loss is a weighted sum of
1) temporal evaluation loss based on confidence score of start and
end positions.
2) proposal evaluation regression loss based on confidence scores of
candidate proposals.
3) proposal evaluation classification loss based on classification
results of candidate proposals.
"""
@staticmethod
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
This function calculate the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
@staticmethod
def pem_reg_loss(pred_score,
gt_iou_map,
mask,
high_temporal_iou_threshold=0.7,
low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
                temporal_iou. Default: 0.3.
Returns:
            torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) &
(gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) &
(gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(
loss * torch.ones_like(weights)) / torch.sum(weights)
return loss
@staticmethod
def pem_cls_loss(pred_score,
gt_iou_map,
mask,
threshold=0.9,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
            torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
def forward(self,
pred_bm,
pred_start,
pred_end,
gt_iou_map,
gt_start,
gt_end,
bm_mask,
weight_tem=1.0,
weight_pem_reg=10.0,
weight_pem_cls=1.0):
"""Calculate Boundary Matching Network Loss.
Args:
pred_bm (torch.Tensor): Predicted confidence score for boundary
matching map.
pred_start (torch.Tensor): Predicted confidence score for start.
pred_end (torch.Tensor): Predicted confidence score for end.
gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
map.
gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
bm_mask (torch.Tensor): Boundary-Matching mask.
weight_tem (float): Weight for tem loss. Default: 1.0.
weight_pem_reg (float): Weight for pem regression loss.
Default: 10.0.
weight_pem_cls (float): Weight for pem classification loss.
Default: 1.0.
Returns:
tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
(loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn
loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
the proposal evaluation regression loss, pem_cls_loss is the
proposal evaluation classification loss.
"""
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask
pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
loss = (
weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
weight_pem_cls * pem_cls_loss)
return loss, tem_loss, pem_reg_loss, pem_cls_loss
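# Minimal usage sketch (temporal scale is illustrative): all maps share the
# temporal dimension T; `pred_bm` carries regression and classification
# channels, and `bm_mask` marks valid proposal positions.
if __name__ == '__main__':
    T = 100
    pred_bm = torch.rand(2, 2, T, T)
    pred_start, pred_end = torch.rand(2, T), torch.rand(2, T)
    gt_iou_map = torch.rand(2, T, T)
    gt_start, gt_end = torch.rand(2, T), torch.rand(2, T)
    bm_mask = torch.ones(T, T)
    loss, tem, pem_reg, pem_cls = BMNLoss()(pred_bm, pred_start, pred_end,
                                            gt_iou_map, gt_start, gt_end,
                                            bm_mask)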
| 7,173
| 38.635359
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/nll_loss.py
|
import torch.nn.functional as F
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class NLLLoss(BaseWeightedLoss):
"""NLL Loss.
It will calculate NLL loss given cls_score and label.
"""
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate nll loss.
Returns:
torch.Tensor: The returned nll loss.
"""
loss_cls = F.nll_loss(cls_score, label, **kwargs)
return loss_cls
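# Minimal usage sketch (shapes are illustrative): NLL loss expects
# log-probabilities, so apply log_softmax before calling it.
if __name__ == '__main__':
    import torch
    log_probs = F.log_softmax(torch.randn(4, 10), dim=1)
    labels = torch.randint(0, 10, (4, ))
    loss = NLLLoss()(log_probs, labels)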
| 688
| 24.518519
| 74
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/hvu_loss.py
|
import torch
import torch.nn.functional as F
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class HVULoss(BaseWeightedLoss):
"""Calculate the BCELoss for HVU.
Args:
categories (tuple[str]): Names of tag categories, tags are organized in
this order. Default: ['action', 'attribute', 'concept', 'event',
'object', 'scene'].
category_nums (tuple[int]): Number of tags for each category. Default:
(739, 117, 291, 69, 1678, 248).
        category_loss_weights (tuple[float]): Loss weights of categories. It
            applies only if `loss_type == 'individual'`. The loss weights will
            be normalized to sum to 1, so you can give any positive number as
            a loss weight. Default: (1, 1, 1, 1, 1, 1).
loss_type (str): The loss type we calculate, we can either calculate
the BCELoss for all tags, or calculate the BCELoss for tags in each
category. Choices are 'individual' or 'all'. Default: 'all'.
        with_mask (bool): Some tag categories are missing for some video
            clips. If `with_mask == True`, we will not calculate loss for
            these missing categories. Otherwise, these missing categories are
            treated as negative samples.
reduction (str): Reduction way. Choices are 'mean' or 'sum'. Default:
'mean'.
loss_weight (float): The loss weight. Default: 1.0.
"""
def __init__(self,
categories=('action', 'attribute', 'concept', 'event',
'object', 'scene'),
category_nums=(739, 117, 291, 69, 1678, 248),
category_loss_weights=(1, 1, 1, 1, 1, 1),
loss_type='all',
with_mask=False,
reduction='mean',
loss_weight=1.0):
super().__init__(loss_weight)
self.categories = categories
self.category_nums = category_nums
self.category_loss_weights = category_loss_weights
assert len(self.category_nums) == len(self.category_loss_weights)
for category_loss_weight in self.category_loss_weights:
assert category_loss_weight >= 0
self.loss_type = loss_type
self.with_mask = with_mask
self.reduction = reduction
self.category_startidx = [0]
for i in range(len(self.category_nums) - 1):
self.category_startidx.append(self.category_startidx[-1] +
self.category_nums[i])
assert self.loss_type in ['individual', 'all']
assert self.reduction in ['mean', 'sum']
def _forward(self, cls_score, label, mask, category_mask):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
mask (torch.Tensor): The mask of tags. 0 indicates that the
category of this tag is missing in the label of the video.
            category_mask (torch.Tensor): The category mask. For each sample,
                it's a tensor with length `len(self.categories)`, denoting
                whether each category is labeled for this video.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if self.loss_type == 'all':
loss_cls = F.binary_cross_entropy_with_logits(
cls_score, label, reduction='none')
if self.with_mask:
w_loss_cls = mask * loss_cls
w_loss_cls = torch.sum(w_loss_cls, dim=1)
if self.reduction == 'mean':
w_loss_cls = w_loss_cls / torch.sum(mask, dim=1)
w_loss_cls = torch.mean(w_loss_cls)
return dict(loss_cls=w_loss_cls)
if self.reduction == 'sum':
loss_cls = torch.sum(loss_cls, dim=-1)
return dict(loss_cls=torch.mean(loss_cls))
if self.loss_type == 'individual':
losses = {}
loss_weights = {}
for name, num, start_idx in zip(self.categories,
self.category_nums,
self.category_startidx):
category_score = cls_score[:, start_idx:start_idx + num]
category_label = label[:, start_idx:start_idx + num]
category_loss = F.binary_cross_entropy_with_logits(
category_score, category_label, reduction='none')
if self.reduction == 'mean':
category_loss = torch.mean(category_loss, dim=1)
elif self.reduction == 'sum':
category_loss = torch.sum(category_loss, dim=1)
idx = self.categories.index(name)
if self.with_mask:
category_mask_i = category_mask[:, idx].reshape(-1)
                    # there should be at least one sample which contains tags
                    # in this category
if torch.sum(category_mask_i) < 0.5:
losses[f'{name}_LOSS'] = torch.tensor(.0).cuda()
loss_weights[f'{name}_LOSS'] = .0
continue
category_loss = torch.sum(category_loss * category_mask_i)
category_loss = category_loss / torch.sum(category_mask_i)
else:
category_loss = torch.mean(category_loss)
                # We name the loss of each category as 'LOSS', since we only
                # want to monitor them, not backpropagate through them. We
                # also provide the loss used for backward in the losses dict.
losses[f'{name}_LOSS'] = category_loss
loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx]
loss_weight_sum = sum(loss_weights.values())
loss_weights = {
k: v / loss_weight_sum
for k, v in loss_weights.items()
}
loss_cls = sum([losses[k] * loss_weights[k] for k in losses])
losses['loss_cls'] = loss_cls
# We also trace the loss weights
losses.update({
k + '_weight': torch.tensor(v).to(losses[k].device)
for k, v in loss_weights.items()
})
# Note that the loss weights are just for reference.
return losses
else:
raise ValueError("loss_type should be 'all' or 'individual', "
f'but got {self.loss_type}')
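# Minimal usage sketch (category sizes are illustrative toy numbers): with
# loss_type='individual' and with_mask=False, per-category BCE losses are
# combined with normalized category weights.
if __name__ == '__main__':
    loss_fn = HVULoss(
        categories=('action', 'scene'),
        category_nums=(3, 2),
        category_loss_weights=(1, 1),
        loss_type='individual',
        with_mask=False)
    cls_score = torch.randn(4, 5)
    label = torch.randint(0, 2, (4, 5)).float()
    losses = loss_fn(cls_score, label, mask=None, category_mask=None)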
| 6,676
| 46.021127
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/binary_logistic_regression_loss.py
|
import torch
import torch.nn as nn
from ..builder import LOSSES
def binary_logistic_regression_loss(reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Binary Logistic Regression Loss."""
label = label.view(-1).to(reg_score.device)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float().to(reg_score.device)
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
# clip ratio value between ratio_range
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (
1.0 - pmask) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
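# Minimal usage sketch (values are illustrative): `reg_score` holds predicted
# probabilities in (0, 1); positives are re-weighted by the class ratio.
if __name__ == '__main__':
    reg_score = torch.rand(16)
    label = (torch.rand(16) > 0.5).float()
    loss = BinaryLogisticRegressionLoss()(reg_score, label)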
| 2,061
| 32.258065
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/ssn_loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .ohem_hinge_loss import OHEMHingeLoss
@LOSSES.register_module()
class SSNLoss(nn.Module):
@staticmethod
def activity_loss(activity_score, labels, activity_indexer):
"""Activity Loss.
It will calculate activity loss given activity_score and label.
Args:
activity_score (torch.Tensor): Predicted activity score.
labels (torch.Tensor): Groundtruth class label.
activity_indexer (torch.Tensor): Index slices of proposals.
Returns:
torch.Tensor: Returned cross entropy loss.
"""
pred = activity_score[activity_indexer, :]
gt = labels[activity_indexer]
return F.cross_entropy(pred, gt)
@staticmethod
def completeness_loss(completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=0.17):
"""Completeness Loss.
It will calculate completeness loss given completeness_score and label.
Args:
completeness_score (torch.Tensor): Predicted completeness score.
labels (torch.Tensor): Groundtruth class label.
completeness_indexer (torch.Tensor): Index slices of positive and
incomplete proposals.
positive_per_video (int): Number of positive proposals sampled
per video.
incomplete_per_video (int): Number of incomplete proposals sampled
                per video.
ohem_ratio (float): Ratio of online hard example mining.
Default: 0.17.
Returns:
torch.Tensor: Returned class-wise completeness loss.
"""
pred = completeness_score[completeness_indexer, :]
gt = labels[completeness_indexer]
pred_dim = pred.size(1)
pred = pred.view(-1, positive_per_video + incomplete_per_video,
pred_dim)
gt = gt.view(-1, positive_per_video + incomplete_per_video)
# yapf:disable
positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501
incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501
# yapf:enable
positive_loss = OHEMHingeLoss.apply(
positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
1.0, positive_per_video)
incomplete_loss = OHEMHingeLoss.apply(
incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1),
-1, ohem_ratio, incomplete_per_video)
num_positives = positive_pred.size(0)
num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
return ((positive_loss + incomplete_loss) /
float(num_positives + num_incompletes))
@staticmethod
def classwise_regression_loss(bbox_pred, labels, bbox_targets,
regression_indexer):
"""Classwise Regression Loss.
It will calculate classwise_regression loss given
class_reg_pred and targets.
Args:
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
regression_indexer (torch.Tensor): Index slices of
positive proposals.
Returns:
torch.Tensor: Returned class-wise regression loss.
"""
pred = bbox_pred[regression_indexer, :, :]
gt = labels[regression_indexer]
reg_target = bbox_targets[regression_indexer, :]
class_idx = gt.data - 1
classwise_pred = pred[:, class_idx, :]
classwise_reg_pred = torch.cat(
(torch.diag(classwise_pred[:, :, 0]).view(
-1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
dim=1)
loss = F.smooth_l1_loss(
classwise_reg_pred.view(-1), reg_target.view(-1)) * 2
return loss
def forward(self, activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg):
"""Calculate Boundary Matching Network Loss.
Args:
activity_score (torch.Tensor): Predicted activity score.
completeness_score (torch.Tensor): Predicted completeness score.
bbox_pred (torch.Tensor): Predicted interval center and span
of positive proposals.
proposal_type (torch.Tensor): Type index slices of proposals.
labels (torch.Tensor): Groundtruth class label.
bbox_targets (torch.Tensor): Groundtruth center and span
of positive proposals.
train_cfg (dict): Config for training.
Returns:
dict([torch.Tensor, torch.Tensor, torch.Tensor]):
(loss_activity, loss_completeness, loss_reg).
                loss_activity is the activity loss, loss_completeness is the
                class-wise completeness loss and loss_reg is the class-wise
                regression loss.
"""
self.sampler = train_cfg.ssn.sampler
self.loss_weight = train_cfg.ssn.loss_weight
losses = dict()
proposal_type = proposal_type.view(-1)
labels = labels.view(-1)
activity_indexer = ((proposal_type == 0) +
(proposal_type == 2)).nonzero().squeeze(1)
completeness_indexer = ((proposal_type == 0) +
(proposal_type == 1)).nonzero().squeeze(1)
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
positive_per_video = int(self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
incomplete_per_video = (
self.sampler.num_per_video - positive_per_video -
background_per_video)
losses['loss_activity'] = self.activity_loss(activity_score, labels,
activity_indexer)
losses['loss_completeness'] = self.completeness_loss(
completeness_score,
labels,
completeness_indexer,
positive_per_video,
incomplete_per_video,
ohem_ratio=positive_per_video / incomplete_per_video)
losses['loss_completeness'] *= self.loss_weight.comp_loss_weight
if bbox_pred is not None:
regression_indexer = (proposal_type == 0).nonzero().squeeze(1)
bbox_targets = bbox_targets.view(-1, 2)
losses['loss_reg'] = self.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
losses['loss_reg'] *= self.loss_weight.reg_loss_weight
return losses
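# --- Usage sketch (added for illustration; all numbers are assumptions) ---
# `train_cfg` must expose `ssn.sampler` (num_per_video plus the positive,
# background and incomplete ratios) and `ssn.loss_weight` (comp_loss_weight,
# reg_loss_weight). With num_per_video=8 and ratios 1:1:6, each video
# contributes 1 positive, 6 incomplete and 1 background proposal. Within the
# completeness indexer, each video's positives must come before its
# incompletes, because the view(-1, positive + incomplete, ...) reshape in
# completeness_loss relies on that ordering.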
| 7,274
| 39.416667
| 102
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/cross_entropy_loss.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from ..builder import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register_module()
class CrossEntropyLoss(BaseWeightedLoss):
"""Cross Entropy Loss.
    Support two kinds of labels and their corresponding loss types. It's worth
    mentioning that the loss type will be detected by the shape of ``cls_score``
    and ``label``.
1) Hard label: This label is an integer array and all of the elements are
in the range [0, num_classes - 1]. This label's shape should be
``cls_score``'s shape with the `num_classes` dimension removed.
    2) Soft label (probability distribution over classes): This label is a
probability distribution and all of the elements are in the range
[0, 1]. This label's shape must be the same as ``cls_score``. For now,
only 2-dim soft label is supported.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
class_weight (list[float] | None): Loss weight for each class. If set
as None, use the same weight 1 for all classes. Only applies
to CrossEntropyLoss and BCELossWithLogits (should not be set when
using other losses). Default: None.
"""
def __init__(self, loss_weight=1.0, class_weight=None):
super().__init__(loss_weight=loss_weight)
self.class_weight = None
if class_weight is not None:
self.class_weight = torch.Tensor(class_weight)
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
CrossEntropy loss.
Returns:
torch.Tensor: The returned CrossEntropy loss.
"""
if cls_score.size() == label.size():
# calculate loss for soft label
assert cls_score.dim() == 2, 'Only support 2-dim soft label'
assert len(kwargs) == 0, \
('For now, no extra args are supported for soft label, '
                 f'but got {kwargs}')
lsm = F.log_softmax(cls_score, 1)
if self.class_weight is not None:
lsm = lsm * self.class_weight.unsqueeze(0)
loss_cls = -(label * lsm).sum(1)
# default reduction 'mean'
if self.class_weight is not None:
# Use weighted average as pytorch CrossEntropyLoss does.
# For more information, please visit https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html # noqa
loss_cls = loss_cls.sum() / torch.sum(
self.class_weight.unsqueeze(0) * label)
else:
loss_cls = loss_cls.mean()
else:
# calculate loss for hard label
if self.class_weight is not None:
assert 'weight' not in kwargs, \
"The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.cross_entropy(cls_score, label, **kwargs)
return loss_cls
@LOSSES.register_module()
class BCELossWithLogits(BaseWeightedLoss):
"""Binary Cross Entropy Loss with logits.
Args:
loss_weight (float): Factor scalar multiplied on the loss.
Default: 1.0.
class_weight (list[float] | None): Loss weight for each class. If set
as None, use the same weight 1 for all classes. Only applies
to CrossEntropyLoss and BCELossWithLogits (should not be set when
using other losses). Default: None.
"""
def __init__(self, loss_weight=1.0, class_weight=None):
super().__init__(loss_weight=loss_weight)
self.class_weight = None
if class_weight is not None:
self.class_weight = torch.Tensor(class_weight)
def _forward(self, cls_score, label, **kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The class score.
label (torch.Tensor): The ground truth label.
kwargs: Any keyword argument to be used to calculate
bce loss with logits.
Returns:
torch.Tensor: The returned bce loss with logits.
"""
if self.class_weight is not None:
assert 'weight' not in kwargs, "The key 'weight' already exists."
kwargs['weight'] = self.class_weight.to(cls_score.device)
loss_cls = F.binary_cross_entropy_with_logits(cls_score, label,
**kwargs)
return loss_cls
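# --- Usage sketch (added for illustration; shapes are made-up assumptions) ---
if __name__ == '__main__':
    ce = CrossEntropyLoss()
    cls_score = torch.randn(4, 10)
    hard_label = torch.randint(0, 10, (4, ))           # class indices
    soft_label = F.softmax(torch.randn(4, 10), dim=1)  # distributions
    # The branch is picked purely by shape: hard labels differ in shape from
    # cls_score, while soft labels match it exactly.
    print(ce(cls_score, hard_label), ce(cls_score, soft_label))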
| 4,774
| 39.12605
| 132
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/losses/__init__.py
|
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss'
]
| 520
| 33.733333
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/mobilenet_v2.py
|
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
    This function rounds the channel number to the nearest value that is
    divisible by the divisor, without dropping below ``min_ratio`` times the
    original number.
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int, optional): The minimum value of the output channel.
            Default: None, which means the minimum value equals the divisor.
min_ratio (float, optional): The minimum ratio of the rounded channel
number to the original channel number. Default: 0.9.
Returns:
int: The modified output channel number
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not reduce the value by more than
    # (1 - min_ratio) of the original value.
if new_value < min_ratio * value:
new_value += divisor
return new_value
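# Examples (added for illustration): make_divisible(35, 8) == 32, the nearest
# multiple of 8; make_divisible(20, 16) == 32, because rounding to 16 would
# lose more than 10% of the original channels (min_ratio=0.9), so one extra
# divisor is added.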
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Ratio used to expand the number of channels of
            the hidden layer in InvertedResidual.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(nn.Module):
"""MobileNetV2 backbone.
Args:
pretrained (str | None): Name of pretrained model. Default: None.
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
pretrained=None,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU6', inplace=True),
norm_eval=False,
with_cp=False):
super().__init__()
self.pretrained = pretrained
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
            stride (int): Stride of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def init_weights(self):
if isinstance(self.pretrained, str):
logger = get_root_logger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
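# --- Usage sketch (added for illustration; the input size is an assumption) ---
if __name__ == '__main__':
    import torch
    model = MobileNetV2()
    model.init_weights()
    feats = model(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # with out_indices=(7, ): torch.Size([1, 1280, 7, 7])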
| 10,933
| 35.691275
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/c3d.py
|
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init, normal_init
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
@BACKBONES.register_module()
class C3D(nn.Module):
"""C3D backbone.
Args:
pretrained (str | None): Name of pretrained model.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
conv_cfg (dict | None): Config dict for convolution layer.
If set to None, it uses ``dict(type='Conv3d')`` to construct
layers. Default: None.
norm_cfg (dict | None): Config for norm layers. required keys are
``type``, Default: None.
act_cfg (dict | None): Config dict for activation layer. If set to
None, it uses ``dict(type='ReLU')`` to construct layers.
Default: None.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        init_std (float): Std value for initialization of fc layers.
            Default: 0.005.
"""
def __init__(self,
pretrained=None,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
dropout_ratio=0.5,
init_std=0.005):
super().__init__()
if conv_cfg is None:
conv_cfg = dict(type='Conv3d')
if act_cfg is None:
act_cfg = dict(type='ReLU')
self.pretrained = pretrained
self.style = style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.dropout_ratio = dropout_ratio
self.init_std = init_std
c3d_conv_param = dict(
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv1a = ConvModule(3, 64, **c3d_conv_param)
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.conv2a = ConvModule(64, 128, **c3d_conv_param)
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv3a = ConvModule(128, 256, **c3d_conv_param)
self.conv3b = ConvModule(256, 256, **c3d_conv_param)
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv4a = ConvModule(256, 512, **c3d_conv_param)
self.conv4b = ConvModule(512, 512, **c3d_conv_param)
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
self.conv5a = ConvModule(512, 512, **c3d_conv_param)
self.conv5b = ConvModule(512, 512, **c3d_conv_param)
self.pool5 = nn.MaxPool3d(
kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))
self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=self.dropout_ratio)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.Linear):
normal_init(m, std=self.init_std)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
the size of x is (num_batches, 3, 16, 112, 112).
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1a(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.pool2(x)
x = self.conv3a(x)
x = self.conv3b(x)
x = self.pool3(x)
x = self.conv4a(x)
x = self.conv4b(x)
x = self.pool4(x)
x = self.conv5a(x)
x = self.conv5b(x)
x = self.pool5(x)
x = x.flatten(start_dim=1)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
return x
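# --- Usage sketch (added for illustration) ---
if __name__ == '__main__':
    import torch
    model = C3D()
    model.init_weights()
    # The docstring above fixes the expected input size.
    feat = model(torch.randn(1, 3, 16, 112, 112))
    print(feat.shape)  # expected: torch.Size([1, 4096])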
| 4,771
| 33.085714
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/checkpoint.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import re
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
import mmcv
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.utils import mkdir_or_exist
from mmcv.runner.dist_utils import get_dist_info
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
class CheckpointLoader:
"""A general checkpoint loader to manage all schemes."""
_schemes = {}
@classmethod
def _register_scheme(cls, prefixes, loader, force=False):
if isinstance(prefixes, str):
prefixes = [prefixes]
else:
assert isinstance(prefixes, (list, tuple))
for prefix in prefixes:
if (prefix not in cls._schemes) or force:
cls._schemes[prefix] = loader
else:
raise KeyError(
f'{prefix} is already registered as a loader backend, '
'add "force=True" if you want to override it')
# sort, longer prefixes take priority
cls._schemes = OrderedDict(
sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))
@classmethod
def register_scheme(cls, prefixes, loader=None, force=False):
"""Register a loader to CheckpointLoader.
This method can be used as a normal class method or a decorator.
Args:
prefixes (str or list[str] or tuple[str]):
The prefix of the registered loader.
loader (function, optional): The loader function to be registered.
When this method is used as a decorator, loader is None.
Defaults to None.
force (bool, optional): Whether to override the loader
if the prefix has already been registered. Defaults to False.
"""
if loader is not None:
cls._register_scheme(prefixes, loader, force=force)
return
def _register(loader_cls):
cls._register_scheme(prefixes, loader_cls, force=force)
return loader_cls
return _register
@classmethod
def _get_checkpoint_loader(cls, path):
"""Finds a loader that supports the given path. Falls back to the local
loader if no other loader is found.
Args:
path (str): checkpoint path
Returns:
loader (function): checkpoint loader
"""
for p in cls._schemes:
if path.startswith(p):
return cls._schemes[p]
@classmethod
def load_checkpoint(cls, filename, map_location=None, logger=None):
"""load checkpoint through URL scheme path.
Args:
filename (str): checkpoint file name with given prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
logger (:mod:`logging.Logger`, optional): The logger for message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint_loader = cls._get_checkpoint_loader(filename)
class_name = checkpoint_loader.__name__
mmcv.print_log(f'Use {class_name} loader', logger)
return checkpoint_loader(filename, map_location)
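# Example of the decorator form of `register_scheme` (added for illustration;
# the 'myproto://' prefix and the loader below are made-up assumptions):
#
# @CheckpointLoader.register_scheme(prefixes='myproto://')
# def load_from_myproto(filename, map_location=None):
#     """Fetch the file behind the custom scheme, then torch.load it."""
#     ...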
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
"""load checkpoint by local file path.
Args:
filename (str): local checkpoint file path
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
"""load checkpoint through HTTP or HTTPS scheme path. In distributed
setting, this function only download checkpoint at local rank 0.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
model_dir (string, optional): directory in which to save the object,
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
"""load checkpoint through the file path prefixed with pavi. In distributed
setting, this function download ckpt at all ranks to different temporary
directories.
Args:
filename (str): checkpoint file path with pavi prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
    assert filename.startswith('pavi://'), \
        f'Expected filename to start with `pavi://`, but got {filename}'
model_path = filename[7:]
try:
from pavi import modelcloud
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='ceph'):
"""load checkpoint through the file path prefixed with s3. In distributed
setting, this function download ckpt at all ranks to different temporary
directories.
Args:
filename (str): checkpoint file path with s3 prefix
map_location (str, optional): Same as :func:`torch.load`.
        backend (str): The storage backend type. Only 'ceph' is supported
            here. Default: 'ceph'
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
"""load checkpoint through the file path prefixed with modelzoo or
torchvision.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_torchvision_models()
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_name = filename[11:]
else:
model_name = filename[14:]
return load_from_http(model_urls[model_name], map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
"""load checkpoint through the file path prefixed with open-mmlab or
openmmlab.
Args:
filename (str): checkpoint file path with open-mmlab or
openmmlab prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_external_models()
prefix_str = 'open-mmlab://'
if filename.startswith(prefix_str):
model_name = filename[13:]
else:
model_name = filename[12:]
prefix_str = 'openmmlab://'
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
f'of {prefix_str}{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_from_http(model_url, map_location=map_location)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
"""load checkpoint through the file path prefixed with mmcls.
Args:
filename (str): checkpoint file path with mmcls prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_from_http(
model_urls[model_name], map_location=map_location)
checkpoint = _process_mmcls_checkpoint(checkpoint)
return checkpoint
def _load_checkpoint(filename, map_location=None, logger=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str, optional): Same as :func:`torch.load`.
Default: None.
logger (:mod:`logging.Logger`, optional): The logger for error message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
return CheckpointLoader.load_checkpoint(filename, map_location, logger)
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
"""Load partial pretrained model with specific prefix.
Args:
prefix (str): The prefix of sub-module.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location=map_location)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if not prefix.endswith('.'):
prefix += '.'
prefix_len = len(prefix)
state_dict = {
k[prefix_len:]: v
for k, v in state_dict.items() if k.startswith(prefix)
}
assert state_dict, f'{prefix} is not in the pretrained model'
return state_dict
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
revise_keys=[(r'^module\.', '')]):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to strictly enforce that the keys in the
            checkpoint match the keys of the model.
logger (:mod:`logging.Logger` or None): The logger for error message.
revise_keys (list): A list of customized keywords to modify the
state_dict in checkpoint. Each item is a (pattern, replacement)
pair of the regular expression operations. Default: strip
the prefix 'module.' by [(r'^module\\.', '')].
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location, logger)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
    # Strip a possible 'backbone.' prefix from checkpoint keys. Note that
    # OrderedDict is already imported at module level.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k.replace('backbone.', '')
        new_state_dict[name] = v
    state_dict = new_state_dict
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
state_dict = OrderedDict(
{re.sub(p, r, k): v
for k, v in state_dict.items()})
# Keep metadata in state_dict
state_dict._metadata = metadata
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
# Keep metadata in state_dict
state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
        prefix (str): The prefix for parameters and buffers used in this
            module.
        keep_vars (bool): Whether to keep the variable property of the
            parameters; if False, tensors are detached.
    """
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi import exception
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except exception.NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
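# --- Usage sketch (added for illustration; the model and path are made up) ---
if __name__ == '__main__':
    import torch.nn as nn
    demo_model = nn.Linear(4, 2)
    with TemporaryDirectory() as tmp_dir:
        ckpt_path = osp.join(tmp_dir, 'demo.pth')
        save_checkpoint(demo_model, ckpt_path)
        # Round trip: plain file paths are handled by load_from_local,
        # which is registered under the empty '' prefix.
        load_checkpoint(demo_model, ckpt_path)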
| 24,129
| 34.021771
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet3d_slowonly.py
|
from ..builder import BACKBONES
from .resnet3d_slowfast import ResNet3dPathway
try:
from mmdet.models.builder import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
@BACKBONES.register_module()
class ResNet3dSlowOnly(ResNet3dPathway):
"""SlowOnly backbone based on ResNet3dPathway.
Args:
*args (arguments): Arguments same as :class:`ResNet3dPathway`.
conv1_kernel (Sequence[int]): Kernel size of the first conv layer.
Default: (1, 7, 7).
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
inflate (Sequence[int]): Inflate Dims of each block.
Default: (0, 0, 1, 1).
**kwargs (keyword arguments): Keywords arguments for
:class:`ResNet3dPathway`.
"""
def __init__(self,
*args,
lateral=False,
conv1_kernel=(1, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
with_pool2=False,
**kwargs):
super().__init__(
*args,
lateral=lateral,
conv1_kernel=conv1_kernel,
conv1_stride_t=conv1_stride_t,
pool1_stride_t=pool1_stride_t,
inflate=inflate,
with_pool2=with_pool2,
**kwargs)
assert not self.lateral
if mmdet_imported:
MMDET_BACKBONES.register_module()(ResNet3dSlowOnly)
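# --- Usage sketch (added for illustration; arguments follow the parent
# ResNet3d signature) ---
# backbone = ResNet3dSlowOnly(depth=50, pretrained=None)
# SlowOnly is the slow pathway of SlowFast used stand-alone, which is why
# `lateral` must stay False (see the assert above).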
| 1,643
| 30.018868
| 74
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet3d_csn.py
|
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.utils import _BatchNorm
from ..builder import BACKBONES
from .resnet3d import Bottleneck3d, ResNet3d
class CSNBottleneck3d(Bottleneck3d):
"""Channel-Separated Bottleneck Block.
This module is proposed in
"Video Classification with Channel-Separated Convolutional Networks"
    Link: https://arxiv.org/abs/1904.02811
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
            3x3x3 depthwise convolution, which is derived from the
            interaction-preserved bottleneck block by removing the extra
            1x1x1 convolution, i.e., Interaction-reduced channel-separated
            bottleneck block.
Default: 'ir'.
args (position arguments): Position arguments for Bottleneck.
kwargs (dict, optional): Keyword arguments for Bottleneck.
"""
def __init__(self,
inplanes,
planes,
*args,
bottleneck_mode='ir',
**kwargs):
super(CSNBottleneck3d, self).__init__(inplanes, planes, *args,
**kwargs)
self.bottleneck_mode = bottleneck_mode
conv2 = []
if self.bottleneck_mode == 'ip':
conv2.append(
nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False))
conv2_kernel_size = self.conv2.conv.kernel_size
conv2_stride = self.conv2.conv.stride
conv2_padding = self.conv2.conv.padding
conv2_dilation = self.conv2.conv.dilation
conv2_bias = bool(self.conv2.conv.bias)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=conv2_stride,
padding=conv2_padding,
dilation=conv2_dilation,
bias=conv2_bias,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
groups=planes)
conv2.append(self.conv2)
self.conv2 = nn.Sequential(*conv2)
@BACKBONES.register_module()
class ResNet3dCSN(ResNet3d):
"""ResNet backbone for CSN.
Args:
depth (int): Depth of ResNetCSN, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
temporal_strides (tuple[int]):
Temporal strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
conv1_kernel (tuple[int]): Kernel size of the first conv layer.
Default: (3, 7, 7).
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
norm_cfg (dict): Config for norm layers. required keys are `type` and
`requires_grad`.
Default: dict(type='BN3d', requires_grad=True, eps=1e-3).
        inflate_style (str): `3x1x1` or `3x3x3`, which determines the kernel
            sizes and padding strides for conv1 and conv2 in each block.
            Default: '3x3x3'.
bottleneck_mode (str): Determine which ways to factorize a 3D
bottleneck block using channel-separated convolutional networks.
If set to 'ip', it will replace the 3x3x3 conv2 layer with a
1x1x1 traditional convolution and a 3x3x3 depthwise
convolution, i.e., Interaction-preserved channel-separated
bottleneck block.
If set to 'ir', it will replace the 3x3x3 conv2 layer with a
            3x3x3 depthwise convolution, which is derived from the
            interaction-preserved bottleneck block by removing the extra
            1x1x1 convolution,
            Default: 'ir'.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
depth,
pretrained,
temporal_strides=(1, 2, 2, 2),
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
norm_cfg=dict(type='BN3d', requires_grad=True, eps=1e-3),
inflate_style='3x3x3',
bottleneck_mode='ir',
bn_frozen=False,
**kwargs):
self.arch_settings = {
# 18: (BasicBlock3d, (2, 2, 2, 2)),
# 34: (BasicBlock3d, (3, 4, 6, 3)),
50: (CSNBottleneck3d, (3, 4, 6, 3)),
101: (CSNBottleneck3d, (3, 4, 23, 3)),
152: (CSNBottleneck3d, (3, 8, 36, 3))
}
self.bn_frozen = bn_frozen
if bottleneck_mode not in ['ip', 'ir']:
raise ValueError(f'Bottleneck mode must be "ip" or "ir",'
f'but got {bottleneck_mode}.')
super(ResNet3dCSN, self).__init__(
depth,
pretrained,
temporal_strides=temporal_strides,
conv1_kernel=conv1_kernel,
conv1_stride_t=conv1_stride_t,
pool1_stride_t=pool1_stride_t,
norm_cfg=norm_cfg,
inflate_style=inflate_style,
bottleneck_mode=bottleneck_mode,
**kwargs)
def train(self, mode=True):
super(ResNet3d, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if self.bn_frozen:
for param in m.parameters():
param.requires_grad = False
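# --- Usage sketch (added for illustration) ---
# An interaction-reduced CSN backbone with 152 layers:
# backbone = ResNet3dCSN(depth=152, pretrained=None, bottleneck_mode='ir')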
| 6,234
| 40.845638
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/swin_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import DropPath, trunc_normal_
# from mmcv.runner import load_checkpoint
from .checkpoint import load_checkpoint
from mmaction.utils import get_root_logger
from ..builder import BACKBONES
from functools import reduce, lru_cache
from operator import mul
from einops import rearrange
from mmaction.models.backbones.topk import PatchNet
import math
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, D, H, W, C)
window_size (tuple[int]): window size
Returns:
        windows: (B*num_windows, window_size[0]*window_size[1]*window_size[2], C)
"""
B, D, H, W, C = x.shape
x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)
windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)
return windows
def window_reverse(windows, window_size, B, D, H, W):
"""
    Args:
        windows: (B*num_windows, window_size[0], window_size[1], window_size[2], C)
        window_size (tuple[int]): Window size
        B (int): Batch size
        D (int): Temporal length of video
        H (int): Height of image
        W (int): Width of image
Returns:
x: (B, D, H, W, C)
"""
x = windows.view(B, D // window_size[0], H // window_size[1], W // window_size[2], window_size[0], window_size[1], window_size[2], -1)
x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
return x
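# Round-trip note (added for illustration): when D, H and W are divisible by
# the window size, window_reverse(window_partition(x, ws), ws, B, D, H, W)
# reconstructs x exactly, since the two permutations are inverses of each
# other.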
def get_window_size(x_size, window_size, shift_size=None):
use_window_size = list(window_size)
if shift_size is not None:
use_shift_size = list(shift_size)
for i in range(len(x_size)):
if x_size[i] <= window_size[i]:
use_window_size[i] = x_size[i]
if shift_size is not None:
use_shift_size[i] = 0
if shift_size is None:
return tuple(use_window_size)
else:
return tuple(use_window_size), tuple(use_shift_size)
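# Example (added for illustration): if the input clip has a single frame,
# get_window_size((1, 56, 56), (2, 7, 7), (1, 3, 3)) returns
# ((1, 7, 7), (0, 3, 3)): the temporal window shrinks to the input size and
# its shift is disabled.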
class WindowAttention3D(nn.Module):
""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The temporal length, height and width of the window.
num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: False
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wd, Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1), num_heads)) # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_d = torch.arange(self.window_size[0])
coords_h = torch.arange(self.window_size[1])
coords_w = torch.arange(self.window_size[2])
coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w)) # 3, Wd, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 3, Wd*Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 3, Wd*Wh*Ww, Wd*Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wd*Wh*Ww, Wd*Wh*Ww, 3
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 2] += self.window_size[2] - 1
relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)
relative_position_index = relative_coords.sum(-1) # Wd*Wh*Ww, Wd*Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
""" Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, N, N) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index[:N, :N].reshape(-1)].reshape(
N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
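# --- Usage sketch (added for illustration; the sizes are assumptions) ---
# attn = WindowAttention3D(dim=96, window_size=(2, 7, 7), num_heads=3)
# x = torch.randn(8, 2 * 7 * 7, 96)  # (num_windows * B, N, C)
# out = attn(x)                      # -> shape (8, 98, 96), same as the input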
class SwinTransformerBlock3D(nn.Module):
""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): Window size.
shift_size (tuple[int]): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, num_heads, window_size=(2,7,7), shift_size=(0,0,0),
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_checkpoint=False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
        self.use_checkpoint = use_checkpoint
        assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must be in [0, window_size)"
        assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must be in [0, window_size)"
        assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention3D(
dim, window_size=self.window_size, num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward_part1(self, x, mask_matrix):
B, D, H, W, C = x.shape
window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)
x = self.norm1(x)
# pad feature maps to multiples of window size
pad_l = pad_t = pad_d0 = 0
pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]
pad_b = (window_size[1] - H % window_size[1]) % window_size[1]
pad_r = (window_size[2] - W % window_size[2]) % window_size[2]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
_, Dp, Hp, Wp, _ = x.shape
# cyclic shift
if any(i > 0 for i in shift_size):
shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(shifted_x, window_size) # B*nW, Wd*Wh*Ww, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # B*nW, Wd*Wh*Ww, C
# merge windows
attn_windows = attn_windows.view(-1, *(window_size+(C,)))
shifted_x = window_reverse(attn_windows, window_size, B, Dp, Hp, Wp) # B D' H' W' C
# reverse cyclic shift
if any(i > 0 for i in shift_size):
x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
else:
x = shifted_x
        if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
x = x[:, :D, :H, :W, :].contiguous()
return x
def forward_part2(self, x):
return self.drop_path(self.mlp(self.norm2(x)))
def forward(self, x, mask_matrix):
""" Forward function.
Args:
x: Input feature, tensor size (B, D, H, W, C).
mask_matrix: Attention mask for cyclic shift.
"""
shortcut = x
if self.use_checkpoint:
x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
else:
x = self.forward_part1(x, mask_matrix)
x = shortcut + self.drop_path(x)
if self.use_checkpoint:
x = x + checkpoint.checkpoint(self.forward_part2, x)
else:
x = x + self.forward_part2(x)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, D, H, W, C).
"""
B, D, H, W, C = x.shape
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, :, 0::2, 0::2, :] # B D H/2 W/2 C
x1 = x[:, :, 1::2, 0::2, :] # B D H/2 W/2 C
x2 = x[:, :, 0::2, 1::2, :] # B D H/2 W/2 C
x3 = x[:, :, 1::2, 1::2, :] # B D H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B D H/2 W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
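# --- Illustrative sketch (not part of the original code): PatchMerging halves
# H and W and doubles the channel dim, leaving the temporal axis untouched.
# Tensor sizes below are illustrative assumptions.
def _demo_patch_merging():
    merge = PatchMerging(dim=96)
    x = torch.randn(2, 8, 56, 56, 96)   # (B, D, H, W, C)
    out = merge(x)
    assert out.shape == (2, 8, 28, 28, 192)
    return out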
# cache the attention mask computed for each stage's padded resolution
@lru_cache()
def compute_mask(D, H, W, window_size, shift_size, device):
img_mask = torch.zeros((1, D, H, W, 1), device=device) # 1 Dp Hp Wp 1
cnt = 0
for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0],None):
for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1],None):
for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2],None):
img_mask[:, d, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, window_size) # nW, ws[0]*ws[1]*ws[2], 1
mask_windows = mask_windows.squeeze(-1) # nW, ws[0]*ws[1]*ws[2]
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
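# --- Illustrative sketch (not part of the original code): build the SW-MSA
# attention mask for a toy padded volume. With window (2,7,7) and shift
# (1,3,3), a 4x14x14 volume yields 2*2*2 = 8 windows of 2*7*7 = 98 tokens.
def _demo_compute_mask():
    mask = compute_mask(4, 14, 14, (2, 7, 7), (1, 3, 3), torch.device('cpu'))
    assert mask.shape == (8, 98, 98)  # (nW, tokens, tokens)
    # entries are 0 for token pairs allowed to attend, -100 otherwise
    return mask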
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
        dim (int): Number of feature channels.
        depth (int): Depth (number of blocks) of this stage.
        num_heads (int): Number of attention heads.
        window_size (tuple[int]): Local window size. Default: (2,7,7).
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: False
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
"""
def __init__(self,
dim,
depth,
num_heads,
window_size=(2,7,7),
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False):
super().__init__()
self.window_size = window_size
self.shift_size = tuple(i // 2 for i in window_size)
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock3D(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=(0,0,0) if (i % 2 == 0) else self.shift_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
)
for i in range(depth)])
self.downsample = downsample
if self.downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, C, D, H, W).
"""
# calculate attention mask for SW-MSA
B, C, D, H, W = x.shape
window_size, shift_size = get_window_size((D,H,W), self.window_size, self.shift_size)
x = rearrange(x, 'b c d h w -> b d h w c')
Dp = int(np.ceil(D / window_size[0])) * window_size[0]
Hp = int(np.ceil(H / window_size[1])) * window_size[1]
Wp = int(np.ceil(W / window_size[2])) * window_size[2]
attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
for blk in self.blocks:
x = blk(x, attn_mask)
x = x.view(B, D, H, W, -1)
if self.downsample is not None:
x = self.downsample(x)
x = rearrange(x, 'b d h w c -> b c d h w')
return x
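# --- Illustrative sketch (not part of the original code): one stage with two
# blocks (W-MSA followed by SW-MSA). Without a downsample module the stage is
# shape-preserving; input sizes below are assumptions.
def _demo_basic_layer():
    layer = BasicLayer(dim=96, depth=2, num_heads=3, window_size=(2, 7, 7))
    x = torch.randn(1, 96, 2, 14, 14)  # (B, C, D, H, W)
    out = layer(x)
    assert out.shape == x.shape
    return out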
class PatchEmbed3D(nn.Module):
""" Video to Patch Embedding.
Args:
        num_frames (int): Number of frames in the input clip. Default: 32.
        image_size (int): Spatial size of the input frames. Default: 224.
        patch_size (tuple[int]): Patch token size. Default: (2,4,4).
in_chans (int): Number of input video channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, num_frames=32, image_size=224, patch_size=(2,4,4), in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.num_patches = (num_frames // patch_size[0], image_size // patch_size[1], image_size // patch_size[2])
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, D, H, W = x.size()
if W % self.patch_size[2] != 0:
x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))
if H % self.patch_size[1] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))
if D % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0]))
x = self.proj(x) # B C D Wh Ww
if self.norm is not None:
D, Wh, Ww = x.size(2), x.size(3), x.size(4)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww)
return x
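# --- Illustrative sketch (not part of the original code): a (2,4,4) patch
# size turns a 32-frame 224x224 clip into a 16x56x56 grid of 96-dim tokens.
def _demo_patch_embed3d():
    embed = PatchEmbed3D(num_frames=32, image_size=224, patch_size=(2, 4, 4),
                         in_chans=3, embed_dim=96)
    x = torch.randn(1, 3, 32, 224, 224)  # (B, C, D, H, W)
    out = embed(x)
    assert out.shape == (1, 96, 16, 56, 56)
    return out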
@BACKBONES.register_module()
class SwinTransformer3D(nn.Module):
""" Swin Transformer backbone.
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
patch_size (int | tuple(int)): Patch size. Default: (4,4,4).
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention heads of each stage.
        window_size (tuple[int]): Window size. Default: (2,7,7).
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer: Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
"""
def __init__(self,
pretrained=None,
pretrained2d=True,
num_frames=32,
image_size=224,
patch_size=(4,4,4),
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=(2,7,7),
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
patch_norm=False,
frozen_stages=-1,
use_checkpoint=False,
time_pruning_loc=None,
time_left_ratio=[0.5],
time_score='tconv',
space_pruning_loc=None,
space_left_ratio=[0.5],
space_score='spool',
sigma=0.05):
super().__init__()
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.frozen_stages = frozen_stages
self.window_size = window_size
self.patch_size = patch_size
# split image into non-overlapping patches
self.patch_embed = PatchEmbed3D(
num_frames=num_frames, image_size=image_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
temporal_size = num_patches[0]
spatial_size = num_patches[1] * num_patches[2]
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
        self.time_pruning_loc = time_pruning_loc
        self.space_pruning_loc = space_pruning_loc
        self.sigma_max = sigma
        self.sigma = sigma
# build layers
out_dims = []
self.layers = nn.ModuleList()
embedding_temporal_size = temporal_size
embedding_spatial_size = spatial_size
time_score_predictor = nn.ModuleList()
space_score_predictor = nn.ModuleList()
s_count = 0
t_count = 0
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2**i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if i_layer<self.num_layers-1 else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
out_dims.append(int(embed_dim * 2**i_layer))
if self.time_pruning_loc is not None and i_layer in self.time_pruning_loc:
left_frames = int(embedding_temporal_size * time_left_ratio[t_count])
t_count += 1
patchnet = PatchNet(score=time_score, k=left_frames, in_channels = int(embed_dim * 2**i_layer))
time_score_predictor.append(patchnet)
embedding_temporal_size = left_frames
if self.space_pruning_loc is not None and i_layer in self.space_pruning_loc:
left_patches = int(embedding_spatial_size * space_left_ratio[s_count])
s_count += 1
patchnet = PatchNet(score=space_score, k=left_patches, in_channels = int(embed_dim * 2**i_layer))
space_score_predictor.append(patchnet)
embedding_spatial_size = left_patches
embedding_spatial_size = embedding_spatial_size // 4
if len(time_score_predictor) > 0:
self.time_score_predictor = time_score_predictor
if len(space_score_predictor) > 0:
self.space_score_predictor = space_score_predictor
self.num_features = int(embed_dim * 2**(self.num_layers-1))
# add a norm layer for each output
self.norm = norm_layer(self.num_features)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1:
self.pos_drop.eval()
for i in range(0, self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def inflate_weights(self, logger):
"""Inflate the swin2d parameters to swin3d.
        The differences between swin3d and swin2d mainly lie in an extra
        axis. To utilize the pretrained parameters of the 2d model,
        the weights of the swin2d model should be inflated to fit the shapes
        of the 3d counterpart.
        Args:
            logger (logging.Logger): The logger used to print
                debugging information.
        """
checkpoint = torch.load(self.pretrained, map_location='cpu')
state_dict = checkpoint['model']
# delete relative_position_index since we always re-init it
relative_position_index_keys = [k for k in state_dict.keys() if "relative_position_index" in k]
for k in relative_position_index_keys:
del state_dict[k]
# delete attn_mask since we always re-init it
attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]
for k in attn_mask_keys:
del state_dict[k]
state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).repeat(1,1,self.patch_size[0],1,1) / self.patch_size[0]
# bicubic interpolate relative_position_bias_table if not match
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for k in relative_position_bias_table_keys:
relative_position_bias_table_pretrained = state_dict[k]
relative_position_bias_table_current = self.state_dict()[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
L2 = (2*self.window_size[1]-1) * (2*self.window_size[2]-1)
wd = self.window_size[0]
if nH1 != nH2:
logger.warning(f"Error in loading {k}, passing")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(2*self.window_size[1]-1, 2*self.window_size[2]-1),
mode='bicubic')
relative_position_bias_table_pretrained = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
state_dict[k] = relative_position_bias_table_pretrained.repeat(2*wd-1,1)
msg = self.load_state_dict(state_dict, strict=False)
logger.info(msg)
logger.info(f"=> loaded successfully '{self.pretrained}'")
del checkpoint
torch.cuda.empty_cache()
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
if self.pretrained2d:
# Inflate 2D model into 3D model.
self.inflate_weights(logger)
else:
# Directly load 3D model.
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def update_sigma(self, cur_step, total_steps):
process = cur_step / total_steps
sigma_multiplier = 1 - process
self.sigma = self.sigma_max * sigma_multiplier
def forward(self, x):
"""Forward function."""
x = self.patch_embed(x)
T = x.size(2)
N = x.size(3) * x.size(4)
# B C T H W
x = self.pos_drop(x)
t_count = 0
s_count = 0
for i, layer in enumerate(self.layers):
if hasattr(self, 'time_score_predictor') and i in self.time_pruning_loc:
x = self.time_score_predictor[t_count](x, 'time', N, T, self.sigma)
T = x.size(2)
t_count += 1
if hasattr(self, 'space_score_predictor') and i in self.space_pruning_loc:
x = self.space_score_predictor[s_count](x, 'space', N, T, self.sigma)
N = x.size(3) * x.size(4)
s_count += 1
x = layer(x.contiguous())
T = x.size(2)
N = x.size(3) * x.size(4)
x = rearrange(x, 'n c d h w -> n d h w c')
x = self.norm(x)
x = rearrange(x, 'n d h w c -> n c d h w')
return x
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer3D, self).train(mode)
self._freeze_stages()
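# --- Illustrative smoke-test sketch (not part of the original code): build
# the Swin-T backbone without token pruning and check the output feature map.
# Shapes assume the default 32-frame 224x224 input; with patch size (2,4,4)
# and three PatchMerging stages, features end up at 768 x 16 x 7 x 7.
def _demo_swin_transformer3d():
    model = SwinTransformer3D(patch_size=(2, 4, 4),
                              depths=[2, 2, 6, 2],
                              num_heads=[3, 6, 12, 24])
    model.init_weights()                 # random init, since pretrained is None
    x = torch.randn(1, 3, 32, 224, 224)  # (B, C, T, H, W)
    feat = model(x)
    assert feat.shape == (1, 768, 16, 7, 7)
    return feat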
| 30,486
| 40.032301
| 156
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet.py
|
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.utils import checkpoint as cp
from ...utils import get_root_logger
from ..builder import BACKBONES
class BasicBlock(nn.Module):
"""Basic block for ResNet.
Args:
inplanes (int): Number of channels for the input in first conv2d layer.
planes (int): Number of channels produced by some norm/conv2d layers.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.style = style
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
assert not with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
Args:
inplanes (int):
Number of channels for the input feature in first conv layer.
        planes (int):
            Number of channels produced by some norm layers and conv layers.
stride (int): Spatial stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
self.inplanes = inplanes
self.planes = planes
if style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.norm_cfg = norm_cfg
self.with_cp = with_cp
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for ResNet.
Args:
block: (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature in each block.
planes (int): Number of channels for the output feature in each block.
blocks (int): Number of residual blocks.
stride (int): Stride in the conv layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
style=style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
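# --- Illustrative sketch (not part of the original code): build the first
# Bottleneck stage of a ResNet-50. A 1x1 downsample branch is added because
# inplanes (64) != planes * expansion (256). Input size is an assumption.
def _demo_make_res_layer():
    import torch
    layer = make_res_layer(
        Bottleneck, inplanes=64, planes=64, blocks=3,
        norm_cfg=dict(type='BN', requires_grad=True),
        act_cfg=dict(type='ReLU', inplace=True))
    x = torch.randn(1, 64, 56, 56)
    out = layer(x)
    assert out.shape == (1, 256, 56, 56)
    return out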
@BACKBONES.register_module()
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
dilations (Sequence[int]): Dilation of each stage.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: ``pytorch``.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
partial_bn (bool): Whether to use partial bn. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained=None,
torchvision_pretrain=True,
in_channels=3,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
style='pytorch',
frozen_stages=-1,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
partial_bn=False,
with_cp=False):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.in_channels = in_channels
self.pretrained = pretrained
self.torchvision_pretrain = torchvision_pretrain
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.style = style
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.partial_bn = partial_bn
self.with_cp = with_cp
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@staticmethod
def _load_conv_params(conv, state_dict_tv, module_name_tv,
loaded_param_names):
"""Load the conv parameters of resnet from torchvision.
Args:
conv (nn.Module): The destination conv module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding conv module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
weight_tv_name = module_name_tv + '.weight'
if conv.weight.data.shape == state_dict_tv[weight_tv_name].shape:
conv.weight.data.copy_(state_dict_tv[weight_tv_name])
loaded_param_names.append(weight_tv_name)
if getattr(conv, 'bias') is not None:
bias_tv_name = module_name_tv + '.bias'
if conv.bias.data.shape == state_dict_tv[bias_tv_name].shape:
conv.bias.data.copy_(state_dict_tv[bias_tv_name])
loaded_param_names.append(bias_tv_name)
@staticmethod
def _load_bn_params(bn, state_dict_tv, module_name_tv, loaded_param_names):
"""Load the bn parameters of resnet from torchvision.
Args:
bn (nn.Module): The destination bn module.
state_dict_tv (OrderedDict): The state dict of pretrained
torchvision model.
module_name_tv (str): The name of corresponding bn module in the
torchvision model.
loaded_param_names (list[str]): List of parameters that have been
loaded.
"""
for param_name, param in bn.named_parameters():
param_tv_name = f'{module_name_tv}.{param_name}'
param_tv = state_dict_tv[param_tv_name]
if param.data.shape == param_tv.shape:
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
for param_name, param in bn.named_buffers():
param_tv_name = f'{module_name_tv}.{param_name}'
# some buffers like num_batches_tracked may not exist
if param_tv_name in state_dict_tv:
param_tv = state_dict_tv[param_tv_name]
if param.data.shape == param_tv.shape:
param.data.copy_(param_tv)
loaded_param_names.append(param_tv_name)
def _load_torchvision_checkpoint(self, logger=None):
"""Initiate the parameters from torchvision pretrained checkpoint."""
state_dict_torchvision = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_torchvision:
state_dict_torchvision = state_dict_torchvision['state_dict']
loaded_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
self._load_conv_params(module.conv, state_dict_torchvision,
original_conv_name, loaded_param_names)
self._load_bn_params(module.bn, state_dict_torchvision,
original_bn_name, loaded_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_torchvision.keys()) - set(loaded_param_names)
if remaining_names:
logger.info(
f'These parameters in pretrained checkpoint are not loaded'
f': {remaining_names}')
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
if self.torchvision_pretrain:
# torchvision's
self._load_torchvision_checkpoint(logger)
else:
# ours
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in self.conv1.modules():
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def _partial_bn(self):
logger = get_root_logger()
logger.info('Freezing BatchNorm2D except the first one.')
count_bn = 0
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
if mode and self.partial_bn:
self._partial_bn()
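# --- Illustrative smoke-test sketch (not part of the original code): a
# randomly initialized ResNet-50 maps a 224x224 image to a 2048 x 7 x 7
# feature map (out_indices defaults to the last stage only).
def _demo_resnet():
    import torch
    model = ResNet(depth=50, pretrained=None)
    model.init_weights()
    x = torch.randn(1, 3, 224, 224)
    feat = model(x)
    assert feat.shape == (1, 2048, 7, 7)
    return feat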
| 21,448
| 35.292724
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet_audio.py
|
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.utils import _ntuple
from ...utils import get_root_logger
from ..builder import BACKBONES
class Bottleneck2dAudio(nn.Module):
"""Bottleneck2D block for ResNet2D.
Args:
        inplanes (int): Number of channels for the input in first conv2d layer.
        planes (int): Number of channels produced by some norm/conv2d layers.
        stride (int | tuple[int]): Stride in the conv layer. Default: 2.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module): Downsample layer. Default: None.
factorize (bool): Whether to factorize kernel. Default: True.
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
stride=2,
dilation=1,
downsample=None,
factorize=True,
norm_cfg=None,
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.factorize = factorize
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.conv1_stride = 1
self.conv2_stride = stride
conv1_kernel_size = (1, 1)
conv1_padding = 0
conv2_kernel_size = (3, 3)
conv2_padding = (dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=conv1_kernel_size,
padding=conv1_padding,
dilation=dilation,
norm_cfg=self.norm_cfg,
bias=False)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=conv2_kernel_size,
stride=stride,
padding=conv2_padding,
dilation=dilation,
bias=False,
conv_cfg=dict(type='ConvAudio') if factorize else dict(
type='Conv'),
norm_cfg=None,
act_cfg=None)
self.conv3 = ConvModule(
2 * planes if factorize else planes,
planes * self.expansion,
kernel_size=1,
bias=False,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class ResNetAudio(nn.Module):
"""ResNet 2d audio backbone. Reference:
<https://arxiv.org/abs/2001.08740>`_.
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
pretrained (str | None): Name of pretrained model.
in_channels (int): Channel num of input features. Default: 1.
base_channels (int): Channel num of stem output features. Default: 32.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of residual blocks of each stage.
Default: (1, 2, 2, 2).
dilations (Sequence[int]): Dilation of each stage.
Default: (1, 1, 1, 1).
conv1_kernel (int): Kernel size of the first conv layer. Default: 9.
conv1_stride (int | tuple[int]): Stride of the first conv layer.
Default: 1.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
factorize (Sequence[int]): factorize Dims of each block for audio.
Default: (1, 1, 0, 0).
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
        conv_cfg (dict): Config for conv layers. Default: dict(type='Conv').
        norm_cfg (dict):
            Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN2d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
"""
arch_settings = {
# 18: (BasicBlock2dAudio, (2, 2, 2, 2)),
# 34: (BasicBlock2dAudio, (3, 4, 6, 3)),
50: (Bottleneck2dAudio, (3, 4, 6, 3)),
101: (Bottleneck2dAudio, (3, 4, 23, 3)),
152: (Bottleneck2dAudio, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
in_channels=1,
num_stages=4,
base_channels=32,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
conv1_kernel=9,
conv1_stride=1,
frozen_stages=-1,
factorize=(1, 1, 0, 0),
norm_eval=False,
with_cp=False,
conv_cfg=dict(type='Conv'),
norm_cfg=dict(type='BN2d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
zero_init_residual=True):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.dilations = dilations
self.conv1_kernel = conv1_kernel
self.conv1_stride = conv1_stride
self.frozen_stages = frozen_stages
self.stage_factorization = _ntuple(num_stages)(factorize)
self.norm_eval = norm_eval
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
factorize=self.stage_factorization[i],
norm_cfg=self.norm_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
factorize=1,
norm_cfg=None,
with_cp=False):
"""Build residual layer for ResNetAudio.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
            stride (int): Stride of the first block. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
factorize (int | Sequence[int]): Determine whether to factorize
for each block. Default: 1.
            norm_cfg (dict):
                Config for norm layers. Required keys are `type` and
                `requires_grad`. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
A residual layer for the given config.
"""
factorize = factorize if not isinstance(
factorize, int) else (factorize, ) * blocks
assert len(factorize) == blocks
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
factorize=(factorize[0] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
factorize=(factorize[i] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=self.conv1_stride,
bias=False,
conv_cfg=dict(type='ConvAudio', op='sum'),
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in [self.conv1.conv, self.conv1.bn]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck2dAudio):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
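# --- Illustrative sketch (not part of the original code): run a random
# spectrogram through ResNetAudio-50. The input size is an assumption, and the
# exact output resolution depends on the padding used by mmaction's ConvAudio
# operator, so only the channel dim (4 * 32 * 2**3 = 1024) is checked.
def _demo_resnet_audio():
    import torch
    model = ResNetAudio(depth=50, pretrained=None)
    model.init_weights()
    spec = torch.randn(1, 1, 128, 80)  # (B, 1, freq, time) mel-spectrogram
    feat = model(spec)
    assert feat.shape[1] == model.feat_dim == 1024
    return feat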
| 13,252
| 34.435829
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/topk.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import einops
from einops import rearrange
from math import sqrt
class PredictorLG(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, embed_dim=384):
super().__init__()
self.in_conv = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, embed_dim),
nn.GELU()
)
self.out_conv = nn.Sequential(
nn.Linear(embed_dim, embed_dim // 2),
nn.GELU(),
nn.Linear(embed_dim // 2, embed_dim // 4),
nn.GELU(),
nn.Linear(embed_dim // 4, 1)
)
def forward(self, x):
x = self.in_conv(x)
B, N, C = x.size()
local_x = x[:,:, :C//2]
global_x = torch.mean(x[:,:, C//2:], dim=1, keepdim=True)
x = torch.cat([local_x, global_x.expand(B, N, C//2)], dim=-1)
return self.out_conv(x)
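# --- Illustrative sketch (not part of the original code): the predictor maps
# (B, N, C) tokens to one scalar score per token by mixing each token's local
# half of the features with a globally pooled half.
def _demo_predictor_lg():
    net = PredictorLG(embed_dim=96)
    tokens = torch.randn(2, 49, 96)  # (B, N, C)
    scores = net(tokens)
    assert scores.shape == (2, 49, 1)
    return scores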
def HardTopK(k, x):
topk_results = torch.topk(x, k=k, dim=-1, sorted=False)
indices = topk_results.indices # b, k
indices = torch.sort(indices, dim=-1).values
return indices
class PerturbedTopK(nn.Module):
def __init__(self, k: int, num_samples: int = 1000):
super(PerturbedTopK, self).__init__()
self.num_samples = num_samples
self.k = k
def __call__(self, x, sigma):
return PerturbedTopKFunction.apply(x, self.k, self.num_samples, sigma)
class PerturbedTopKFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, k: int, num_samples: int = 1000, sigma: float = 0.05):
b, d = x.shape
# for Gaussian: noise and gradient are the same.
noise = torch.normal(mean=0.0, std=1.0, size=(b, num_samples, d)).to(x.device)
perturbed_x = x[:, None, :] + noise * sigma # b, nS, d
topk_results = torch.topk(perturbed_x, k=k, dim=-1, sorted=False)
indices = topk_results.indices # b, nS, k
indices = torch.sort(indices, dim=-1).values # b, nS, k
# b, nS, k, d
perturbed_output = torch.nn.functional.one_hot(indices, num_classes=d).float()
indicators = perturbed_output.mean(dim=1) # b, k, d
# constants for backward
ctx.k = k
ctx.num_samples = num_samples
ctx.sigma = sigma
# tensors for backward
ctx.perturbed_output = perturbed_output
ctx.noise = noise
return indicators
@staticmethod
def backward(ctx, grad_output):
if grad_output is None:
return tuple([None] * 5)
noise_gradient = ctx.noise
if ctx.sigma <= 1e-20:
b, _, k, d = ctx.perturbed_output.size()
expected_gradient = torch.zeros(b, k, d).to(grad_output.device)
else:
expected_gradient = (
torch.einsum("bnkd,bnd->bkd", ctx.perturbed_output, noise_gradient)
/ ctx.num_samples
/ (ctx.sigma)
)
grad_input = torch.einsum("bkd,bkd->bd", grad_output, expected_gradient)
return (grad_input,) + tuple([None] * 5)
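# --- Illustrative sketch (not part of the original code): perturbed top-k
# returns soft indicator matrices instead of indices, so gradients can flow
# back into the scores. The sample count and sigma below are assumptions.
def _demo_perturbed_topk():
    scores = torch.rand(2, 10, requires_grad=True)
    indicators = PerturbedTopKFunction.apply(scores, 3, 100, 0.05)
    assert indicators.shape == (2, 3, 10)  # (B, k, d); each row sums to 1
    indicators.sum().backward()            # gradients reach the scores
    assert scores.grad is not None and scores.grad.shape == scores.shape
    return indicators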
def batched_index_select(input, dim, index):
for i in range(1, len(input.shape)):
if i != dim:
index = index.unsqueeze(i)
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
def extract_patches_from_indices(x, indices):
batch_size, _, channels = x.shape
k = indices.shape[-1]
patches = x
patches = batched_index_select(patches, 1, indices)
patches = patches.contiguous().view(batch_size, k, channels)
return patches
def extract_patches_from_indicators(x, indicators):
indicators = rearrange(indicators, "b d k -> b k d")
patches = torch.bmm(indicators, x)
return patches
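# --- Illustrative sketch (not part of the original code): with a near-zero
# sigma, (almost surely) every noise sample selects the same indices, so the
# soft indicator path collapses to the hard index path and both extract the
# same tokens.
def _demo_patch_extraction():
    x = torch.randn(2, 10, 4)  # (B, N, C) tokens
    scores = torch.rand(2, 10)
    hard = extract_patches_from_indices(x, HardTopK(3, scores))      # (2, 3, 4)
    ind = PerturbedTopKFunction.apply(scores, 3, 100, 1e-6)          # (B, k, N)
    soft = extract_patches_from_indicators(x, ind.permute(0, 2, 1))  # (2, 3, 4)
    assert torch.allclose(soft, hard, atol=1e-4)
    return soft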
def min_max_norm(x):
    flatten_score_min = x.min(dim=-1, keepdim=True).values
    flatten_score_max = x.max(dim=-1, keepdim=True).values
norm_flatten_score = (x - flatten_score_min) / (flatten_score_max - flatten_score_min + 1e-5)
return norm_flatten_score
class PatchNet(nn.Module):
def __init__(self, score, k, in_channels, stride=None, num_samples=500):
super(PatchNet, self).__init__()
self.k = k
self.anchor_size = int(sqrt(k))
self.stride = stride
self.score = score
self.in_channels = in_channels
self.num_samples = num_samples
if score == 'tpool':
self.score_network = PredictorLG(embed_dim=2*in_channels)
elif score == 'spatch':
self.score_network = PredictorLG(embed_dim=in_channels)
self.init = torch.eye(self.k).unsqueeze(0).unsqueeze(-1).cuda()
def get_indicator(self, scores, k, sigma):
indicator = PerturbedTopKFunction.apply(scores, k, self.num_samples, sigma)
indicator = einops.rearrange(indicator, "b k d -> b d k")
return indicator
def get_indices(self, scores, k):
indices = HardTopK(k, scores)
return indices
def generate_random_indices(self, b, n, k):
indices = []
for _ in range(b):
indice = np.sort(np.random.choice(n, k, replace=False))
indices.append(indice)
indices = np.vstack(indices)
indices = torch.Tensor(indices).long().cuda()
return indices
def generate_uniform_indices(self, b, n, k):
indices = torch.linspace(0, n-1, steps=k).long()
indices = indices.unsqueeze(0).cuda()
indices = indices.repeat(b, 1)
return indices
def forward(self, x, type, N, T, sigma):
B = x.size(0)
H = W = int(sqrt(N))
indicator = None
indices = None
if type == 'time':
if self.score == 'tpool':
x = rearrange(x, 'b c t h w -> b t (h w) c')
avg = torch.mean(x, dim=2, keepdim=False)
max_ = torch.max(x, dim=2).values
x_ = torch.cat((avg, max_), dim=2)
scores = self.score_network(x_).squeeze(-1)
scores = min_max_norm(scores)
if self.training:
indicator = self.get_indicator(scores, self.k, sigma)
else:
indices = self.get_indices(scores, self.k)
x = rearrange(x, 'b t n c -> b t (n c)')
else:
s = self.stride if self.stride is not None else int(max((H - self.anchor_size) // 2, 1))
if self.score == 'spatch':
x = rearrange(x, 'b c t h w -> (b t) (h w) c')
scores = self.score_network(x)
scores = rearrange(scores, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
scores = F.unfold(scores, kernel_size=self.anchor_size, stride=s)
scores = scores.mean(dim=1)
scores = min_max_norm(scores)
x = rearrange(x, '(b t) (h w) c -> (b t) c h w', b=B, h=H)
x = F.unfold(x, kernel_size=self.anchor_size, stride=s).permute(0, 2, 1).contiguous()
if self.training:
indicator = self.get_indicator(scores, 1, sigma)
else:
indices = self.get_indices(scores, 1)
if self.training:
if indicator is not None:
patches = extract_patches_from_indicators(x, indicator)
elif indices is not None:
patches = extract_patches_from_indices(x, indices)
if type == 'time':
patches = rearrange(patches, 'b k (h w c) -> b c k h w', h=H, w=W)
elif self.score == 'spatch':
patches = patches.squeeze(1)
patches = rearrange(patches, '(b t) (c kh kw) -> b c t kh kw', b=B, c=self.in_channels, kh=self.anchor_size)
return patches
else:
patches = extract_patches_from_indices(x, indices)
if type == 'time':
patches = rearrange(patches, 'b k (h w c) -> b c k h w', h=H, w=W)
elif self.score == 'spatch':
patches = patches.squeeze(1)
patches = rearrange(patches, '(b t) (c kh kw) -> b c t kh kw', b=B, c=self.in_channels, kh=self.anchor_size)
return patches
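# --- Illustrative sketch (not part of the original code): temporal pruning in
# eval mode keeps the top-8 of 16 frames via hard top-k. A CUDA device is
# required because PatchNet.__init__ places a helper tensor on the GPU; all
# tensor sizes below are assumptions.
def _demo_patchnet_time():
    if not torch.cuda.is_available():
        return None
    net = PatchNet(score='tpool', k=8, in_channels=96).cuda().eval()
    x = torch.randn(2, 96, 16, 7, 7, device='cuda')  # (B, C, T, H, W)
    out = net(x, 'time', N=49, T=16, sigma=0.05)
    assert out.shape == (2, 96, 8, 7, 7)             # 16 frames -> 8 frames
    return out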
| 8,524
| 33.375
| 124
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet3d.py
|
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, NonLocal3d, build_activation_layer,
constant_init, kaiming_init)
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import _BatchNorm
from torch.nn.modules.utils import _ntuple, _triple
from ...utils import get_root_logger
from ..builder import BACKBONES
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
from mmdet.models import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class BasicBlock3d(nn.Module):
"""BasicBlock 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type``.
            Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 1
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False,
**kwargs):
super().__init__()
assert style in ['pytorch', 'caffe']
# make sure that only ``inflate_style`` is passed into kwargs
assert set(kwargs).issubset(['inflate_style'])
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
if self.inflate:
conv1_kernel_size = (3, 3, 3)
conv1_padding = (1, dilation, dilation)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, 1, 1)
else:
conv1_kernel_size = (1, 3, 3)
conv1_padding = (0, dilation, dilation)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, 1, 1)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes * self.expansion,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv2.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
class Bottleneck3d(nn.Module):
"""Bottleneck 3d block for ResNet3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
temporal_stride (int): Temporal stride in the conv3d layer. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: 'pytorch'.
inflate (bool): Whether to inflate kernel. Default: True.
        inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines the
            kernel sizes and padding strides for conv1 and conv2 in each block.
            Default: '3x1x1'.
non_local (bool): Determine whether to apply non-local module in this
block. Default: False.
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type``.
            Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
inflate=True,
inflate_style='3x1x1',
non_local=False,
non_local_cfg=dict(),
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
assert style in ['pytorch', 'caffe']
assert inflate_style in ['3x1x1', '3x3x3']
self.inplanes = inplanes
self.planes = planes
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.inflate = inflate
self.inflate_style = inflate_style
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.act_cfg = act_cfg
self.with_cp = with_cp
self.non_local = non_local
self.non_local_cfg = non_local_cfg
if self.style == 'pytorch':
self.conv1_stride_s = 1
self.conv2_stride_s = spatial_stride
self.conv1_stride_t = 1
self.conv2_stride_t = temporal_stride
else:
self.conv1_stride_s = spatial_stride
self.conv2_stride_s = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
if self.inflate:
if inflate_style == '3x1x1':
conv1_kernel_size = (3, 1, 1)
conv1_padding = (1, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (3, 3, 3)
conv2_padding = (1, dilation, dilation)
else:
conv1_kernel_size = (1, 1, 1)
conv1_padding = (0, 0, 0)
conv2_kernel_size = (1, 3, 3)
conv2_padding = (0, dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
conv1_kernel_size,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=conv1_padding,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv2 = ConvModule(
planes,
planes,
conv2_kernel_size,
stride=(self.conv2_stride_t, self.conv2_stride_s,
self.conv2_stride_s),
padding=conv2_padding,
dilation=(1, dilation, dilation),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.conv3 = ConvModule(
planes,
planes * self.expansion,
1,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
# No activation in the third ConvModule for bottleneck
act_cfg=None)
self.downsample = downsample
self.relu = build_activation_layer(self.act_cfg)
if self.non_local:
self.non_local_block = NonLocal3d(self.conv3.norm.num_features,
**self.non_local_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.non_local:
out = self.non_local_block(out)
return out
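# --- Illustrative sketch (not part of the original code): a '3x1x1' inflated
# bottleneck is shape-preserving when inplanes == planes * expansion, so no
# downsample branch is needed for the identity add. Sizes are assumptions.
def _demo_bottleneck3d():
    import torch
    block = Bottleneck3d(inplanes=64, planes=16)
    x = torch.randn(1, 64, 8, 56, 56)  # (B, C, T, H, W)
    out = block(x)
    assert out.shape == x.shape
    return out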
@BACKBONES.register_module()
class ResNet3d(nn.Module):
"""ResNet 3d backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
stage_blocks (tuple | None): Set number of stages for each res layer.
Default: None.
pretrained2d (bool): Whether to load pretrained 2D model.
Default: True.
in_channels (int): Channel num of input features. Default: 3.
base_channels (int): Channel num of stem output features. Default: 64.
out_indices (Sequence[int]): Indices of output feature. Default: (3, ).
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
Default: ``(1, 2, 2, 2)``.
temporal_strides (Sequence[int]):
Temporal strides of residual blocks of each stage.
Default: ``(1, 1, 1, 1)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
conv1_kernel (Sequence[int]): Kernel size of the first conv layer.
Default: ``(3, 7, 7)``.
conv1_stride_s (int): Spatial stride of the first conv layer.
Default: 2.
conv1_stride_t (int): Temporal stride of the first conv layer.
Default: 1.
pool1_stride_s (int): Spatial stride of the first pooling layer.
Default: 2.
pool1_stride_t (int): Temporal stride of the first pooling layer.
Default: 1.
with_pool2 (bool): Whether to use pool2. Default: True.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Default: -1.
inflate (Sequence[int]): Inflate Dims of each block.
Default: (1, 1, 1, 1).
        inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines the
            kernel sizes and paddings for conv1 and conv2 in each block.
            Default: '3x1x1'.
        conv_cfg (dict): Config for conv layers. Required keys are ``type``.
            Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type``
            and ``requires_grad``.
            Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
        non_local (Sequence[int]): Determine whether to apply non-local module
            in the corresponding block of each stage. Default: (0, 0, 0, 0).
non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
        zero_init_residual (bool):
            Whether to use zero initialization for residual block.
            Default: True.
        kwargs (dict, optional): Keyword arguments for ``make_res_layer``.
"""
arch_settings = {
18: (BasicBlock3d, (2, 2, 2, 2)),
34: (BasicBlock3d, (3, 4, 6, 3)),
50: (Bottleneck3d, (3, 4, 6, 3)),
101: (Bottleneck3d, (3, 4, 23, 3)),
152: (Bottleneck3d, (3, 8, 36, 3))
}
def __init__(self,
depth,
pretrained,
stage_blocks=None,
pretrained2d=True,
in_channels=3,
num_stages=4,
base_channels=64,
out_indices=(3, ),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 1, 1, 1),
dilations=(1, 1, 1, 1),
conv1_kernel=(3, 7, 7),
conv1_stride_s=2,
conv1_stride_t=1,
pool1_stride_s=2,
pool1_stride_t=1,
with_pool2=True,
style='pytorch',
frozen_stages=-1,
inflate=(1, 1, 1, 1),
inflate_style='3x1x1',
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
zero_init_residual=True,
**kwargs):
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.stage_blocks = stage_blocks
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.spatial_strides = spatial_strides
self.temporal_strides = temporal_strides
self.dilations = dilations
assert len(spatial_strides) == len(temporal_strides) == len(
dilations) == num_stages
if self.stage_blocks is not None:
assert len(self.stage_blocks) == num_stages
self.conv1_kernel = conv1_kernel
self.conv1_stride_s = conv1_stride_s
self.conv1_stride_t = conv1_stride_t
self.pool1_stride_s = pool1_stride_s
self.pool1_stride_t = pool1_stride_t
self.with_pool2 = with_pool2
self.style = style
self.frozen_stages = frozen_stages
self.stage_inflations = _ntuple(num_stages)(inflate)
self.non_local_stages = _ntuple(num_stages)(non_local)
self.inflate_style = inflate_style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
if self.stage_blocks is None:
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self.non_local_cfg = non_local_cfg
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
temporal_stride = temporal_strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
non_local=self.non_local_stages[i],
non_local_cfg=self.non_local_cfg,
inflate=self.stage_inflations[i],
inflate_style=self.inflate_style,
with_cp=with_cp,
**kwargs)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides in
residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer, otherwise
the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
            inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines
                the kernel sizes and paddings for conv1 and conv2
                in each block. Default: '3x1x1'.
            non_local (int | Sequence[int]): Determine whether to apply
                non-local module in the corresponding block of each stage.
                Default: 0.
            non_local_cfg (dict): Config for non-local module.
                Default: ``dict()``.
            conv_cfg (dict | None): Config for conv layers. Default: None.
            norm_cfg (dict | None): Config for norm layers. Default: None.
            act_cfg (dict | None): Config for activation layers. Default: None.
            with_cp (bool): Use checkpoint or not. Using checkpoint
                will save some memory while slowing down the training speed.
                Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
downsample = None
if spatial_stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
downsample=downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
@staticmethod
def _inflate_conv_params(conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
kernel_t = conv3d.weight.data.shape[2]
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
        if conv3d.bias is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
@staticmethod
def _inflate_bn_params(bn3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a norm module from 2d to 3d.
Args:
bn3d (nn.Module): The destination bn3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding bn module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
for param_name, param in bn3d.named_parameters():
param_2d_name = f'{module_name_2d}.{param_name}'
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
for param_name, param in bn3d.named_buffers():
param_2d_name = f'{module_name_2d}.{param_name}'
# some buffers like num_batches_tracked may not exist in old
# checkpoints
if param_2d_name in state_dict_2d:
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
    # Declared as a staticmethod taking an explicit ``self`` so that
    # ``ResNet3dLayer`` can reuse it as an unbound function (see below).
    @staticmethod
    def _inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d.
        The differences between resnet3d and resnet2d mainly lie in the extra
        temporal axis of the conv kernels. To utilize the pretrained 2d
        parameters, the conv2d weights are inflated to fit the shapes of
        their 3d counterparts.
        Args:
            logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
if original_conv_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_conv_name}')
else:
shape_2d = state_dict_r2d[original_conv_name +
'.weight'].shape
shape_3d = module.conv.weight.data.shape
if shape_2d != shape_3d[:2] + shape_3d[3:]:
logger.warning(f'Weight shape mismatch for '
f': {original_conv_name} : '
f'3d weight shape: {shape_3d}; '
f'2d weight shape: {shape_2d}. ')
else:
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
if original_bn_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_bn_name}')
else:
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name,
inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def inflate_weights(self, logger):
self._inflate_weights(self, logger)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=(self.conv1_stride_t, self.conv1_stride_s,
self.conv1_stride_s),
padding=tuple([(k - 1) // 2 for k in _triple(self.conv1_kernel)]),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.maxpool = nn.MaxPool3d(
kernel_size=(1, 3, 3),
stride=(self.pool1_stride_t, self.pool1_stride_s,
self.pool1_stride_s),
padding=(0, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1))
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
    # Also declared as a staticmethod with an explicit ``self``, shared
    # with ``ResNet3dLayer``.
    @staticmethod
    def _init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch.
Args:
pretrained (str | None): The path of the pretrained weight. Will
override the original `pretrained` if set. The arg is added to
be compatible with mmdet. Default: None.
"""
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
if self.pretrained2d:
# Inflate 2D model into 3D model.
self.inflate_weights(logger)
else:
# Directly load 3D model.
load_checkpoint(
self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck3d):
constant_init(m.conv3.bn, 0)
elif isinstance(m, BasicBlock3d):
constant_init(m.conv2.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def init_weights(self, pretrained=None):
self._init_weights(self, pretrained)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i == 0 and self.with_pool2:
x = self.pool2(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
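# Illustrative usage sketch (not part of the original file): build a
# ResNet3d-50 from scratch and run a dummy clip through it; all values here
# are assumptions chosen for demonstration.
def _demo_resnet3d():
    import torch
    model = ResNet3d(depth=50, pretrained=None, pretrained2d=False)
    model.init_weights()
    clip = torch.randn(1, 3, 8, 64, 64)  # [N, C, T, H, W]
    feat = model(clip)
    # with the default strides and pool2 this should be [1, 2048, 4, 2, 2]
    assert feat.shape[1] == 2048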
@BACKBONES.register_module()
class ResNet3dLayer(nn.Module):
"""ResNet 3d Layer.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
pretrained (str | None): Name of pretrained model.
pretrained2d (bool): Whether to load pretrained 2D model.
Default: True.
stage (int): The index of Resnet stage. Default: 3.
base_channels (int): Channel num of stem output features. Default: 64.
        spatial_stride (int): The 1st res block's spatial stride. Default: 2.
        temporal_stride (int): The 1st res block's temporal stride.
            Default: 1.
dilation (int): The dilation. Default: 1.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer. Default: 'pytorch'.
        all_frozen (bool): Whether to freeze all modules in the layer.
            Default: False.
inflate (int): Inflate Dims of each block. Default: 1.
        inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines the
            kernel sizes and paddings for conv1 and conv2 in each block.
            Default: '3x1x1'.
        conv_cfg (dict): Config for conv layers. Required keys are ``type``.
            Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type``
            and ``requires_grad``.
            Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
        zero_init_residual (bool):
            Whether to use zero initialization for residual block.
            Default: True.
        kwargs (dict, optional): Keyword arguments for ``make_res_layer``.
"""
def __init__(self,
depth,
pretrained,
pretrained2d=True,
stage=3,
base_channels=64,
spatial_stride=2,
temporal_stride=1,
dilation=1,
style='pytorch',
all_frozen=False,
inflate=1,
inflate_style='3x1x1',
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.arch_settings = ResNet3d.arch_settings
assert depth in self.arch_settings
self.make_res_layer = ResNet3d.make_res_layer
self._inflate_conv_params = ResNet3d._inflate_conv_params
self._inflate_bn_params = ResNet3d._inflate_bn_params
self._inflate_weights = ResNet3d._inflate_weights
self._init_weights = ResNet3d._init_weights
self.depth = depth
self.pretrained = pretrained
self.pretrained2d = pretrained2d
self.stage = stage
# stage index is 0 based
assert 0 <= stage <= 3
self.base_channels = base_channels
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.style = style
self.all_frozen = all_frozen
self.stage_inflation = inflate
self.inflate_style = inflate_style
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
block, stage_blocks = self.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = self.make_res_layer(
block,
inplanes,
planes,
stage_block,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
inflate=self.stage_inflation,
inflate_style=self.inflate_style,
with_cp=with_cp,
**kwargs)
self.layer_name = f'layer{stage + 1}'
self.add_module(self.layer_name, res_layer)
def inflate_weights(self, logger):
self._inflate_weights(self, logger)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.all_frozen:
layer = getattr(self, self.layer_name)
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
self._init_weights(self, pretrained)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
res_layer = getattr(self, self.layer_name)
out = res_layer(x)
return out
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
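# Illustrative sketch (not part of the original file): extract only the last
# stage of a ResNet3d-50 and push stage-3 features through it; the input
# shape is an assumption matching a ResNet3d-50 stage-3 output.
def _demo_resnet3d_layer():
    import torch
    layer = ResNet3dLayer(
        depth=50, pretrained=None, pretrained2d=False, stage=3)
    layer.init_weights()
    feat = torch.randn(1, 1024, 4, 14, 14)
    out = layer(feat)  # [1, 2048, 4, 7, 7] with the default strides
    assert out.shape[1] == 2048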
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(ResNet3dLayer)
MMDET_BACKBONES.register_module()(ResNet3d)
| 40,219
| 38.277344
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet3d_slowfast.py
|
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init
from mmcv.runner import _load_checkpoint, load_checkpoint
from mmcv.utils import print_log
from ...utils import get_root_logger
from ..builder import BACKBONES
from .resnet3d import ResNet3d
try:
from mmdet.models import BACKBONES as MMDET_BACKBONES
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class ResNet3dPathway(ResNet3d):
"""A pathway of Slowfast based on ResNet3d.
Args:
        *args (arguments): Arguments same as :class:`ResNet3d`.
lateral (bool): Determines whether to enable the lateral connection
from another pathway. Default: False.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
``alpha`` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to ``beta`` in the paper.
Default: 8.
fusion_kernel (int): The kernel size of lateral fusion.
Default: 5.
        **kwargs (keyword arguments): Keyword arguments for ResNet3d.
"""
def __init__(self,
*args,
lateral=False,
speed_ratio=8,
channel_ratio=8,
fusion_kernel=5,
**kwargs):
self.lateral = lateral
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
self.fusion_kernel = fusion_kernel
super().__init__(*args, **kwargs)
self.inplanes = self.base_channels
if self.lateral:
self.conv1_lateral = ConvModule(
self.inplanes // self.channel_ratio,
# https://arxiv.org/abs/1812.03982, the
# third type of lateral connection has out_channel:
# 2 * \beta * C
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.lateral_connections = []
for i in range(len(self.stage_blocks)):
planes = self.base_channels * 2**i
self.inplanes = planes * self.block.expansion
if lateral and i != self.num_stages - 1:
# no lateral connection needed in final stage
lateral_name = f'layer{(i + 1)}_lateral'
setattr(
self, lateral_name,
ConvModule(
self.inplanes // self.channel_ratio,
self.inplanes * 2 // self.channel_ratio,
kernel_size=(fusion_kernel, 1, 1),
stride=(self.speed_ratio, 1, 1),
padding=((fusion_kernel - 1) // 2, 0, 0),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None))
self.lateral_connections.append(lateral_name)
def make_res_layer(self,
block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate=1,
inflate_style='3x1x1',
non_local=0,
non_local_cfg=dict(),
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
with_cp=False):
"""Build residual layer for Slowfast.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input
feature in each block.
planes (int): Number of channels for the output
feature in each block.
blocks (int): Number of residual blocks.
spatial_stride (int | Sequence[int]): Spatial strides
in residual and conv layers. Default: 1.
temporal_stride (int | Sequence[int]): Temporal strides in
residual and conv layers. Default: 1.
dilation (int): Spacing between kernel elements. Default: 1.
style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``,
the stride-two layer is the 3x3 conv layer,
otherwise the stride-two layer is the first 1x1 conv layer.
Default: ``pytorch``.
inflate (int | Sequence[int]): Determine whether to inflate
for each block. Default: 1.
            inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines
                the kernel sizes and paddings for conv1 and
                conv2 in each block. Default: ``3x1x1``.
            non_local (int | Sequence[int]): Determine whether to apply
                non-local module in the corresponding block of each stage.
                Default: 0.
non_local_cfg (dict): Config for non-local module.
Default: ``dict()``.
conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
            act_cfg (dict | None): Config for activation layers. Default: None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
inflate = inflate if not isinstance(inflate,
int) else (inflate, ) * blocks
non_local = non_local if not isinstance(
non_local, int) else (non_local, ) * blocks
assert len(inflate) == blocks and len(non_local) == blocks
if self.lateral:
lateral_inplanes = inplanes * 2 // self.channel_ratio
else:
lateral_inplanes = 0
if (spatial_stride != 1
or (inplanes + lateral_inplanes) != planes * block.expansion):
downsample = ConvModule(
inplanes + lateral_inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
else:
downsample = None
layers = []
layers.append(
block(
inplanes + lateral_inplanes,
planes,
spatial_stride,
temporal_stride,
dilation,
downsample,
style=style,
inflate=(inflate[0] == 1),
inflate_style=inflate_style,
non_local=(non_local[0] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
1,
dilation,
style=style,
inflate=(inflate[i] == 1),
inflate_style=inflate_style,
non_local=(non_local[i] == 1),
non_local_cfg=non_local_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def inflate_weights(self, logger):
"""Inflate the resnet2d parameters to resnet3d pathway.
        The differences between resnet3d and resnet2d mainly lie in the extra
        temporal axis of the conv kernels. To utilize the pretrained 2d
        parameters, the conv2d weights are inflated to fit the shapes of
        their 3d counterparts. For a pathway, the ``lateral_connection`` part
        should not be inflated from 2d weights.
        Args:
            logger (logging.Logger): The logger used to print
                debugging information.
"""
state_dict_r2d = _load_checkpoint(self.pretrained)
if 'state_dict' in state_dict_r2d:
state_dict_r2d = state_dict_r2d['state_dict']
inflated_param_names = []
for name, module in self.named_modules():
if 'lateral' in name:
continue
if isinstance(module, ConvModule):
# we use a ConvModule to wrap conv+bn+relu layers, thus the
# name mapping is needed
if 'downsample' in name:
# layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
original_conv_name = name + '.0'
# layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
original_bn_name = name + '.1'
else:
# layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
original_conv_name = name
# layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
original_bn_name = name.replace('conv', 'bn')
if original_conv_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_conv_name}')
else:
self._inflate_conv_params(module.conv, state_dict_r2d,
original_conv_name,
inflated_param_names)
if original_bn_name + '.weight' not in state_dict_r2d:
                    logger.warning(f'Module does not exist in the '
                                   f'state_dict_r2d: {original_bn_name}')
else:
self._inflate_bn_params(module.bn, state_dict_r2d,
original_bn_name,
inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(
state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
logger.info(f'These parameters in the 2d checkpoint are not loaded'
f': {remaining_names}')
def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
        The difference between the 2d and 3d conv modules in a pathway mainly
        lies in the inplanes, due to the lateral connections. To fit the
        shapes of the lateral connection counterpart, the parameters are
        expanded by concatenating the conv2d parameters with extra zero
        padding.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
module_name_2d (str): The name of corresponding conv module in the
2d model.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
old_shape = conv2d_weight.shape
new_shape = conv3d.weight.data.shape
kernel_t = new_shape[2]
if new_shape[1] != old_shape[1]:
# Inplanes may be different due to lateral connections
new_channels = new_shape[1] - old_shape[1]
pad_shape = old_shape
pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]
# Expand parameters by concat extra channels
conv2d_weight = torch.cat(
(conv2d_weight,
torch.zeros(pad_shape).type_as(conv2d_weight).to(
conv2d_weight.device)),
dim=1)
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
        if conv3d.bias is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
`self.frozen_stages`."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
if i != len(self.res_layers) and self.lateral:
# No fusion needed in the final stage
lateral_name = self.lateral_connections[i - 1]
conv_lateral = getattr(self, lateral_name)
conv_lateral.eval()
for param in conv_lateral.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if pretrained:
self.pretrained = pretrained
# Override the init_weights of i3d
super().init_weights()
for module_name in self.lateral_connections:
layer = getattr(self, module_name)
for m in layer.modules():
if isinstance(m, (nn.Conv3d, nn.Conv2d)):
kaiming_init(m)
pathway_cfg = {
'resnet3d': ResNet3dPathway,
# TODO: BNInceptionPathway
}
def build_pathway(cfg, *args, **kwargs):
"""Build pathway.
Args:
        cfg (None or dict): cfg should contain:
            - type (str): identify the pathway type.
Returns:
nn.Module: Created pathway.
"""
if not (isinstance(cfg, dict) and 'type' in cfg):
raise TypeError('cfg must be a dict containing the key "type"')
cfg_ = cfg.copy()
pathway_type = cfg_.pop('type')
if pathway_type not in pathway_cfg:
raise KeyError(f'Unrecognized pathway type {pathway_type}')
pathway_cls = pathway_cfg[pathway_type]
pathway = pathway_cls(*args, **kwargs, **cfg_)
return pathway
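# Illustrative sketch (not part of the original file): build the slow
# pathway via the factory above; the config mirrors the default
# ``slow_pathway`` of ``ResNet3dSlowFast`` below.
def _demo_build_pathway():
    cfg = dict(
        type='resnet3d', depth=50, pretrained=None, lateral=True,
        speed_ratio=8, channel_ratio=8, conv1_kernel=(1, 7, 7),
        dilations=(1, 1, 1, 1), conv1_stride_t=1, pool1_stride_t=1,
        inflate=(0, 0, 1, 1))
    return build_pathway(cfg)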
@BACKBONES.register_module()
class ResNet3dSlowFast(nn.Module):
"""Slowfast backbone.
This module is proposed in `SlowFast Networks for Video Recognition
<https://arxiv.org/abs/1812.03982>`_
Args:
pretrained (str): The file path to a pretrained model.
resample_rate (int): A large temporal stride ``resample_rate``
on input frames. The actual resample rate is calculated by
            multiplying the ``interval`` in ``SampleFrames`` in the
pipeline with ``resample_rate``, equivalent to the :math:`\\tau`
in the paper, i.e. it processes only one out of
``resample_rate * interval`` frames. Default: 8.
speed_ratio (int): Speed ratio indicating the ratio between time
dimension of the fast and slow pathway, corresponding to the
:math:`\\alpha` in the paper. Default: 8.
channel_ratio (int): Reduce the channel number of fast pathway
by ``channel_ratio``, corresponding to :math:`\\beta` in the paper.
Default: 8.
slow_pathway (dict): Configuration of slow branch, should contain
necessary arguments for building the specific type of pathway
and:
type (str): type of backbone the pathway bases on.
            lateral (bool): determine whether to build lateral connection
                for the pathway. Default:
.. code-block:: Python
dict(type='ResNetPathway',
lateral=True, depth=50, pretrained=None,
conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1),
conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1))
fast_pathway (dict): Configuration of fast branch, similar to
`slow_pathway`. Default:
.. code-block:: Python
dict(type='ResNetPathway',
lateral=False, depth=50, pretrained=None, base_channels=8,
conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1)
"""
def __init__(self,
pretrained,
resample_rate=8,
speed_ratio=8,
channel_ratio=8,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)),
fast_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
base_channels=8,
conv1_kernel=(5, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1)):
super().__init__()
self.pretrained = pretrained
self.resample_rate = resample_rate
self.speed_ratio = speed_ratio
self.channel_ratio = channel_ratio
if slow_pathway['lateral']:
slow_pathway['speed_ratio'] = speed_ratio
slow_pathway['channel_ratio'] = channel_ratio
self.slow_path = build_pathway(slow_pathway)
self.fast_path = build_pathway(fast_pathway)
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if pretrained:
self.pretrained = pretrained
if isinstance(self.pretrained, str):
logger = get_root_logger()
msg = f'load model from: {self.pretrained}'
print_log(msg, logger=logger)
# Directly load 3D model.
load_checkpoint(self, self.pretrained, strict=True, logger=logger)
elif self.pretrained is None:
            # Init the two branches separately.
self.fast_path.init_weights()
self.slow_path.init_weights()
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
tuple[torch.Tensor]: The feature of the input samples extracted
by the backbone.
"""
x_slow = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / self.resample_rate, 1.0, 1.0))
x_slow = self.slow_path.conv1(x_slow)
x_slow = self.slow_path.maxpool(x_slow)
x_fast = nn.functional.interpolate(
x,
mode='nearest',
scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0,
1.0))
x_fast = self.fast_path.conv1(x_fast)
x_fast = self.fast_path.maxpool(x_fast)
if self.slow_path.lateral:
x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
for i, layer_name in enumerate(self.slow_path.res_layers):
res_layer = getattr(self.slow_path, layer_name)
x_slow = res_layer(x_slow)
res_layer_fast = getattr(self.fast_path, layer_name)
x_fast = res_layer_fast(x_fast)
if (i != len(self.slow_path.res_layers) - 1
and self.slow_path.lateral):
# No fusion needed in the final stage
lateral_name = self.slow_path.lateral_connections[i]
conv_lateral = getattr(self.slow_path, lateral_name)
x_fast_lateral = conv_lateral(x_fast)
x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)
out = (x_slow, x_fast)
return out
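# Illustrative sketch (not part of the original file): run a 32-frame dummy
# clip through the default SlowFast-R50; the shape comments follow the
# strides above and are assumptions, not guarantees.
def _demo_slowfast():
    model = ResNet3dSlowFast(pretrained=None)
    model.init_weights()
    clip = torch.randn(1, 3, 32, 64, 64)
    x_slow, x_fast = model(clip)
    # slow path keeps 32 / resample_rate = 4 frames, fast path keeps
    # 32 / (resample_rate // speed_ratio) = 32 frames
    assert x_slow.shape[2] == 4 and x_fast.shape[2] == 32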
if mmdet_imported:
MMDET_BACKBONES.register_module()(ResNet3dSlowFast)
| 21,060
| 39.424184
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet2plus1d.py
|
from ..builder import BACKBONES
from .resnet3d import ResNet3d
@BACKBONES.register_module()
class ResNet2Plus1d(ResNet3d):
"""ResNet (2+1)d backbone.
This model is proposed in `A Closer Look at Spatiotemporal Convolutions for
Action Recognition <https://arxiv.org/abs/1711.11248>`_
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.pretrained2d is False
assert self.conv_cfg['type'] == 'Conv2plus1d'
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1(x)
x = self.maxpool(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
# no pool2 in R(2+1)d
x = res_layer(x)
return x
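# Illustrative sketch (not part of the original file): an R(2+1)D backbone
# needs the factorised ``Conv2plus1d`` conv config (registered elsewhere in
# mmaction) and 3D weights, matching the assertions in ``__init__`` above;
# real configs typically also customise ``norm_cfg``.
def _demo_r2plus1d():
    return ResNet2Plus1d(
        depth=50, pretrained=None, pretrained2d=False,
        conv_cfg=dict(type='Conv2plus1d'))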
| 1,482
| 28.66
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/mobilenet_v2_tsm.py
|
from ..builder import BACKBONES
from .mobilenet_v2 import InvertedResidual, MobileNetV2
from .resnet_tsm import TemporalShift
@BACKBONES.register_module()
class MobileNetV2TSM(MobileNetV2):
"""MobileNetV2 backbone for TSM.
Args:
num_segments (int): Number of frame segments. Default: 8.
        is_shift (bool): Whether to make temporal shift in res layers.
            Default: True.
        shift_div (int): Number of divisions for shift. Default: 8.
        **kwargs (keyword arguments, optional): Arguments for MobileNetV2.
"""
def __init__(self, num_segments=8, is_shift=True, shift_div=8, **kwargs):
super().__init__(**kwargs)
self.num_segments = num_segments
self.is_shift = is_shift
self.shift_div = shift_div
def make_temporal_shift(self):
"""Make temporal shift for some layers."""
for m in self.modules():
if isinstance(m, InvertedResidual) and \
len(m.conv) == 3 and m.use_res_connect:
m.conv[0] = TemporalShift(
m.conv[0],
num_segments=self.num_segments,
shift_div=self.shift_div,
)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super().init_weights()
if self.is_shift:
self.make_temporal_shift()
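# Illustrative usage sketch (not part of the original file): the shift
# wrappers are only inserted once ``init_weights`` runs, so it must be
# called before the forward pass; MobileNetV2's default ``pretrained=None``
# is assumed here.
def _demo_mobilenetv2_tsm():
    model = MobileNetV2TSM(num_segments=8, shift_div=8)
    model.init_weights()  # wraps eligible InvertedResidual convs
    return model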
| 1,416
| 33.560976
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet_tsm.py
|
import torch
import torch.nn as nn
from mmcv.cnn import NonLocal3d
from torch.nn.modules.utils import _ntuple
from ..builder import BACKBONES
from .resnet import ResNet
class NL3DWrapper(nn.Module):
"""3D Non-local wrapper for ResNet50.
Wrap ResNet layers with 3D NonLocal modules.
Args:
block (nn.Module): Residual blocks to be built.
num_segments (int): Number of frame segments.
non_local_cfg (dict): Config for non-local layers. Default: ``dict()``.
"""
def __init__(self, block, num_segments, non_local_cfg=dict()):
super(NL3DWrapper, self).__init__()
self.block = block
self.non_local_cfg = non_local_cfg
self.non_local_block = NonLocal3d(self.block.conv3.norm.num_features,
**self.non_local_cfg)
self.num_segments = num_segments
def forward(self, x):
x = self.block(x)
n, c, h, w = x.size()
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2).contiguous()
x = self.non_local_block(x)
x = x.transpose(1, 2).contiguous().view(n, c, h, w)
return x
class TemporalShift(nn.Module):
"""Temporal shift module.
This module is proposed in
`TSM: Temporal Shift Module for Efficient Video Understanding
<https://arxiv.org/abs/1811.08383>`_
Args:
net (nn.module): Module to make temporal shift.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of divisions for shift. Default: 8.
"""
def __init__(self, net, num_segments=3, shift_div=8):
super().__init__()
self.net = net
self.num_segments = num_segments
self.shift_div = shift_div
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
x = self.shift(x, self.num_segments, shift_div=self.shift_div)
return self.net(x)
@staticmethod
def shift(x, num_segments, shift_div=3):
"""Perform temporal shift operation on the feature.
Args:
x (torch.Tensor): The input feature to be shifted.
num_segments (int): Number of frame segments.
shift_div (int): Number of divisions for shift. Default: 3.
Returns:
torch.Tensor: The shifted feature.
"""
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, num_segments, C, H*W]
# can't use 5 dimensional array on PPL2D backend for caffe
x = x.view(-1, num_segments, c, h * w)
# get shift fold
fold = c // shift_div
# split c channel into three parts:
# left_split, mid_split, right_split
left_split = x[:, :, :fold, :]
mid_split = x[:, :, fold:2 * fold, :]
right_split = x[:, :, 2 * fold:, :]
        # can't use torch.zeros(*A.shape) or torch.zeros_like(A)
        # because arrays in caffe inference must be produced by computation
        # shift left along the num_segments dim in `left_split`
zeros = left_split - left_split
blank = zeros[:, :1, :, :]
left_split = left_split[:, 1:, :, :]
left_split = torch.cat((left_split, blank), 1)
# shift right on num_segments channel in `mid_split`
zeros = mid_split - mid_split
blank = zeros[:, :1, :, :]
mid_split = mid_split[:, :-1, :, :]
mid_split = torch.cat((blank, mid_split), 1)
# right_split: no shift
# concatenate
out = torch.cat((left_split, mid_split, right_split), 2)
# [N, C, H, W]
# restore the original dimension
return out.view(n, c, h, w)
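# Illustrative sketch (not part of the original file): shift a tiny feature
# map and check that the shape is preserved; with C=8 and shift_div=4, the
# first 2 channels shift left in time and the next 2 shift right.
def _demo_shift():
    # a batch of 2 clips x 4 segments, 8 channels, 2x2 spatial
    x = torch.arange(2 * 4 * 8 * 4, dtype=torch.float32).view(8, 8, 2, 2)
    out = TemporalShift.shift(x, num_segments=4, shift_div=4)
    assert out.shape == x.shape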
@BACKBONES.register_module()
class ResNetTSM(ResNet):
"""ResNet backbone for TSM.
Args:
num_segments (int): Number of frame segments. Default: 8.
        is_shift (bool): Whether to make temporal shift in res layers.
            Default: True.
        non_local (Sequence[int]): Determine whether to apply non-local module
            in the corresponding block of each stage. Default: (0, 0, 0, 0).
        non_local_cfg (dict): Config for non-local module. Default: ``dict()``.
        shift_div (int): Number of divisions for shift. Default: 8.
shift_place (str): Places in resnet layers for shift, which is chosen
from ['block', 'blockres'].
If set to 'block', it will apply temporal shift to all child blocks
in each resnet layer.
If set to 'blockres', it will apply temporal shift to each `conv1`
layer of all child blocks in each resnet layer.
Default: 'blockres'.
temporal_pool (bool): Whether to add temporal pooling. Default: False.
**kwargs (keyword arguments, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_shift=True,
non_local=(0, 0, 0, 0),
non_local_cfg=dict(),
shift_div=8,
shift_place='blockres',
temporal_pool=False,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_shift = is_shift
self.shift_div = shift_div
self.shift_place = shift_place
self.temporal_pool = temporal_pool
self.non_local = non_local
self.non_local_stages = _ntuple(self.num_stages)(non_local)
self.non_local_cfg = non_local_cfg
def make_temporal_shift(self):
"""Make temporal shift for some layers."""
if self.temporal_pool:
num_segment_list = [
self.num_segments, self.num_segments // 2,
self.num_segments // 2, self.num_segments // 2
]
else:
num_segment_list = [self.num_segments] * 4
if num_segment_list[-1] <= 0:
raise ValueError('num_segment_list[-1] must be positive')
if self.shift_place == 'block':
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
blocks[i] = TemporalShift(
b, num_segments=num_segments, shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
elif 'blockres' in self.shift_place:
n_round = 1
if len(list(self.layer3.children())) >= 23:
n_round = 2
def make_block_temporal(stage, num_segments):
"""Make temporal shift on some blocks.
Args:
stage (nn.Module): Model layers to be shifted.
num_segments (int): Number of frame segments.
Returns:
nn.Module: The shifted blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
blocks[i].conv1.conv = TemporalShift(
b.conv1.conv,
num_segments=num_segments,
shift_div=self.shift_div)
return nn.Sequential(*blocks)
self.layer1 = make_block_temporal(self.layer1, num_segment_list[0])
self.layer2 = make_block_temporal(self.layer2, num_segment_list[1])
self.layer3 = make_block_temporal(self.layer3, num_segment_list[2])
self.layer4 = make_block_temporal(self.layer4, num_segment_list[3])
else:
raise NotImplementedError
def make_temporal_pool(self):
"""Make temporal pooling between layer1 and layer2, using a 3D max
pooling layer."""
class TemporalPool(nn.Module):
"""Temporal pool module.
Wrap layer2 in ResNet50 with a 3D max pooling layer.
Args:
net (nn.Module): Module to make temporal pool.
num_segments (int): Number of frame segments.
"""
def __init__(self, net, num_segments):
super().__init__()
self.net = net
self.num_segments = num_segments
self.max_pool3d = nn.MaxPool3d(
kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
def forward(self, x):
# [N, C, H, W]
n, c, h, w = x.size()
# [N // num_segments, C, num_segments, H, W]
x = x.view(n // self.num_segments, self.num_segments, c, h,
w).transpose(1, 2)
                # [N // num_segments, C, num_segments // 2, H, W]
x = self.max_pool3d(x)
# [N // 2, C, H, W]
x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
return self.net(x)
self.layer2 = TemporalPool(self.layer2, self.num_segments)
def make_non_local(self):
# This part is for ResNet50
for i in range(self.num_stages):
non_local_stage = self.non_local_stages[i]
if sum(non_local_stage) == 0:
continue
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
for idx, non_local in enumerate(non_local_stage):
if non_local:
res_layer[idx] = NL3DWrapper(res_layer[idx],
self.num_segments,
self.non_local_cfg)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
super().init_weights()
if self.is_shift:
self.make_temporal_shift()
if len(self.non_local_cfg) != 0:
self.make_non_local()
if self.temporal_pool:
self.make_temporal_pool()
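# Illustrative usage sketch (not part of the original file): the shift,
# non-local and temporal-pool wrappers are only inserted inside
# ``init_weights``, so call it before training; ResNet's default
# ``pretrained=None`` is assumed here.
def _demo_resnet_tsm():
    model = ResNetTSM(depth=50, num_segments=8, shift_div=8)
    model.init_weights()
    return model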
| 10,742
| 35.416949
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/resnet_tin.py
|
import torch
import torch.nn as nn
from mmaction.utils import import_module_error_func
from ..builder import BACKBONES
from .resnet_tsm import ResNetTSM
try:
from mmcv.ops import tin_shift
except (ImportError, ModuleNotFoundError):
@import_module_error_func('mmcv-full')
def tin_shift(*args, **kwargs):
pass
def linear_sampler(data, offset):
"""Differentiable Temporal-wise Frame Sampling, which is essentially a
linear interpolation process.
It gets the feature map which has been split into several groups
and shift them by different offsets according to their groups.
Then compute the weighted sum along with the temporal dimension.
Args:
data (torch.Tensor): Split data for certain group in shape
[N, num_segments, C, H, W].
offset (torch.Tensor): Data offsets for this group data in shape
[N, num_segments].
"""
# [N, num_segments, C, H, W]
n, t, c, h, w = data.shape
# offset0, offset1: [N, num_segments]
offset0 = torch.floor(offset).int()
offset1 = offset0 + 1
# data, data0, data1: [N, num_segments, C, H * W]
data = data.view(n, t, c, h * w).contiguous()
data0 = tin_shift(data, offset0)
data1 = tin_shift(data, offset1)
# weight0, weight1: [N, num_segments]
weight0 = 1 - (offset - offset0.float())
weight1 = 1 - weight0
# weight0, weight1:
# [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C]
group_size = offset.shape[1]
weight0 = weight0[:, :, None].repeat(1, 1, c // group_size)
weight0 = weight0.view(weight0.size(0), -1)
weight1 = weight1[:, :, None].repeat(1, 1, c // group_size)
weight1 = weight1.view(weight1.size(0), -1)
# weight0, weight1: [N, C] -> [N, 1, C, 1]
weight0 = weight0[:, None, :, None]
weight1 = weight1[:, None, :, None]
# output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W]
output = weight0 * data0 + weight1 * data1
output = output.view(n, t, c, h, w)
return output
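# Illustrative sketch (not part of the original file): the blend weights
# used above, computed without calling ``tin_shift`` (the op may require a
# CUDA build of mmcv); a fractional offset of 0.3 blends neighbouring
# frames with weights 0.7 and 0.3.
def _demo_sampler_weights():
    offset = torch.tensor([[0.3, 1.5]])
    offset0 = torch.floor(offset).int()
    weight0 = 1 - (offset - offset0.float())
    weight1 = 1 - weight0
    assert torch.allclose(weight0 + weight1, torch.ones_like(offset))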
class CombineNet(nn.Module):
"""Combine Net.
It combines Temporal interlace module with some part of ResNet layer.
Args:
net1 (nn.module): Temporal interlace module.
net2 (nn.module): Some part of ResNet layer.
"""
def __init__(self, net1, net2):
super().__init__()
self.net1 = net1
self.net2 = net2
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# input shape: [num_batches * num_segments, C, H, W]
# output x shape: [num_batches * num_segments, C, H, W]
x = self.net1(x)
# [num_batches * num_segments, C, H, W]
x = self.net2(x)
return x
class WeightNet(nn.Module):
"""WeightNet in Temporal interlace module.
    The WeightNet consists of two parts: one convolution layer
    and a sigmoid function. Following the convolution layer, the sigmoid
    function and a rescale factor scale the output to the range (0, 2).
    The initial bias of the convolution layer is set to 0, so the output
    is centred around 1.0 at initialization.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
# we set the initial bias of the convolution
# layer to 0, and the final initial output will be 1.0
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate weight
# [N, C, T]
n, _, t = x.shape
# [N, groups, T]
x = self.conv(x)
x = x.view(n, self.groups, t)
# [N, T, groups]
x = x.permute(0, 2, 1)
# scale the output to range (0, 2)
x = 2 * self.sigmoid(x)
# [N, T, groups]
return x
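# Illustrative sketch (not part of the original file): the rescaled sigmoid
# above bounds WeightNet's output strictly inside (0, 2).
def _demo_weightnet():
    net = WeightNet(in_channels=16, groups=2)
    w = net(torch.randn(4, 16, 8))  # [N, T, groups]
    assert w.min() > 0 and w.max() < 2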
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
    The OffsetNet consists of one convolution layer and two fc layers
    with a relu activation in between, followed by a sigmoid function.
    The sigmoid output is shifted by 0.5 and scaled by a factor of 4,
    which bounds the final output in (-2, 2).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
# hard code ``kernel_size`` and ``padding`` according to original repo.
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
        # The bias of the last fc layer is initialized to 0.5108 so that
        # the initial offset 4 * (sigmoid(0.5108) - 0.5) is roughly 0.5
        self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# calculate offset
# [N, C, T]
n, _, t = x.shape
# [N, 1, T]
x = self.conv(x)
# [N, T]
x = x.view(n, t)
# [N, T]
x = self.relu(self.fc1(x))
# [N, groups]
x = self.fc2(x)
# [N, 1, groups]
x = x.view(n, 1, -1)
        # 4 * (sigmoid(x) - 0.5) lies in (-2, 2), which keeps the offsets
        # within (-t/2, t/2) where t = num_segments = 8
        x = 4 * (self.sigmoid(x) - 0.5)
# [N, 1, groups]
return x
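# Illustrative sketch (not part of the original file): the offsets produced
# above are bounded by 4 * (sigmoid - 0.5), i.e. they lie in (-2, 2).
def _demo_offsetnet():
    net = OffsetNet(in_channels=16, groups=2, num_segments=8)
    off = net(torch.randn(4, 16, 8))  # [N, 1, groups]
    assert off.abs().max() < 2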
class TemporalInterlace(nn.Module):
"""Temporal interlace module.
This module is proposed in `Temporal Interlacing Network
<https://arxiv.org/abs/2001.06499>`_
Args:
in_channels (int): Channel num of input features.
num_segments (int): Number of frame segments. Default: 3.
shift_div (int): Number of division parts for shift. Default: 1.
"""
def __init__(self, in_channels, num_segments=3, shift_div=1):
super().__init__()
self.num_segments = num_segments
self.shift_div = shift_div
self.in_channels = in_channels
# hard code ``deform_groups`` according to original repo.
self.deform_groups = 2
self.offset_net = OffsetNet(in_channels // shift_div,
self.deform_groups, num_segments)
self.weight_net = WeightNet(in_channels // shift_div,
self.deform_groups)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
# x: [N, C, H, W],
# where N = num_batches x num_segments, C = shift_div * num_folds
n, c, h, w = x.size()
num_batches = n // self.num_segments
num_folds = c // self.shift_div
# x_out: [num_batches x num_segments, C, H, W]
x_out = torch.zeros((n, c, h, w), device=x.device)
# x_descriptor: [num_batches, num_segments, num_folds, H, W]
x_descriptor = x[:, :num_folds, :, :].view(num_batches,
self.num_segments,
num_folds, h, w)
# x should only obtain information on temporal and channel dimensions
# x_pooled: [num_batches, num_segments, num_folds, W]
x_pooled = torch.mean(x_descriptor, 3)
# x_pooled: [num_batches, num_segments, num_folds]
x_pooled = torch.mean(x_pooled, 3)
# x_pooled: [num_batches, num_folds, num_segments]
x_pooled = x_pooled.permute(0, 2, 1).contiguous()
# Calculate weight and bias, here groups = 2
# x_offset: [num_batches, groups]
x_offset = self.offset_net(x_pooled).view(num_batches, -1)
# x_weight: [num_batches, num_segments, groups]
x_weight = self.weight_net(x_pooled)
# x_offset: [num_batches, 2 * groups]
x_offset = torch.cat([x_offset, -x_offset], 1)
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = linear_sampler(x_descriptor, x_offset)
# x_weight: [num_batches, num_segments, groups, 1]
x_weight = x_weight[:, :, :, None]
# x_weight:
# [num_batches, num_segments, groups * 2, c // self.shift_div // 4]
x_weight = x_weight.repeat(1, 1, 2, num_folds // 2 // 2)
# x_weight:
# [num_batches, num_segments, c // self.shift_div = num_folds]
x_weight = x_weight.view(x_weight.size(0), x_weight.size(1), -1)
# x_weight: [num_batches, num_segments, num_folds, 1, 1]
x_weight = x_weight[:, :, :, None, None]
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift * x_weight
# x_shift: [num_batches, num_segments, num_folds, H, W]
x_shift = x_shift.contiguous().view(n, num_folds, h, w)
# x_out: [num_batches x num_segments, C, H, W]
x_out[:, :num_folds, :] = x_shift
x_out[:, num_folds:, :] = x[:, num_folds:, :]
return x_out
@BACKBONES.register_module()
class ResNetTIN(ResNetTSM):
"""ResNet backbone for TIN.
Args:
depth (int): Depth of ResNet, from {18, 34, 50, 101, 152}.
num_segments (int): Number of frame segments. Default: 8.
is_tin (bool): Whether to apply temporal interlace. Default: True.
shift_div (int): Number of division parts for shift. Default: 4.
kwargs (dict, optional): Arguments for ResNet.
"""
def __init__(self,
depth,
num_segments=8,
is_tin=True,
shift_div=4,
**kwargs):
super().__init__(depth, **kwargs)
self.num_segments = num_segments
self.is_tin = is_tin
self.shift_div = shift_div
def make_temporal_interlace(self):
"""Make temporal interlace for some layers."""
num_segment_list = [self.num_segments] * 4
assert num_segment_list[-1] > 0
        n_round = 1
        if len(list(self.layer3.children())) >= 23:
            # deeper backbones (e.g. ResNet-101) only interlace every other
            # block, mirroring ResNetTSM.make_temporal_shift above
            n_round = 2
            print(f'=> Using n_round {n_round} to insert temporal shift.')
def make_block_interlace(stage, num_segments, shift_div):
"""Apply Deformable shift for a ResNet layer module.
Args:
stage (nn.module): A ResNet layer to be deformed.
num_segments (int): Number of frame segments.
shift_div (int): Number of division parts for shift.
Returns:
                nn.Sequential: A Sequential container consisting of
                    deformed Interlace blocks.
"""
blocks = list(stage.children())
for i, b in enumerate(blocks):
if i % n_round == 0:
tds = TemporalInterlace(
b.conv1.in_channels,
num_segments=num_segments,
shift_div=shift_div)
blocks[i].conv1.conv = CombineNet(tds,
blocks[i].conv1.conv)
return nn.Sequential(*blocks)
self.layer1 = make_block_interlace(self.layer1, num_segment_list[0],
self.shift_div)
self.layer2 = make_block_interlace(self.layer2, num_segment_list[1],
self.shift_div)
self.layer3 = make_block_interlace(self.layer3, num_segment_list[2],
self.shift_div)
self.layer4 = make_block_interlace(self.layer4, num_segment_list[3],
self.shift_div)
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
        # call ResNet.init_weights directly, bypassing ResNetTSM's
        # temporal-shift insertion
        super(ResNetTSM, self).init_weights()
if self.is_tin:
self.make_temporal_interlace()
if len(self.non_local_cfg) != 0:
self.make_non_local()
| 13,132
| 33.651715
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/__init__.py
|
from .c3d import C3D
from .mobilenet_v2 import MobileNetV2
from .mobilenet_v2_tsm import MobileNetV2TSM
from .resnet import ResNet
from .resnet2plus1d import ResNet2Plus1d
from .resnet3d import ResNet3d, ResNet3dLayer
from .resnet3d_csn import ResNet3dCSN
from .resnet3d_slowfast import ResNet3dSlowFast
from .resnet3d_slowonly import ResNet3dSlowOnly
from .resnet_audio import ResNetAudio
from .resnet_tin import ResNetTIN
from .resnet_tsm import ResNetTSM
from .tanet import TANet
from .x3d import X3D
from .swin_transformer import SwinTransformer3D
__all__ = [
'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d',
'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D',
'ResNetAudio', 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'SwinTransformer3D'
]
| 807
| 35.727273
| 97
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/tanet.py
|
from copy import deepcopy
import torch.nn as nn
from torch.utils import checkpoint as cp
from ..builder import BACKBONES
from ..common import TAM
from .resnet import Bottleneck, ResNet
class TABlock(nn.Module):
"""Temporal Adaptive Block (TA-Block) for TANet.
This block is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
The temporal adaptive module (TAM) is embedded into ResNet-Block
after the first Conv2D, which turns the vanilla ResNet-Block
into TA-Block.
Args:
block (nn.Module): Residual blocks to be substituted.
num_segments (int): Number of frame segments.
tam_cfg (dict): Config for temporal adaptive module (TAM).
Default: dict().
"""
def __init__(self, block, num_segments, tam_cfg=dict()):
super().__init__()
self.tam_cfg = deepcopy(tam_cfg)
self.block = block
self.num_segments = num_segments
self.tam = TAM(
in_channels=block.conv1.out_channels,
num_segments=num_segments,
**self.tam_cfg)
if not isinstance(self.block, Bottleneck):
raise NotImplementedError('TA-Blocks have not been fully '
'implemented except the pattern based '
'on Bottleneck block.')
def forward(self, x):
assert isinstance(self.block, Bottleneck)
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.block.conv1(x)
out = self.tam(out)
out = self.block.conv2(out)
out = self.block.conv3(out)
if self.block.downsample is not None:
identity = self.block.downsample(x)
out = out + identity
return out
if self.block.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.block.relu(out)
return out
@BACKBONES.register_module()
class TANet(ResNet):
"""Temporal Adaptive Network (TANet) backbone.
This backbone is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO
RECOGNITION <https://arxiv.org/pdf/2005.06803>`_
Embedding the temporal adaptive module (TAM) into ResNet to
instantiate TANet.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_segments (int): Number of frame segments.
tam_cfg (dict | None): Config for temporal adaptive module (TAM).
Default: dict().
**kwargs (keyword arguments, optional): Arguments for ResNet except
            ``depth``.
"""
def __init__(self, depth, num_segments, tam_cfg=dict(), **kwargs):
super().__init__(depth, **kwargs)
assert num_segments >= 3
self.num_segments = num_segments
self.tam_cfg = deepcopy(tam_cfg)
def init_weights(self):
super().init_weights()
self.make_tam_modeling()
def make_tam_modeling(self):
"""Replace ResNet-Block with TA-Block."""
def make_tam_block(stage, num_segments, tam_cfg=dict()):
blocks = list(stage.children())
for i, block in enumerate(blocks):
blocks[i] = TABlock(block, num_segments, deepcopy(tam_cfg))
return nn.Sequential(*blocks)
for i in range(self.num_stages):
layer_name = f'layer{i + 1}'
res_layer = getattr(self, layer_name)
setattr(self, layer_name,
make_tam_block(res_layer, self.num_segments, self.tam_cfg))
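# --- Editor's usage sketch (not part of the original file) ---
# TANet consumes [batch * num_segments, C, H, W] like other 2D recognizers,
# and init_weights() is what swaps the residual blocks for TA-Blocks; the
# shapes below are assumptions for a ResNet-50 backbone at 224x224.
if __name__ == '__main__':
    import torch
    model = TANet(depth=50, num_segments=8)
    model.init_weights()  # replaces every Bottleneck with a TABlock
    imgs = torch.randn(8, 3, 224, 224)  # one clip of 8 segments
    print(model(imgs).shape)  # expected: torch.Size([8, 2048, 7, 7])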
| 3,690
| 31.095652
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/backbones/x3d.py
|
import math
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, Swish, build_activation_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from ...utils import get_root_logger
from ..builder import BACKBONES
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(
channels, self.bottleneck, kernel_size=1, padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(
self.bottleneck, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width,
int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
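# Worked example (editor's addition): with channels=64 and reduction=1/16,
# _round_width scales 64 * (1 / 16) = 4 and then rounds to the nearest
# multiple of ``divisor`` (8), never going below ``min_width`` (8), so the
# squeeze bottleneck gets 8 channels rather than 4.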
class BlockX3D(nn.Module):
"""BlockX3D 3d building block for X3D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
        outplanes (int): Number of channels produced by the final conv3d
            layer.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type``.
            Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
inplanes,
planes,
outplanes,
spatial_stride=1,
downsample=None,
se_ratio=None,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.outplanes = outplanes
self.spatial_stride = spatial_stride
self.downsample = downsample
self.se_ratio = se_ratio
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.act_cfg_swish = dict(type='Swish')
self.with_cp = with_cp
self.conv1 = ConvModule(
in_channels=inplanes,
out_channels=planes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# Here we use the channel-wise conv
self.conv2 = ConvModule(
in_channels=planes,
out_channels=planes,
kernel_size=3,
stride=(1, self.spatial_stride, self.spatial_stride),
padding=1,
groups=planes,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.swish = Swish()
self.conv3 = ConvModule(
in_channels=planes,
out_channels=outplanes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
if self.se_ratio is not None:
self.se_module = SEModule(planes, self.se_ratio)
self.relu = build_activation_layer(self.act_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.se_ratio is not None:
out = self.se_module(out)
out = self.swish(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# We do not support initializing X3D with 2D pretrained weights.
@BACKBONES.register_module()
class X3D(nn.Module):
"""X3D backbone. https://arxiv.org/pdf/2004.04730.pdf.
Args:
gamma_w (float): Global channel width expansion factor. Default: 1.
gamma_b (float): Bottleneck channel width expansion factor. Default: 1.
gamma_d (float): Network depth expansion factor. Default: 1.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
            Default: ``(2, 2, 2, 2)``.
frozen_stages (int): Stages to be frozen (all param fixed). If set to
-1, it means not freezing any parameters. Default: -1.
se_style (str): The style of inserting SE modules into BlockX3D, 'half'
denotes insert into half of the blocks, while 'all' denotes insert
into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: 1 / 16.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
        conv_cfg (dict): Config for conv layers. Required keys are ``type``.
            Default: ``dict(type='Conv3d')``.
        norm_cfg (dict): Config for norm layers. Required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
gamma_w=1.0,
gamma_b=1.0,
gamma_d=1.0,
pretrained=None,
in_channels=3,
num_stages=4,
spatial_strides=(2, 2, 2, 2),
frozen_stages=-1,
se_style='half',
se_ratio=1 / 16,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.gamma_w = gamma_w
self.gamma_b = gamma_b
self.gamma_d = gamma_d
self.pretrained = pretrained
self.in_channels = in_channels
# Hard coded, can be changed by gamma_w
self.base_channels = 24
self.stage_blocks = [1, 2, 5, 3]
# apply parameters gamma_w and gamma_d
self.base_channels = self._round_width(self.base_channels,
self.gamma_w)
self.stage_blocks = [
self._round_repeats(x, self.gamma_d) for x in self.stage_blocks
]
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.spatial_strides = spatial_strides
assert len(spatial_strides) == num_stages
self.frozen_stages = frozen_stages
self.se_style = se_style
assert self.se_style in ['all', 'half']
self.se_ratio = se_ratio
assert (self.se_ratio is None) or (self.se_ratio > 0)
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block = BlockX3D
self.stage_blocks = self.stage_blocks[:num_stages]
self.layer_inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
inplanes = self.base_channels * 2**i
planes = int(inplanes * self.gamma_b)
res_layer = self.make_res_layer(
self.block,
self.layer_inplanes,
inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
se_style=self.se_style,
se_ratio=self.se_ratio,
use_swish=self.use_swish,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
with_cp=with_cp,
**kwargs)
self.layer_inplanes = inplanes
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.base_channels * 2**(len(self.stage_blocks) - 1)
self.conv5 = ConvModule(
self.feat_dim,
int(self.feat_dim * self.gamma_b),
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.feat_dim = int(self.feat_dim * self.gamma_b)
@staticmethod
def _round_width(width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters)
@staticmethod
def _round_repeats(repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
# the module is parameterized with gamma_b
# no temporal_stride
def make_res_layer(self,
block,
layer_inplanes,
inplanes,
planes,
blocks,
spatial_stride=1,
se_style='half',
se_ratio=None,
use_swish=True,
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
            conv_cfg (dict | None): Config for conv layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if spatial_stride != 1 or layer_inplanes != inplanes:
downsample = ConvModule(
layer_inplanes,
inplanes,
kernel_size=1,
stride=(1, spatial_stride, spatial_stride),
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
use_se = [False] * blocks
if self.se_style == 'all':
use_se = [True] * blocks
elif self.se_style == 'half':
use_se = [i % 2 == 0 for i in range(blocks)]
else:
raise NotImplementedError
layers = []
layers.append(
block(
layer_inplanes,
planes,
inplanes,
spatial_stride=spatial_stride,
downsample=downsample,
se_ratio=se_ratio if use_se[0] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
inplanes,
spatial_stride=1,
se_ratio=se_ratio if use_se[i] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1_s = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.conv1_t = ConvModule(
self.base_channels,
self.base_channels,
kernel_size=(5, 1, 1),
stride=(1, 1, 1),
padding=(2, 0, 0),
groups=self.base_channels,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1_s.eval()
self.conv1_t.eval()
for param in self.conv1_s.parameters():
param.requires_grad = False
for param in self.conv1_t.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, BlockX3D):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1_s(x)
x = self.conv1_t(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
x = self.conv5(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
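# --- Editor's usage sketch (not part of the original file) ---
# X3D-M-like expansion factors; the clip shape is an assumption. With the
# stem and all four stages striding by 2 spatially, a 224x224 input yields
# 7x7 feature maps, and conv5 expands the channels by gamma_b.
if __name__ == '__main__':
    import torch
    model = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2)
    model.init_weights()
    clip = torch.randn(1, 3, 16, 224, 224)  # [N, C, T, H, W]
    print(model(clip).shape)  # expected: torch.Size([1, 432, 16, 7, 7])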
| 19,116
| 35.482824
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/base.py
|
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from ...core import top_k_accuracy
from ..builder import build_loss
class AvgConsensus(nn.Module):
"""Average consensus module.
Args:
        dim (int): The dimension along which the consensus function is
            applied. Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
"""Defines the computation performed at every call."""
return x.mean(dim=self.dim, keepdim=True)
class BaseHead(nn.Module, metaclass=ABCMeta):
"""Base class for head.
All Head should subclass it.
All subclass should overwrite:
- Methods:``init_weights``, initializing weights in some modules.
- Methods:``forward``, supporting to forward both for training and testing.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss', loss_weight=1.0).
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
        label_smooth_eps (float): Epsilon used in label smoothing.
Reference: arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0),
multi_class=False,
label_smooth_eps=0.0):
super().__init__()
self.num_classes = num_classes
self.in_channels = in_channels
self.loss_type = loss_cls['type']
self.loss_cls = build_loss(loss_cls)
self.multi_class = multi_class
self.label_smooth_eps = label_smooth_eps
@abstractmethod
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
@abstractmethod
def forward(self, x):
"""Defines the computation performed at every call."""
def loss(self, cls_score, labels, **kwargs):
"""Calculate the loss given output ``cls_score``, target ``labels``.
Args:
cls_score (torch.Tensor): The output of the model.
labels (torch.Tensor): The target output of the model.
Returns:
dict: A dict containing field 'loss_cls'(mandatory)
and 'top1_acc', 'top5_acc'(optional).
"""
losses = dict()
if labels.shape == torch.Size([]):
labels = labels.unsqueeze(0)
elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
and cls_score.size()[0] == 1:
# Fix a bug when training with soft labels and batch size is 1.
            # When using soft labels, `labels` and `cls_score` share the same
# shape.
labels = labels.unsqueeze(0)
if not self.multi_class and cls_score.size() != labels.size():
top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(),
labels.detach().cpu().numpy(), (1, 5))
losses['top1_acc'] = torch.tensor(
top_k_acc[0], device=cls_score.device)
losses['top5_acc'] = torch.tensor(
top_k_acc[1], device=cls_score.device)
elif self.multi_class and self.label_smooth_eps != 0:
labels = ((1 - self.label_smooth_eps) * labels +
self.label_smooth_eps / self.num_classes)
loss_cls = self.loss_cls(cls_score, labels, **kwargs)
# loss_cls may be dictionary or single tensor
if isinstance(loss_cls, dict):
losses.update(loss_cls)
else:
losses['loss_cls'] = loss_cls
return losses
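# --- Editor's sketch (not part of the original file) ---
# BaseHead is abstract; a concrete head only needs ``init_weights`` and
# ``forward``. ``DummyHead`` below is a hypothetical minimal subclass used
# to illustrate the ``loss`` contract; all names and shapes are assumptions.
if __name__ == '__main__':
    class DummyHead(BaseHead):
        def __init__(self, num_classes, in_channels):
            super().__init__(num_classes, in_channels)
            self.fc = nn.Linear(in_channels, num_classes)
        def init_weights(self):
            nn.init.normal_(self.fc.weight, 0, 0.01)
            nn.init.constant_(self.fc.bias, 0)
        def forward(self, x):
            # x: [N, in_channels] -> scores: [N, num_classes]
            return self.fc(x)
    head = DummyHead(num_classes=10, in_channels=32)
    head.init_weights()
    scores = head(torch.randn(4, 32))
    labels = torch.randint(0, 10, (4, ))
    # Returns a dict with 'loss_cls' plus 'top1_acc' and 'top5_acc'.
    print(head.loss(scores, labels))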
| 3,854
| 34.045455
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/fbo_head.py
|
import copy
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from mmcv.utils import _BatchNorm
from mmaction.models.common import LFB
from mmaction.utils import get_root_logger
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class NonLocalLayer(nn.Module):
"""Non-local layer used in `FBONonLocal` is a variation of the vanilla non-
local block.
Args:
st_feat_channels (int): Channels of short-term features.
lt_feat_channels (int): Channels of long-term features.
        latent_channels (int): Channels of latent features.
        num_st_feat (int): Number of short-term RoI features.
        num_lt_feat (int): Number of long-term RoI features.
use_scale (bool): Whether to scale pairwise_weight by
`1/sqrt(latent_channels)`. Default: True.
        pre_activate (bool): Whether to use the activation function before
            upsampling. Default: True.
conv_cfg (Dict | None): The config dict for convolution layers. If
not specified, it will use `nn.Conv2d` for convolution layers.
Default: None.
        norm_cfg (Dict | None): The config dict for normalization layers.
Default: None.
dropout_ratio (float, optional): Probability of dropout layer.
Default: 0.2.
zero_init_out_conv (bool): Whether to use zero initialization for
out_conv. Default: False.
"""
def __init__(self,
st_feat_channels,
lt_feat_channels,
latent_channels,
num_st_feat,
num_lt_feat,
use_scale=True,
pre_activate=True,
pre_activate_with_ln=True,
conv_cfg=None,
norm_cfg=None,
dropout_ratio=0.2,
zero_init_out_conv=False):
super().__init__()
if conv_cfg is None:
conv_cfg = dict(type='Conv3d')
self.st_feat_channels = st_feat_channels
self.lt_feat_channels = lt_feat_channels
self.latent_channels = latent_channels
self.num_st_feat = num_st_feat
self.num_lt_feat = num_lt_feat
self.use_scale = use_scale
self.pre_activate = pre_activate
self.pre_activate_with_ln = pre_activate_with_ln
self.dropout_ratio = dropout_ratio
self.zero_init_out_conv = zero_init_out_conv
self.st_feat_conv = ConvModule(
self.st_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.lt_feat_conv = ConvModule(
self.lt_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.global_conv = ConvModule(
self.lt_feat_channels,
self.latent_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
if pre_activate:
self.ln = nn.LayerNorm([latent_channels, num_st_feat, 1, 1])
else:
self.ln = nn.LayerNorm([st_feat_channels, num_st_feat, 1, 1])
self.relu = nn.ReLU()
self.out_conv = ConvModule(
self.latent_channels,
self.st_feat_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
if self.dropout_ratio > 0:
self.dropout = nn.Dropout(self.dropout_ratio)
def init_weights(self, pretrained=None):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(pretrained, str):
logger = get_root_logger()
logger.info(f'load model from: {pretrained}')
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_out_conv:
constant_init(self.out_conv, 0, bias=0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, st_feat, lt_feat):
n, c = st_feat.size(0), self.latent_channels
num_st_feat, num_lt_feat = self.num_st_feat, self.num_lt_feat
theta = self.st_feat_conv(st_feat)
theta = theta.view(n, c, num_st_feat)
phi = self.lt_feat_conv(lt_feat)
phi = phi.view(n, c, num_lt_feat)
g = self.global_conv(lt_feat)
g = g.view(n, c, num_lt_feat)
# (n, num_st_feat, c), (n, c, num_lt_feat)
# -> (n, num_st_feat, num_lt_feat)
theta_phi = torch.matmul(theta.permute(0, 2, 1), phi)
if self.use_scale:
theta_phi /= c**0.5
p = theta_phi.softmax(dim=-1)
# (n, c, num_lt_feat), (n, num_lt_feat, num_st_feat)
# -> (n, c, num_st_feat, 1, 1)
out = torch.matmul(g, p.permute(0, 2, 1)).view(n, c, num_st_feat, 1, 1)
        # If activation is needed before out_conv, apply ReLU here;
        # otherwise apply ReLU outside the non-local layer.
if self.pre_activate:
if self.pre_activate_with_ln:
out = self.ln(out)
out = self.relu(out)
out = self.out_conv(out)
if not self.pre_activate:
out = self.ln(out)
if self.dropout_ratio > 0:
out = self.dropout(out)
return out
class FBONonLocal(nn.Module):
"""Non local feature bank operator.
Args:
st_feat_channels (int): Channels of short-term features.
lt_feat_channels (int): Channels of long-term features.
        latent_channels (int): Channels of latent features.
num_st_feat (int): Number of short-term roi features.
num_lt_feat (int): Number of long-term roi features.
num_non_local_layers (int): Number of non-local layers, which is
at least 1. Default: 2.
st_feat_dropout_ratio (float): Probability of dropout layer for
short-term features. Default: 0.2.
lt_feat_dropout_ratio (float): Probability of dropout layer for
long-term features. Default: 0.2.
pre_activate (bool): Whether to use the activation function before
upsampling in non local layers. Default: True.
zero_init_out_conv (bool): Whether to use zero initialization for
out_conv in NonLocalLayer. Default: False.
"""
def __init__(self,
st_feat_channels,
lt_feat_channels,
latent_channels,
num_st_feat,
num_lt_feat,
num_non_local_layers=2,
st_feat_dropout_ratio=0.2,
lt_feat_dropout_ratio=0.2,
pre_activate=True,
zero_init_out_conv=False):
super().__init__()
assert num_non_local_layers >= 1, (
'At least one non_local_layer is needed.')
self.st_feat_channels = st_feat_channels
self.lt_feat_channels = lt_feat_channels
self.latent_channels = latent_channels
self.num_st_feat = num_st_feat
self.num_lt_feat = num_lt_feat
self.num_non_local_layers = num_non_local_layers
self.st_feat_dropout_ratio = st_feat_dropout_ratio
self.lt_feat_dropout_ratio = lt_feat_dropout_ratio
self.pre_activate = pre_activate
self.zero_init_out_conv = zero_init_out_conv
self.st_feat_conv = nn.Conv3d(
st_feat_channels, latent_channels, kernel_size=1)
self.lt_feat_conv = nn.Conv3d(
lt_feat_channels, latent_channels, kernel_size=1)
if self.st_feat_dropout_ratio > 0:
self.st_feat_dropout = nn.Dropout(self.st_feat_dropout_ratio)
if self.lt_feat_dropout_ratio > 0:
self.lt_feat_dropout = nn.Dropout(self.lt_feat_dropout_ratio)
if not self.pre_activate:
self.relu = nn.ReLU()
self.non_local_layers = []
for idx in range(self.num_non_local_layers):
layer_name = f'non_local_layer_{idx + 1}'
self.add_module(
layer_name,
NonLocalLayer(
latent_channels,
latent_channels,
latent_channels,
num_st_feat,
num_lt_feat,
pre_activate=self.pre_activate,
zero_init_out_conv=self.zero_init_out_conv))
self.non_local_layers.append(layer_name)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
kaiming_init(self.st_feat_conv)
kaiming_init(self.lt_feat_conv)
for layer_name in self.non_local_layers:
non_local_layer = getattr(self, layer_name)
non_local_layer.init_weights(pretrained=pretrained)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, st_feat, lt_feat):
# prepare st_feat
st_feat = self.st_feat_conv(st_feat)
if self.st_feat_dropout_ratio > 0:
st_feat = self.st_feat_dropout(st_feat)
# prepare lt_feat
lt_feat = self.lt_feat_conv(lt_feat)
if self.lt_feat_dropout_ratio > 0:
lt_feat = self.lt_feat_dropout(lt_feat)
# fuse short-term and long-term features in NonLocal Layer
for layer_name in self.non_local_layers:
identity = st_feat
non_local_layer = getattr(self, layer_name)
nl_out = non_local_layer(st_feat, lt_feat)
nl_out = identity + nl_out
if not self.pre_activate:
nl_out = self.relu(nl_out)
st_feat = nl_out
return nl_out
class FBOAvg(nn.Module):
"""Avg pool feature bank operator."""
def __init__(self):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, None, None))
def init_weights(self, pretrained=None):
        # FBOAvg has no parameters to be initialized.
pass
def forward(self, st_feat, lt_feat):
out = self.avg_pool(lt_feat)
return out
class FBOMax(nn.Module):
"""Max pool feature bank operator."""
def __init__(self):
super().__init__()
self.max_pool = nn.AdaptiveMaxPool3d((1, None, None))
def init_weights(self, pretrained=None):
# FBOMax has no parameters to be initialized.
pass
def forward(self, st_feat, lt_feat):
out = self.max_pool(lt_feat)
return out
class FBOHead(nn.Module):
"""Feature Bank Operator Head.
Add feature bank operator for the spatiotemporal detection model to fuse
short-term features and long-term features.
Args:
lfb_cfg (Dict): The config dict for LFB which is used to sample
long-term features.
        fbo_cfg (Dict): The config dict for the feature bank operator (FBO).
            Its ``type`` key selects the FBO type, which must be one of the
            keys in ``fbo_dict``.
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
"""
fbo_dict = {'non_local': FBONonLocal, 'avg': FBOAvg, 'max': FBOMax}
def __init__(self,
lfb_cfg,
fbo_cfg,
temporal_pool_type='avg',
spatial_pool_type='max'):
super().__init__()
fbo_type = fbo_cfg.pop('type', 'non_local')
assert fbo_type in FBOHead.fbo_dict
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.lfb_cfg = copy.deepcopy(lfb_cfg)
self.fbo_cfg = copy.deepcopy(fbo_cfg)
self.lfb = LFB(**self.lfb_cfg)
self.fbo = self.fbo_dict[fbo_type](**self.fbo_cfg)
# Pool by default
if temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
def init_weights(self, pretrained=None):
"""Initialize the weights in the module.
Args:
pretrained (str, optional): Path to pre-trained weights.
Default: None.
"""
self.fbo.init_weights(pretrained=pretrained)
def sample_lfb(self, rois, img_metas):
"""Sample long-term features for each ROI feature."""
inds = rois[:, 0].type(torch.int64)
lt_feat_list = []
for ind in inds:
lt_feat_list.append(self.lfb[img_metas[ind]['img_key']].to())
lt_feat = torch.stack(lt_feat_list, dim=0)
# [N, lfb_channels, window_size * max_num_feat_per_step]
lt_feat = lt_feat.permute(0, 2, 1).contiguous()
return lt_feat.unsqueeze(-1).unsqueeze(-1)
def forward(self, x, rois, img_metas, **kwargs):
# [N, C, 1, 1, 1]
st_feat = self.temporal_pool(x)
st_feat = self.spatial_pool(st_feat)
identity = st_feat
# [N, C, window_size * num_feat_per_step, 1, 1]
lt_feat = self.sample_lfb(rois, img_metas).to(st_feat.device)
fbo_feat = self.fbo(st_feat, lt_feat)
out = torch.cat([identity, fbo_feat], dim=1)
return out
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(FBOHead)
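# --- Editor's shape check (not part of the original file) ---
# FBONonLocal fuses one short-term RoI feature with a window of long-term
# features; the channel and feature counts below are arbitrary assumptions.
if __name__ == '__main__':
    st = torch.randn(2, 2048, 1, 1, 1)  # [N, C_st, num_st_feat, 1, 1]
    lt = torch.randn(2, 2048, 60, 1, 1)  # [N, C_lt, num_lt_feat, 1, 1]
    fbo = FBONonLocal(
        st_feat_channels=2048,
        lt_feat_channels=2048,
        latent_channels=512,
        num_st_feat=1,
        num_lt_feat=60)
    fbo.init_weights()
    # Both inputs are projected to latent_channels before fusion.
    print(fbo(st, lt).shape)  # expected: torch.Size([2, 512, 1, 1, 1])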
| 14,120
| 34.390977
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/ssn_head.py
|
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
def parse_stage_config(stage_cfg):
"""Parse config of STPP for three stages.
Args:
stage_cfg (int | tuple[int]):
Config of structured temporal pyramid pooling.
Returns:
tuple[tuple[int], int]:
Config of structured temporal pyramid pooling and
            total number of parts (i.e. the number of multipliers).
"""
if isinstance(stage_cfg, int):
return (stage_cfg, ), stage_cfg
if isinstance(stage_cfg, tuple):
return stage_cfg, sum(stage_cfg)
raise ValueError(f'Incorrect STPP config {stage_cfg}')
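# Worked example (editor's addition): parse_stage_config(1) returns
# ((1,), 1), while parse_stage_config((1, 2)) returns ((1, 2), 3); the
# second element is the total number of pooling parts in that stage.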
class STPPTrain(nn.Module):
"""Structured temporal pyramid pooling for SSN at training.
Args:
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
num_segments_list (tuple): Number of segments to be sampled
in three stages. Default: (2, 5, 2).
"""
def __init__(self, stpp_stage=(1, (1, 2), 1), num_segments_list=(2, 5, 2)):
super().__init__()
starting_part, starting_multiplier = parse_stage_config(stpp_stage[0])
course_part, course_multiplier = parse_stage_config(stpp_stage[1])
ending_part, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
self.stpp_stages = (starting_part, course_part, ending_part)
self.multiplier_list = (starting_multiplier, course_multiplier,
ending_multiplier)
self.num_segments_list = num_segments_list
@staticmethod
def _extract_stage_feature(stage_feat, stage_parts, num_multipliers,
scale_factors, num_samples):
"""Extract stage feature based on structured temporal pyramid pooling.
Args:
stage_feat (torch.Tensor): Stage features to be STPP.
stage_parts (tuple): Config of STPP.
num_multipliers (int): Total number of parts in the stage.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
num_samples (int): Number of samples.
Returns:
torch.Tensor: Features of the stage.
"""
stage_stpp_feat = []
stage_len = stage_feat.size(1)
for stage_part in stage_parts:
ticks = torch.arange(0, stage_len + 1e-5,
stage_len / stage_part).int()
for i in range(stage_part):
part_feat = stage_feat[:, ticks[i]:ticks[i + 1], :].mean(
dim=1) / num_multipliers
if scale_factors is not None:
part_feat = (
part_feat * scale_factors.view(num_samples, 1))
stage_stpp_feat.append(part_feat)
return stage_stpp_feat
def forward(self, x, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor]:
Features for predicting activity scores and
completeness scores.
"""
x0 = self.num_segments_list[0]
x1 = x0 + self.num_segments_list[1]
num_segments = x1 + self.num_segments_list[2]
feat_dim = x.size(1)
x = x.view(-1, num_segments, feat_dim)
num_samples = x.size(0)
scale_factors = scale_factors.view(-1, 2)
stage_stpp_feats = []
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0],
self.multiplier_list[0],
scale_factors[:, 0], num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1],
self.multiplier_list[1], None,
num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2],
self.multiplier_list[2],
scale_factors[:, 1], num_samples))
stpp_feat = torch.cat(stage_stpp_feats, dim=1)
course_feat = x[:, x0:x1, :].mean(dim=1)
return course_feat, stpp_feat
class STPPTest(nn.Module):
"""Structured temporal pyramid pooling for SSN at testing.
Args:
num_classes (int): Number of classes to be classified.
use_regression (bool): Whether to perform regression or not.
Default: True.
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
"""
def __init__(self,
num_classes,
use_regression=True,
stpp_stage=(1, (1, 2), 1)):
super().__init__()
self.activity_score_len = num_classes + 1
self.complete_score_len = num_classes
self.reg_score_len = num_classes * 2
self.use_regression = use_regression
starting_parts, starting_multiplier = parse_stage_config(stpp_stage[0])
course_parts, course_multiplier = parse_stage_config(stpp_stage[1])
ending_parts, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
if self.use_regression:
self.feat_dim = (
self.activity_score_len + self.num_multipliers *
(self.complete_score_len + self.reg_score_len))
else:
self.feat_dim = (
self.activity_score_len +
self.num_multipliers * self.complete_score_len)
self.stpp_stage = (starting_parts, course_parts, ending_parts)
self.activity_slice = slice(0, self.activity_score_len)
self.complete_slice = slice(
self.activity_slice.stop, self.activity_slice.stop +
self.complete_score_len * self.num_multipliers)
self.reg_slice = slice(
self.complete_slice.stop, self.complete_slice.stop +
self.reg_score_len * self.num_multipliers)
@staticmethod
def _pyramids_pooling(out_scores, index, raw_scores, ticks, scale_factors,
score_len, stpp_stage):
"""Perform pyramids pooling.
Args:
out_scores (torch.Tensor): Scores to be returned.
index (int): Index of output scores.
raw_scores (torch.Tensor): Raw scores before STPP.
ticks (list): Ticks of raw scores.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
score_len (int): Length of the score.
stpp_stage (tuple): Config of STPP.
"""
offset = 0
for stage_idx, stage_cfg in enumerate(stpp_stage):
if stage_idx == 0:
scale_factor = scale_factors[0]
elif stage_idx == len(stpp_stage) - 1:
scale_factor = scale_factors[1]
else:
scale_factor = 1.0
sum_parts = sum(stage_cfg)
tick_left = ticks[stage_idx]
tick_right = float(max(ticks[stage_idx] + 1, ticks[stage_idx + 1]))
if tick_right <= 0 or tick_left >= raw_scores.size(0):
offset += sum_parts
continue
for num_parts in stage_cfg:
part_ticks = torch.arange(tick_left, tick_right + 1e-5,
(tick_right - tick_left) /
num_parts).int()
for i in range(num_parts):
part_tick_left = part_ticks[i]
part_tick_right = part_ticks[i + 1]
if part_tick_right - part_tick_left >= 1:
raw_score = raw_scores[part_tick_left:part_tick_right,
offset *
score_len:(offset + 1) *
score_len]
raw_scale_score = raw_score.mean(dim=0) * scale_factor
out_scores[index, :] += raw_scale_score.detach().cpu()
offset += 1
return out_scores
def forward(self, x, proposal_ticks, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
proposal_ticks (list): Ticks of proposals to be STPP.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
out_activity_scores (torch.Tensor): Activity scores
out_complete_scores (torch.Tensor): Completeness scores.
out_reg_scores (torch.Tensor): Regression scores.
"""
assert x.size(1) == self.feat_dim
num_ticks = proposal_ticks.size(0)
out_activity_scores = torch.zeros((num_ticks, self.activity_score_len),
dtype=x.dtype)
raw_activity_scores = x[:, self.activity_slice]
out_complete_scores = torch.zeros((num_ticks, self.complete_score_len),
dtype=x.dtype)
raw_complete_scores = x[:, self.complete_slice]
if self.use_regression:
out_reg_scores = torch.zeros((num_ticks, self.reg_score_len),
dtype=x.dtype)
raw_reg_scores = x[:, self.reg_slice]
else:
out_reg_scores = None
raw_reg_scores = None
for i in range(num_ticks):
ticks = proposal_ticks[i]
out_activity_scores[i, :] = raw_activity_scores[
ticks[1]:max(ticks[1] + 1, ticks[2]), :].mean(dim=0)
out_complete_scores = self._pyramids_pooling(
out_complete_scores, i, raw_complete_scores, ticks,
scale_factors[i], self.complete_score_len, self.stpp_stage)
if self.use_regression:
out_reg_scores = self._pyramids_pooling(
out_reg_scores, i, raw_reg_scores, ticks, scale_factors[i],
self.reg_score_len, self.stpp_stage)
return out_activity_scores, out_complete_scores, out_reg_scores
@HEADS.register_module()
class SSNHead(nn.Module):
"""The classification head for SSN.
Args:
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
in_channels (int): Number of channels for input data. Default: 1024.
num_classes (int): Number of classes to be classified. Default: 20.
consensus (dict): Config of segmental consensus.
use_regression (bool): Whether to perform regression or not.
Default: True.
init_std (float): Std value for Initiation. Default: 0.001.
"""
def __init__(self,
dropout_ratio=0.8,
in_channels=1024,
num_classes=20,
consensus=dict(
type='STPPTrain',
standalong_classifier=True,
stpp_cfg=(1, 1, 1),
num_seg=(2, 5, 2)),
use_regression=True,
init_std=0.001):
super().__init__()
self.dropout_ratio = dropout_ratio
self.num_classes = num_classes
self.use_regression = use_regression
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
        # The consensus config is copied so that the model can use different
        # structured temporal pyramid pooling at training and testing.
        # Warning: this copy cannot be removed.
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'STPPTrain':
self.consensus = STPPTrain(**consensus_)
elif consensus_type == 'STPPTest':
consensus_['num_classes'] = self.num_classes
self.consensus = STPPTest(**consensus_)
self.in_channels_activity = in_channels
self.in_channels_complete = (
self.consensus.num_multipliers * in_channels)
self.activity_fc = nn.Linear(in_channels, num_classes + 1)
self.completeness_fc = nn.Linear(self.in_channels_complete,
num_classes)
if self.use_regression:
self.regressor_fc = nn.Linear(self.in_channels_complete,
num_classes * 2)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.activity_fc, std=self.init_std)
normal_init(self.completeness_fc, std=self.init_std)
if self.use_regression:
normal_init(self.regressor_fc, std=self.init_std)
def prepare_test_fc(self, stpp_feat_multiplier):
"""Reorganize the shape of fully connected layer at testing, in order
to improve testing efficiency.
Args:
stpp_feat_multiplier (int): Total number of parts.
Returns:
bool: Whether the shape transformation is ready for testing.
"""
in_features = self.activity_fc.in_features
out_features = (
self.activity_fc.out_features +
self.completeness_fc.out_features * stpp_feat_multiplier)
if self.use_regression:
out_features += (
self.regressor_fc.out_features * stpp_feat_multiplier)
self.test_fc = nn.Linear(in_features, out_features)
# Fetch weight and bias of the reorganized fc.
complete_weight = self.completeness_fc.weight.data.view(
self.completeness_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0, 1).contiguous().view(-1, in_features)
complete_bias = self.completeness_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.completeness_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((self.activity_fc.weight.data, complete_weight))
bias = torch.cat((self.activity_fc.bias.data, complete_bias))
if self.use_regression:
reg_weight = self.regressor_fc.weight.data.view(
self.regressor_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0,
1).contiguous().view(-1, in_features)
reg_bias = self.regressor_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.regressor_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((weight, reg_weight))
bias = torch.cat((bias, reg_bias))
self.test_fc.weight.data = weight
self.test_fc.bias.data = bias
return True
def forward(self, x, test_mode=False):
"""Defines the computation performed at every call."""
if not test_mode:
x, proposal_scale_factor = x
activity_feat, completeness_feat = self.consensus(
x, proposal_scale_factor)
if self.dropout is not None:
activity_feat = self.dropout(activity_feat)
completeness_feat = self.dropout(completeness_feat)
activity_scores = self.activity_fc(activity_feat)
complete_scores = self.completeness_fc(completeness_feat)
if self.use_regression:
bbox_preds = self.regressor_fc(completeness_feat)
bbox_preds = bbox_preds.view(-1,
self.completeness_fc.out_features,
2)
else:
bbox_preds = None
return activity_scores, complete_scores, bbox_preds
x, proposal_tick_list, scale_factor_list = x
test_scores = self.test_fc(x)
(activity_scores, completeness_scores,
bbox_preds) = self.consensus(test_scores, proposal_tick_list,
scale_factor_list)
return (test_scores, activity_scores, completeness_scores, bbox_preds)
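# --- Editor's check (not part of the original file) ---
# For num_classes=20 and the default stpp_stage=(1, (1, 2), 1) there are
# 1 + 3 + 1 = 5 multipliers, so with regression enabled the testing head
# expects per-snippet features of size (20 + 1) + 5 * (20 + 2 * 20) = 321.
if __name__ == '__main__':
    stpp_test = STPPTest(num_classes=20)
    print(stpp_test.feat_dim)  # expected: 321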
| 16,778
| 39.627119
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/audio_tsn_head.py
|
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class AudioTSNHead(BaseHead):
"""Classification head for TSN on audio.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to handle arbitrary input sizes.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, h, w]
x = self.avg_pool(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
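# --- Editor's usage sketch (not part of the original file) ---
# The head pools a [N, in_channels, h, w] spectrogram feature map down to
# a vector before classification; the shapes below are assumptions.
if __name__ == '__main__':
    import torch
    head = AudioTSNHead(num_classes=10, in_channels=512)
    head.init_weights()
    feat = torch.randn(4, 512, 7, 7)
    print(head(feat).shape)  # expected: torch.Size([4, 10])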
| 2,421
| 31.72973
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/trn_head.py
|
import itertools
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
class RelationModule(nn.Module):
"""Relation Module of TRN.
Args:
hidden_dim (int): The dimension of hidden layer of MLP in relation
module.
num_segments (int): Number of frame segments.
num_classes (int): Number of classes to be classified.
"""
def __init__(self, hidden_dim, num_segments, num_classes):
super().__init__()
self.hidden_dim = hidden_dim
self.num_segments = num_segments
self.num_classes = num_classes
bottleneck_dim = 512
self.classifier = nn.Sequential(
nn.ReLU(),
nn.Linear(self.num_segments * self.hidden_dim, bottleneck_dim),
nn.ReLU(), nn.Linear(bottleneck_dim, self.num_classes))
def init_weights(self):
        # Use the default kaiming_uniform init for all nn.Linear layers.
pass
def forward(self, x):
# [N, num_segs * hidden_dim]
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class RelationModuleMultiScale(nn.Module):
"""Relation Module with Multi Scale of TRN.
Args:
hidden_dim (int): The dimension of hidden layer of MLP in relation
module.
num_segments (int): Number of frame segments.
num_classes (int): Number of classes to be classified.
"""
def __init__(self, hidden_dim, num_segments, num_classes):
super().__init__()
self.hidden_dim = hidden_dim
self.num_segments = num_segments
self.num_classes = num_classes
# generate the multiple frame relations
self.scales = range(num_segments, 1, -1)
self.relations_scales = []
self.subsample_scales = []
max_subsample = 3
for scale in self.scales:
# select the different frame features for different scales
relations_scale = list(
itertools.combinations(range(self.num_segments), scale))
self.relations_scales.append(relations_scale)
# sample `max_subsample` relation_scale at most
self.subsample_scales.append(
min(max_subsample, len(relations_scale)))
assert len(self.relations_scales[0]) == 1
bottleneck_dim = 256
self.fc_fusion_scales = nn.ModuleList()
for scale in self.scales:
fc_fusion = nn.Sequential(
nn.ReLU(), nn.Linear(scale * self.hidden_dim, bottleneck_dim),
nn.ReLU(), nn.Linear(bottleneck_dim, self.num_classes))
self.fc_fusion_scales.append(fc_fusion)
def init_weights(self):
        # Use the default kaiming_uniform init for all nn.Linear layers.
pass
def forward(self, x):
# the first one is the largest scale
act_all = x[:, self.relations_scales[0][0], :]
act_all = act_all.view(
act_all.size(0), self.scales[0] * self.hidden_dim)
act_all = self.fc_fusion_scales[0](act_all)
for scaleID in range(1, len(self.scales)):
# iterate over the scales
idx_relations_randomsample = np.random.choice(
len(self.relations_scales[scaleID]),
self.subsample_scales[scaleID],
replace=False)
for idx in idx_relations_randomsample:
act_relation = x[:, self.relations_scales[scaleID][idx], :]
act_relation = act_relation.view(
act_relation.size(0),
self.scales[scaleID] * self.hidden_dim)
act_relation = self.fc_fusion_scales[scaleID](act_relation)
act_all += act_relation
return act_all
@HEADS.register_module()
class TRNHead(BaseHead):
"""Class head for TRN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss. Default:
dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
relation_type (str): The relation module type. Choices are 'TRN' or
'TRNMultiScale'. Default: 'TRNMultiScale'.
hidden_dim (int): The dimension of hidden layer of MLP in relation
module. Default: 256.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
init_std (float): Std value for Initiation. Default: 0.001.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
relation_type='TRNMultiScale',
hidden_dim=256,
dropout_ratio=0.8,
init_std=0.001,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.num_classes = num_classes
self.in_channels = in_channels
self.num_segments = num_segments
self.spatial_type = spatial_type
self.relation_type = relation_type
self.hidden_dim = hidden_dim
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.relation_type == 'TRN':
self.consensus = RelationModule(self.hidden_dim, self.num_segments,
self.num_classes)
elif self.relation_type == 'TRNMultiScale':
self.consensus = RelationModuleMultiScale(self.hidden_dim,
self.num_segments,
self.num_classes)
else:
raise ValueError(f'Unknown Relation Type {self.relation_type}!')
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.hidden_dim)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to handle arbitrary input sizes.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
self.consensus.init_weights()
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TRNHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TRN models. The `self.num_segments` we need is a
hyper parameter to build TRN models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
        # [N * num_segs, hidden_dim]
cls_score = self.fc_cls(x)
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, num_classes]
cls_score = self.consensus(cls_score)
return cls_score
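# --- Editor's usage sketch (not part of the original file) ---
# With num_segments=8, RelationModuleMultiScale reasons over frame tuples
# of sizes 8, 7, ..., 2 and sums their scores; the shapes below follow the
# [N * num_segments, in_channels, 7, 7] convention noted in the docstring.
if __name__ == '__main__':
    head = TRNHead(num_classes=10, in_channels=256, num_segments=8)
    head.init_weights()
    feat = torch.randn(2 * 8, 256, 7, 7)
    # `num_segs` is unused by TRNHead (see the docstring above).
    print(head(feat, num_segs=8).shape)  # expected: torch.Size([2, 10])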
| 7,868
| 36.293839
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/tsm_head.py
|
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSMHead(BaseHead):
"""Class head for TSM.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
num_segments (int): Number of frame segments. Default: 8.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
        dropout_ratio (float): Probability of dropout layer. Default: 0.8.
        init_std (float): Std value for Initiation. Default: 0.001.
is_shift (bool): Indicating whether the feature is shifted.
Default: True.
temporal_pool (bool): Indicating whether feature is temporal pooled.
Default: False.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
num_segments=8,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.8,
init_std=0.001,
is_shift=True,
temporal_pool=False,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.num_segments = num_segments
self.init_std = init_std
self.is_shift = is_shift
self.temporal_pool = temporal_pool
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to handle arbitrary input sizes.
self.avg_pool = nn.AdaptiveAvgPool2d(1)
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Useless in TSMHead. By default, `num_segs`
is equal to `clip_len * num_clips * num_crops`, which is
automatically generated in Recognizer forward phase and
useless in TSM models. The `self.num_segments` we need is a
hyper parameter to build TSM models.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = torch.flatten(x, 1)
# [N * num_segs, in_channels]
if self.dropout is not None:
x = self.dropout(x)
# [N * num_segs, num_classes]
cls_score = self.fc_cls(x)
if self.is_shift and self.temporal_pool:
# [2 * N, num_segs // 2, num_classes]
cls_score = cls_score.view((-1, self.num_segments // 2) +
cls_score.size()[1:])
else:
# [N, num_segs, num_classes]
cls_score = cls_score.view((-1, self.num_segments) +
cls_score.size()[1:])
# [N, 1, num_classes]
cls_score = self.consensus(cls_score)
# [N, num_classes]
return cls_score.squeeze(1)
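# --- Editor's usage sketch (not part of the original file) ---
# AvgConsensus collapses the per-segment scores into one clip-level
# prediction; the shapes below assume an 8-segment ResNet-50 feature map.
if __name__ == '__main__':
    head = TSMHead(num_classes=10, in_channels=2048, num_segments=8)
    head.init_weights()
    feat = torch.randn(2 * 8, 2048, 7, 7)
    # `num_segs` is unused by TSMHead (see the docstring above).
    print(head(feat, num_segs=8).shape)  # expected: torch.Size([2, 10])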
| 4,170
| 36.241071
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/bbox_head.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.core.bbox import bbox_target
try:
from mmdet.models.builder import HEADS as MMDET_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class BBoxHeadAVA(nn.Module):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively.
Args:
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
in_channels (int): The number of input channels. Default: 2048.
focal_alpha (float): The hyper-parameter alpha for Focal Loss.
When alpha == 1 and gamma == 0, Focal Loss degenerates to
BCELossWithLogits. Default: 1.
focal_gamma (float): The hyper-parameter gamma for Focal Loss.
When alpha == 1 and gamma == 0, Focal Loss degenerates to
BCELossWithLogits. Default: 0.
num_classes (int): The number of classes. Default: 81.
dropout_ratio (float): A float in [0, 1], indicates the dropout_ratio.
Default: 0.
dropout_before_pool (bool): Dropout Feature before spatial temporal
pooling. Default: True.
topk (int or tuple[int]): Parameter for evaluating multilabel accuracy.
Default: (3, 5)
        multilabel (bool): Whether the task is multilabel. Default: True.
            (Only multilabel == True is supported for now.)
"""
def __init__(
self,
temporal_pool_type='avg',
spatial_pool_type='max',
in_channels=2048,
# The first class is reserved, to classify bbox as pos / neg
focal_gamma=0.,
focal_alpha=1.,
num_classes=81,
dropout_ratio=0,
dropout_before_pool=True,
topk=(3, 5),
multilabel=True):
super(BBoxHeadAVA, self).__init__()
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.temporal_pool_type = temporal_pool_type
self.spatial_pool_type = spatial_pool_type
self.in_channels = in_channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.dropout_before_pool = dropout_before_pool
self.multilabel = multilabel
self.focal_gamma = focal_gamma
self.focal_alpha = focal_alpha
if topk is None:
self.topk = ()
elif isinstance(topk, int):
self.topk = (topk, )
elif isinstance(topk, tuple):
assert all([isinstance(k, int) for k in topk])
self.topk = topk
else:
            raise TypeError('topk should be int or tuple[int], '
                            f'but got {type(topk)}')
        # Class 0 is ignored when calculating multilabel accuracy,
# so topk cannot be equal to num_classes
assert all([k < num_classes for k in self.topk])
# Handle AVA first
assert self.multilabel
in_channels = self.in_channels
# Pool by default
if self.temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if self.spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
if dropout_ratio > 0:
self.dropout = nn.Dropout(dropout_ratio)
self.fc_cls = nn.Linear(in_channels, num_classes)
self.debug_imgs = None
def init_weights(self):
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
def forward(self, x):
if self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = self.temporal_pool(x)
x = self.spatial_pool(x)
if not self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x)
# We do not predict bbox, so return None
return cls_score, None
@staticmethod
def get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = bbox_target(pos_proposals, neg_proposals,
pos_gt_labels, rcnn_train_cfg)
return cls_reg_targets
@staticmethod
def recall_prec(pred_vec, target_vec):
"""
Args:
pred_vec (tensor[N x C]): each element is either 0 or 1
target_vec (tensor[N x C]): each element is either 0 or 1
"""
correct = pred_vec & target_vec
# Seems torch 1.5 has no auto type conversion
recall = correct.sum(1) / target_vec.sum(1).float()
prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)
return recall.mean(), prec.mean()
def multi_label_accuracy(self, pred, target, thr=0.5):
pred = pred.sigmoid()
pred_vec = pred > thr
# Target is 0 or 1, so using 0.5 as the borderline is OK
target_vec = target > 0.5
recall_thr, prec_thr = self.recall_prec(pred_vec, target_vec)
recalls, precs = [], []
for k in self.topk:
_, pred_label = pred.topk(k, 1, True, True)
pred_vec = pred.new_full(pred.size(), 0, dtype=torch.bool)
num_sample = pred.shape[0]
for i in range(num_sample):
pred_vec[i, pred_label[i]] = 1
recall_k, prec_k = self.recall_prec(pred_vec, target_vec)
recalls.append(recall_k)
precs.append(prec_k)
return recall_thr, prec_thr, recalls, precs
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets=None,
bbox_weights=None,
reduce=True):
losses = dict()
if cls_score is not None:
# Only use the cls_score
labels = labels[:, 1:]
pos_inds = torch.sum(labels, dim=-1) > 0
cls_score = cls_score[pos_inds, 1:]
labels = labels[pos_inds]
bce_loss = F.binary_cross_entropy_with_logits
loss = bce_loss(cls_score, labels, reduction='none')
pt = torch.exp(-loss)
F_loss = self.focal_alpha * (1 - pt)**self.focal_gamma * loss
losses['loss_action_cls'] = torch.mean(F_loss)
recall_thr, prec_thr, recall_k, prec_k = self.multi_label_accuracy(
cls_score, labels, thr=0.5)
losses['recall@thr=0.5'] = recall_thr
losses['prec@thr=0.5'] = prec_thr
for i, k in enumerate(self.topk):
losses[f'recall@top{k}'] = recall_k[i]
losses[f'prec@top{k}'] = prec_k[i]
return losses
def get_det_bboxes(self,
rois,
cls_score,
img_shape,
flip=False,
crop_quadruple=None,
cfg=None):
# might be used by testing w. augmentation
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
assert self.multilabel
scores = cls_score.sigmoid() if cls_score is not None else None
bboxes = rois[:, 1:]
assert bboxes.shape[-1] == 4
# First reverse the flip
img_h, img_w = img_shape
if flip:
bboxes_ = bboxes.clone()
bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
bboxes = bboxes_
# Then normalize the bbox to [0, 1]
bboxes[:, 0::2] /= img_w
bboxes[:, 1::2] /= img_h
def _bbox_crop_undo(bboxes, crop_quadruple):
decropped = bboxes.clone()
if crop_quadruple is not None:
x1, y1, tw, th = crop_quadruple
decropped[:, 0::2] = bboxes[..., 0::2] * tw + x1
decropped[:, 1::2] = bboxes[..., 1::2] * th + y1
return decropped
bboxes = _bbox_crop_undo(bboxes, crop_quadruple)
return bboxes, scores
if mmdet_imported:
MMDET_HEADS.register_module()(BBoxHeadAVA)
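# Hedged numeric check (added for illustration; not part of the original
# file): with focal_alpha == 1 and focal_gamma == 0, the focal weighting
# in `BBoxHeadAVA.loss` reduces to plain BCEWithLogits, as the docstring
# claims. Tensor sizes are arbitrary example values.
if __name__ == '__main__':
    logits = torch.randn(4, 10)
    targets = torch.randint(0, 2, (4, 10)).float()
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    pt = torch.exp(-bce)
    focal = 1. * (1 - pt)**0. * bce  # alpha=1, gamma=0
    assert torch.allclose(focal, bce)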
| 8,768
| 34.358871
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/misc_head.py
|
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.utils import _BatchNorm
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
# Note: All these heads take 5D Tensors as input (N, C, T, H, W)
class ACRNHead(nn.Module):
"""ACRN Head: Tile + 1x1 convolution + 3x3 convolution.
This module is proposed in
`Actor-Centric Relation Network
<https://arxiv.org/abs/1807.10982>`_
Args:
in_channels (int): The input channel.
out_channels (int): The output channel.
stride (int): The spatial stride.
num_convs (int): The number of 3x3 convolutions in ACRNHead.
        conv_cfg (dict): Config for conv layers.
            Default: dict(type='Conv3d').
        norm_cfg (dict): Config for norm layers. Required keys are `type` and
            `requires_grad`. Default: dict(type='BN3d', requires_grad=True).
act_cfg (dict): Config for activate layers.
Default: dict(type='ReLU', inplace=True).
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
num_convs=1,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True)):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
self.num_convs = num_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.max_pool = nn.AdaptiveMaxPool3d(1)
self.conv1 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
assert num_convs >= 1
self.conv2 = ConvModule(
out_channels,
out_channels,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, 1, 1),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
convs = []
for _ in range(num_convs - 1):
conv = ConvModule(
out_channels,
out_channels,
kernel_size=(1, 3, 3),
padding=(0, 1, 1),
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
convs.append(conv)
self.convs = nn.ModuleList(convs)
def init_weights(self, **kwargs):
"""Weight Initialization for ACRNHead."""
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
def forward(self, x, feat, rois, **kwargs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The extracted RoI feature.
feat (torch.Tensor): The context feature.
rois (torch.Tensor): The regions of interest.
Returns:
torch.Tensor: The RoI features that have interacted with context
feature.
"""
# We use max pooling by default
x = self.max_pool(x)
h, w = feat.shape[-2:]
x_tile = x.repeat(1, 1, 1, h, w)
roi_inds = rois[:, 0].type(torch.long)
roi_gfeat = feat[roi_inds]
new_feat = torch.cat([x_tile, roi_gfeat], dim=1)
new_feat = self.conv1(new_feat)
new_feat = self.conv2(new_feat)
for conv in self.convs:
new_feat = conv(new_feat)
return new_feat
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(ACRNHead)
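# Hedged shape sketch (added for illustration; not part of the original
# file) of the tile-and-concat step in `ACRNHead.forward`: the pooled
# RoI feature is tiled over the context grid, then concatenated with
# the per-RoI context feature. Sizes are made-up example values, and the
# context feature is assumed to be temporally pooled (T == 1).
if __name__ == '__main__':
    n_rois, c, h, w = 2, 256, 16, 16
    roi_feat = torch.randn(n_rois, c, 4, 7, 7)   # extracted RoI features
    feat = torch.randn(1, c, 1, h, w)            # global context feature
    rois = torch.tensor([[0., 0., 0., 1., 1.]]).repeat(n_rois, 1)
    roi_feat = nn.AdaptiveMaxPool3d(1)(roi_feat)  # [n_rois, c, 1, 1, 1]
    x_tile = roi_feat.repeat(1, 1, 1, h, w)       # tile over (H, W)
    roi_gfeat = feat[rois[:, 0].long()]           # pick each RoI's clip
    new_feat = torch.cat([x_tile, roi_gfeat], dim=1)
    assert new_feat.shape == (n_rois, 2 * c, 1, h, w)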
| 4,040
| 29.613636
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/tpn_head.py
|
import torch.nn as nn
from ..builder import HEADS
from .tsn_head import TSNHead
@HEADS.register_module()
class TPNHead(TSNHead):
"""Class head for TPN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
init_std (float): Std value for Initiation. Default: 0.01.
multi_class (bool): Determines whether it is a multi-class
recognition task. Default: False.
label_smooth_eps (float): Epsilon used in label smooth.
Reference: https://arxiv.org/abs/1906.02629. Default: 0.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool3d = None
self.avg_pool2d = None
self.new_cls = None
def _init_new_cls(self):
self.new_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, 1, 0)
if next(self.fc_cls.parameters()).is_cuda:
self.new_cls = self.new_cls.cuda()
self.new_cls.weight.copy_(self.fc_cls.weight[..., None, None, None])
self.new_cls.bias.copy_(self.fc_cls.bias)
def forward(self, x, num_segs=None, fcn_test=False):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int | None): Number of segments into which a video
is divided. Default: None.
fcn_test (bool): Whether to apply full convolution (fcn) testing.
Default: False.
Returns:
torch.Tensor: The classification scores for input samples.
"""
if fcn_test:
if self.avg_pool3d:
x = self.avg_pool3d(x)
if self.new_cls is None:
self._init_new_cls()
cls_score_feat_map = self.new_cls(x)
return cls_score_feat_map
if self.avg_pool2d is None:
kernel_size = (1, x.shape[-2], x.shape[-1])
self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
if num_segs is None:
# [N, in_channels, 3, 7, 7]
x = self.avg_pool3d(x)
else:
# [N * num_segs, in_channels, 7, 7]
x = self.avg_pool2d(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
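# Hedged equivalence check (added for illustration; not part of the
# original file) for the idea behind `_init_new_cls`: a Linear layer on
# pooled features equals a 1x1x1 Conv3d carrying the same weights, which
# is what enables the fully-convolutional test path above.
if __name__ == '__main__':
    import torch
    fc = nn.Linear(64, 10)
    conv = nn.Conv3d(64, 10, 1, 1, 0)
    with torch.no_grad():
        conv.weight.copy_(fc.weight[..., None, None, None])
        conv.bias.copy_(fc.bias)
    x = torch.randn(3, 64, 1, 1, 1)
    assert torch.allclose(fc(x.flatten(1)), conv(x).flatten(1), atol=1e-6)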
| 3,306
| 35.340659
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/x3d_head.py
|
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class X3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
init_std (float): Std value for Initiation. Default: 0.01.
fc1_bias (bool): If the first fc layer has bias. Default: False.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
fc1_bias=False):
super().__init__(num_classes, in_channels, loss_cls)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.in_channels = in_channels
self.mid_channels = 2048
self.num_classes = num_classes
self.fc1_bias = fc1_bias
self.fc1 = nn.Linear(
self.in_channels, self.mid_channels, bias=self.fc1_bias)
self.fc2 = nn.Linear(self.mid_channels, self.num_classes)
self.relu = nn.ReLU()
self.pool = None
if self.spatial_type == 'avg':
self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
elif self.spatial_type == 'max':
self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
else:
raise NotImplementedError
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc1, std=self.init_std)
normal_init(self.fc2, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, T, H, W]
assert self.pool is not None
x = self.pool(x)
        # [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
x = self.fc1(x)
# [N, 2048]
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
cls_score = self.fc2(x)
# [N, num_classes]
return cls_score
| 2,837
| 30.533333
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/slowfast_head.py
|
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class SlowFastHead(BaseHead):
"""The classification head for SlowFast.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.8,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.spatial_type == 'avg':
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
        # ([N, channel_fast, T, H, W], [N, channel_slow, T, H, W])
x_fast, x_slow = x
# ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1])
x_fast = self.avg_pool(x_fast)
x_slow = self.avg_pool(x_slow)
# [N, channel_fast + channel_slow, 1, 1, 1]
x = torch.cat((x_slow, x_fast), dim=1)
if self.dropout is not None:
x = self.dropout(x)
        # [N, C]
x = x.view(x.size(0), -1)
        # [N, num_classes]
cls_score = self.fc_cls(x)
return cls_score
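# Hedged shape sketch (added for illustration; not part of the original
# file): the two pathway features are pooled to [N, C, 1, 1, 1] and
# concatenated along channels before the classifier. Channel counts are
# made-up example values.
if __name__ == '__main__':
    x_fast = torch.randn(2, 256, 32, 7, 7)
    x_slow = torch.randn(2, 2048, 4, 7, 7)
    pool = nn.AdaptiveAvgPool3d((1, 1, 1))
    fused = torch.cat((pool(x_slow), pool(x_fast)), dim=1)
    assert fused.shape == (2, 2048 + 256, 1, 1, 1)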
| 2,542
| 30.7875
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/tsn_head.py
|
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import AvgConsensus, BaseHead
@HEADS.register_module()
class TSNHead(BaseHead):
"""Class head for TSN.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss').
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
consensus (dict): Consensus config dict.
dropout_ratio (float): Probability of dropout layer. Default: 0.4.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.4,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'AvgConsensus':
self.consensus = AvgConsensus(**consensus_)
else:
self.consensus = None
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
else:
self.avg_pool = None
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x, num_segs):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
num_segs (int): Number of segments into which a video
is divided.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N * num_segs, in_channels, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N * num_segs, in_channels, 1, 1]
x = x.reshape((-1, num_segs) + x.shape[1:])
# [N, num_segs, in_channels, 1, 1]
x = self.consensus(x)
# [N, 1, in_channels, 1, 1]
x = x.squeeze(1)
# [N, in_channels, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1]
x = x.view(x.size(0), -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| 3,148
| 33.228261
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/__init__.py
|
from .audio_tsn_head import AudioTSNHead
from .base import BaseHead
from .bbox_head import BBoxHeadAVA
from .fbo_head import FBOHead
from .i3d_head import I3DHead
from .lfb_infer_head import LFBInferHead
from .misc_head import ACRNHead
from .roi_head import AVARoIHead
from .slowfast_head import SlowFastHead
from .ssn_head import SSNHead
from .tpn_head import TPNHead
from .trn_head import TRNHead
from .tsm_head import TSMHead
from .tsn_head import TSNHead
from .x3d_head import X3DHead
__all__ = [
'TSNHead', 'I3DHead', 'BaseHead', 'TSMHead', 'SlowFastHead', 'SSNHead',
'TPNHead', 'AudioTSNHead', 'X3DHead', 'BBoxHeadAVA', 'AVARoIHead',
'FBOHead', 'LFBInferHead', 'TRNHead', 'ACRNHead'
]
| 704
| 31.045455
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/lfb_infer_head.py
|
import os.path as osp
import mmcv
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import get_dist_info
try:
from mmdet.models.builder import SHARED_HEADS as MMDET_SHARED_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
class LFBInferHead(nn.Module):
"""Long-Term Feature Bank Infer Head.
This head is used to derive and save the LFB without affecting the input.
Args:
lfb_prefix_path (str): The prefix path to store the lfb.
dataset_mode (str, optional): Which dataset to be inferred. Choices are
'train', 'val' or 'test'. Default: 'train'.
use_half_precision (bool, optional): Whether to store the
half-precision roi features. Default: True.
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
"""
def __init__(self,
lfb_prefix_path,
dataset_mode='train',
use_half_precision=True,
temporal_pool_type='avg',
spatial_pool_type='max'):
super().__init__()
rank, _ = get_dist_info()
if rank == 0:
if not osp.exists(lfb_prefix_path):
print(f'lfb prefix path {lfb_prefix_path} does not exist. '
f'Creating the folder...')
mmcv.mkdir_or_exist(lfb_prefix_path)
print('\nInferring LFB...')
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.lfb_prefix_path = lfb_prefix_path
self.dataset_mode = dataset_mode
self.use_half_precision = use_half_precision
# Pool by default
if temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
self.all_features = []
self.all_metadata = []
def init_weights(self, pretrained=None):
# LFBInferHead has no parameters to be initialized.
pass
def forward(self, x, rois, img_metas, **kwargs):
# [N, C, 1, 1, 1]
features = self.temporal_pool(x)
features = self.spatial_pool(features)
if self.use_half_precision:
features = features.half()
inds = rois[:, 0].type(torch.int64)
for ind in inds:
self.all_metadata.append(img_metas[ind]['img_key'])
self.all_features += list(features)
        # Return the input directly; the input itself is not modified.
return x
def __del__(self):
assert len(self.all_features) == len(self.all_metadata), (
'features and metadata are not equal in length!')
rank, world_size = get_dist_info()
if world_size > 1:
dist.barrier()
_lfb = {}
for feature, metadata in zip(self.all_features, self.all_metadata):
video_id, timestamp = metadata.split(',')
timestamp = int(timestamp)
if video_id not in _lfb:
_lfb[video_id] = {}
if timestamp not in _lfb[video_id]:
_lfb[video_id][timestamp] = []
_lfb[video_id][timestamp].append(torch.squeeze(feature))
_lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path,
f'_lfb_{self.dataset_mode}_{rank}.pkl'))
torch.save(_lfb, _lfb_file_path)
print(f'{len(self.all_features)} features from {len(_lfb)} videos '
f'on GPU {rank} have been stored in {_lfb_file_path}.')
# Synchronizes all processes to make sure all gpus have stored their
# roi features
if world_size > 1:
dist.barrier()
if rank > 0:
return
print('Gathering all the roi features...')
lfb = {}
for rank_id in range(world_size):
_lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path,
f'_lfb_{self.dataset_mode}_{rank_id}.pkl'))
# Since each frame will only be distributed to one GPU,
# the roi features on the same timestamp of the same video are all
# on the same GPU
_lfb = torch.load(_lfb_file_path)
for video_id in _lfb:
if video_id not in lfb:
lfb[video_id] = _lfb[video_id]
else:
lfb[video_id].update(_lfb[video_id])
lfb_file_path = osp.normpath(
osp.join(self.lfb_prefix_path, f'lfb_{self.dataset_mode}.pkl'))
torch.save(lfb, lfb_file_path)
print(f'LFB has been constructed in {lfb_file_path}!')
if mmdet_imported:
MMDET_SHARED_HEADS.register_module()(LFBInferHead)
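# Hedged sketch (added for illustration; not part of the original file)
# of the nested dict that `__del__` stores: lfb[video_id][timestamp] is
# a list of pooled RoI features. The img_key strings below are made-up.
if __name__ == '__main__':
    lfb = {}
    for img_key, feat in [('vid_a,0902', torch.zeros(16)),
                          ('vid_a,0902', torch.ones(16)),
                          ('vid_b,0015', torch.ones(16))]:
        video_id, timestamp = img_key.split(',')
        lfb.setdefault(video_id, {}).setdefault(int(timestamp), []).append(feat)
    assert len(lfb['vid_a'][902]) == 2 and 15 in lfb['vid_b']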
| 5,150
| 34.280822
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/i3d_head.py
|
import torch.nn as nn
from mmcv.cnn import normal_init
from ..builder import HEADS
from .base import BaseHead
@HEADS.register_module()
class I3DHead(BaseHead):
"""Classification head for I3D.
Args:
num_classes (int): Number of classes to be classified.
in_channels (int): Number of channels in input feature.
loss_cls (dict): Config for building loss.
Default: dict(type='CrossEntropyLoss')
spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
dropout_ratio (float): Probability of dropout layer. Default: 0.5.
init_std (float): Std value for Initiation. Default: 0.01.
kwargs (dict, optional): Any keyword argument to be used to initialize
the head.
"""
def __init__(self,
num_classes,
in_channels,
loss_cls=dict(type='CrossEntropyLoss'),
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01,
**kwargs):
super().__init__(num_classes, in_channels, loss_cls, **kwargs)
self.spatial_type = spatial_type
self.dropout_ratio = dropout_ratio
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
if self.spatial_type == 'avg':
# use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = None
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.fc_cls, std=self.init_std)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The classification scores for input samples.
"""
# [N, in_channels, 4, 7, 7]
if self.avg_pool is not None:
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
| 2,446
| 32.067568
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/models/heads/roi_head.py
|
import numpy as np
from mmaction.core.bbox import bbox2result
from mmaction.utils import import_module_error_class
try:
from mmdet.core.bbox import bbox2roi
from mmdet.models import HEADS as MMDET_HEADS
from mmdet.models.roi_heads import StandardRoIHead
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
if mmdet_imported:
@MMDET_HEADS.register_module()
class AVARoIHead(StandardRoIHead):
def _bbox_forward(self, x, rois, img_metas):
"""Defines the computation performed to get bbox predictions.
Args:
x (torch.Tensor): The input tensor.
rois (torch.Tensor): The regions of interest.
                img_metas (list): The meta info of images.
Returns:
dict: bbox predictions with features and classification scores.
"""
bbox_feat, global_feat = self.bbox_roi_extractor(x, rois)
if self.with_shared_head:
bbox_feat = self.shared_head(
bbox_feat,
feat=global_feat,
rois=rois,
img_metas=img_metas)
cls_score, bbox_pred = self.bbox_head(bbox_feat)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feat)
return bbox_results
def _bbox_forward_train(self, x, sampling_results, gt_bboxes,
gt_labels, img_metas):
"""Run forward function and calculate loss for box head in
training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois, img_metas)
bbox_targets = self.bbox_head.get_targets(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Defines the computation performed for simple testing."""
assert self.with_bbox, 'Bbox head must be implemented.'
if isinstance(x, tuple):
x_shape = x[0].shape
else:
x_shape = x.shape
assert x_shape[0] == 1, 'only accept 1 sample at test mode'
assert x_shape[0] == len(img_metas) == len(proposal_list)
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(
det_bboxes,
det_labels,
self.bbox_head.num_classes,
thr=self.test_cfg.action_thr)
return [bbox_results]
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
bbox_results = self._bbox_forward(x, rois, img_metas)
cls_score = bbox_results['cls_score']
img_shape = img_metas[0]['img_shape']
crop_quadruple = np.array([0, 0, 1, 1])
flip = False
if 'crop_quadruple' in img_metas[0]:
crop_quadruple = img_metas[0]['crop_quadruple']
if 'flip' in img_metas[0]:
flip = img_metas[0]['flip']
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
rois,
cls_score,
img_shape,
flip=flip,
crop_quadruple=crop_quadruple,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
else:
# Just define an empty class, so that __init__ can import it.
@import_module_error_class('mmdet')
class AVARoIHead:
pass
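# Hedged sketch (added for illustration; not part of the original file)
# of what mmdet's `bbox2roi`, used in `_bbox_forward_train`, does
# conceptually: a batch-index column is prepended so RoIs from several
# samples can be stacked into one [K, 5] tensor. The boxes are made-up.
if __name__ == '__main__':
    import torch
    bbox_list = [torch.tensor([[0., 0., 10., 10.]]),
                 torch.tensor([[5., 5., 20., 20.], [1., 2., 3., 4.]])]
    rois = torch.cat([
        torch.cat([b.new_full((b.size(0), 1), i), b], dim=1)
        for i, b in enumerate(bbox_list)
    ])
    assert rois.shape == (3, 5) and rois[1, 0] == 1 and rois[2, 0] == 1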
| 4,487
| 35.487805
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/localization/bsn_utils.py
|
import os.path as osp
import numpy as np
from .proposal_utils import temporal_iop, temporal_iou
def generate_candidate_proposals(video_list,
video_infos,
tem_results_dir,
temporal_scale,
peak_threshold,
tem_results_ext='.csv',
result_dict=None):
"""Generate Candidate Proposals with given temporal evalutation results.
Each proposal file will contain:
'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'.
Args:
        video_list (list[int]): List of video indices to generate proposals.
video_infos (list[dict]): List of video_info dict that contains
'video_name', 'duration_frame', 'duration_second',
'feature_frame', and 'annotations'.
tem_results_dir (str): Directory to load temporal evaluation
results.
temporal_scale (int): The number (scale) on temporal axis.
peak_threshold (float): The threshold for proposal generation.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
        dict: A dict containing video_name as keys and proposal lists as
            values. If result_dict is not None, save the results to it.
"""
if tem_results_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
tscale = temporal_scale
tgap = 1. / tscale
proposal_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
start_scores = tem_results[:, 1]
end_scores = tem_results[:, 2]
max_start = max(start_scores)
max_end = max(end_scores)
start_bins = np.zeros(len(start_scores))
start_bins[[0, -1]] = 1
end_bins = np.zeros(len(end_scores))
end_bins[[0, -1]] = 1
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[
idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (peak_threshold * max_start):
start_bins[idx] = 1
if end_scores[idx] > end_scores[
idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (peak_threshold * max_end):
end_bins[idx] = 1
tmin_list = []
tmin_score_list = []
tmax_list = []
tmax_score_list = []
for idx in range(tscale):
if start_bins[idx] == 1:
tmin_list.append(tgap / 2 + tgap * idx)
tmin_score_list.append(start_scores[idx])
if end_bins[idx] == 1:
tmax_list.append(tgap / 2 + tgap * idx)
tmax_score_list.append(end_scores[idx])
new_props = []
for tmax, tmax_score in zip(tmax_list, tmax_score_list):
for tmin, tmin_score in zip(tmin_list, tmin_score_list):
if tmin >= tmax:
break
new_props.append([tmin, tmax, tmin_score, tmax_score])
new_props = np.stack(new_props)
score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1)
new_props = np.concatenate((new_props, score), axis=1)
new_props = new_props[new_props[:, -1].argsort()[::-1]]
video_info = video_infos[video_index]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
gt_tmins = []
gt_tmaxs = []
for annotations in video_info['annotations']:
gt_tmins.append(annotations['segment'][0] / corrected_second)
gt_tmaxs.append(annotations['segment'][1] / corrected_second)
new_iou_list = []
new_ioa_list = []
for new_prop in new_props:
new_iou = max(
temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_ioa = max(
temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs))
new_iou_list.append(new_iou)
new_ioa_list.append(new_ioa)
new_iou_list = np.array(new_iou_list).reshape(-1, 1)
new_ioa_list = np.array(new_ioa_list).reshape(-1, 1)
new_props = np.concatenate((new_props, new_iou_list), axis=1)
new_props = np.concatenate((new_props, new_ioa_list), axis=1)
proposal_dict[video_name] = new_props
if result_dict is not None:
result_dict[video_name] = new_props
return proposal_dict
def generate_bsp_feature(video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
top_k=1000,
bsp_boundary_ratio=0.2,
num_sample_start=8,
num_sample_end=8,
num_sample_action=16,
num_sample_interp=3,
tem_results_ext='.csv',
pgm_proposal_ext='.csv',
result_dict=None):
"""Generate Boundary-Sensitive Proposal Feature with given proposals.
Args:
        video_list (list[int]): List of video indices to generate bsp_feature.
video_infos (list[dict]): List of video_info dict that contains
'video_name'.
tem_results_dir (str): Directory to load temporal evaluation
results.
pgm_proposals_dir (str): Directory to load proposals.
top_k (int): Number of proposals to be considered. Default: 1000
bsp_boundary_ratio (float): Ratio for proposal boundary
(start/end). Default: 0.2.
num_sample_start (int): Num of samples for actionness in
start region. Default: 8.
num_sample_end (int): Num of samples for actionness in end region.
Default: 8.
num_sample_action (int): Num of samples for actionness in center
region. Default: 16.
num_sample_interp (int): Num of samples for interpolation for
each sample point. Default: 3.
tem_results_ext (str): File extension for temporal evaluation
model output. Default: '.csv'.
pgm_proposal_ext (str): File extension for proposals. Default: '.csv'.
result_dict (dict | None): The dict to save the results. Default: None.
Returns:
        bsp_feature_dict (dict): A dict containing video_name as keys and
            bsp_feature as values. If result_dict is not None, save the
            results to it.
"""
if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv':
raise NotImplementedError('Only support csv format now.')
bsp_feature_dict = {}
for video_index in video_list:
video_name = video_infos[video_index]['video_name']
# Load temporal evaluation results
tem_path = osp.join(tem_results_dir, video_name + tem_results_ext)
tem_results = np.loadtxt(
tem_path, dtype=np.float32, delimiter=',', skiprows=1)
score_action = tem_results[:, 0]
seg_tmins = tem_results[:, 3]
seg_tmaxs = tem_results[:, 4]
video_scale = len(tem_results)
video_gap = seg_tmaxs[0] - seg_tmins[0]
video_extend = int(video_scale / 4 + 10)
# Load proposals results
proposal_path = osp.join(pgm_proposals_dir,
video_name + pgm_proposal_ext)
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = pgm_proposals[:top_k]
# Generate temporal sample points
boundary_zeros = np.zeros([video_extend])
score_action = np.concatenate(
(boundary_zeros, score_action, boundary_zeros))
begin_tp = []
middle_tp = []
end_tp = []
for i in range(video_extend):
begin_tp.append(-video_gap / 2 -
(video_extend - 1 - i) * video_gap)
end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap)
for i in range(video_scale):
middle_tp.append(video_gap / 2 + i * video_gap)
t_points = begin_tp + middle_tp + end_tp
bsp_feature = []
for pgm_proposal in pgm_proposals:
tmin = pgm_proposal[0]
tmax = pgm_proposal[1]
tlen = tmax - tmin
# Temporal range for start
tmin_0 = tmin - tlen * bsp_boundary_ratio
tmin_1 = tmin + tlen * bsp_boundary_ratio
# Temporal range for end
tmax_0 = tmax - tlen * bsp_boundary_ratio
tmax_1 = tmax + tlen * bsp_boundary_ratio
# Generate features at start boundary
tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1)
tlen_start_sample = tlen_start / num_sample_interp
t_new = [
tmin_0 - tlen_start / 2 + tlen_start_sample * i
for i in range(num_sample_start * num_sample_interp + 1)
]
y_new_start_action = np.interp(t_new, t_points, score_action)
y_new_start = [
np.mean(y_new_start_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_start)
]
# Generate features at end boundary
tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1)
tlen_end_sample = tlen_end / num_sample_interp
t_new = [
tmax_0 - tlen_end / 2 + tlen_end_sample * i
for i in range(num_sample_end * num_sample_interp + 1)
]
y_new_end_action = np.interp(t_new, t_points, score_action)
y_new_end = [
np.mean(y_new_end_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_end)
]
# Generate features for action
tlen_action = (tmax - tmin) / (num_sample_action - 1)
tlen_action_sample = tlen_action / num_sample_interp
t_new = [
tmin - tlen_action / 2 + tlen_action_sample * i
for i in range(num_sample_action * num_sample_interp + 1)
]
y_new_action = np.interp(t_new, t_points, score_action)
y_new_action = [
np.mean(y_new_action[i * num_sample_interp:(i + 1) *
num_sample_interp + 1])
for i in range(num_sample_action)
]
feature = np.concatenate([y_new_action, y_new_start, y_new_end])
bsp_feature.append(feature)
bsp_feature = np.array(bsp_feature)
bsp_feature_dict[video_name] = bsp_feature
if result_dict is not None:
result_dict[video_name] = bsp_feature
return bsp_feature_dict
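# Hedged toy run (added for illustration; not part of the original file)
# of the peak-selection rule in `generate_candidate_proposals`: a
# position becomes a boundary candidate if it is a local maximum or if
# its score exceeds peak_threshold * max(score). Scores are made-up.
if __name__ == '__main__':
    scores = np.array([0.1, 0.8, 0.3, 0.2, 0.75, 0.7], dtype=np.float32)
    peak_threshold = 0.9
    bins = np.zeros(len(scores))
    bins[[0, -1]] = 1
    for idx in range(1, len(scores) - 1):
        if scores[idx] > scores[idx + 1] and scores[idx] > scores[idx - 1]:
            bins[idx] = 1
        elif scores[idx] > peak_threshold * scores.max():
            bins[idx] = 1
    assert bins.tolist() == [1, 1, 0, 0, 1, 1]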
| 11,496
| 41.899254
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/localization/proposal_utils.py
|
import numpy as np
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoU score between a groundtruth bbox and the proposals.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of iou scores.
"""
len_anchors = proposal_max - proposal_min
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
union_len = len_anchors - inter_len + gt_max - gt_min
jaccard = np.divide(inter_len, union_len)
return jaccard
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoP score between a groundtruth bbox and the proposals.
    Compute the IoP, which is defined as the intersection with the
    groundtruth divided by the duration of the proposal.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of intersection over anchor scores.
"""
len_anchors = np.array(proposal_max - proposal_min)
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
scores = np.divide(inter_len, len_anchors)
return scores
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k):
"""Soft NMS for temporal proposals.
Args:
proposals (np.ndarray): Proposals generated by network.
alpha (float): Alpha value of Gaussian decaying function.
low_threshold (float): Low threshold for soft nms.
high_threshold (float): High threshold for soft nms.
top_k (int): Top k values to be considered.
Returns:
np.ndarray: The updated proposals.
"""
proposals = proposals[proposals[:, -1].argsort()[::-1]]
tstart = list(proposals[:, 0])
tend = list(proposals[:, 1])
tscore = list(proposals[:, -1])
rstart = []
rend = []
rscore = []
while len(tscore) > 0 and len(rscore) <= top_k:
max_index = np.argmax(tscore)
max_width = tend[max_index] - tstart[max_index]
iou_list = temporal_iou(tstart[max_index], tend[max_index],
np.array(tstart), np.array(tend))
iou_exp_list = np.exp(-np.square(iou_list) / alpha)
for idx, _ in enumerate(tscore):
if idx != max_index:
current_iou = iou_list[idx]
if current_iou > low_threshold + (high_threshold -
low_threshold) * max_width:
tscore[idx] = tscore[idx] * iou_exp_list[idx]
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
rstart = np.array(rstart).reshape(-1, 1)
rend = np.array(rend).reshape(-1, 1)
rscore = np.array(rscore).reshape(-1, 1)
new_proposals = np.concatenate((rstart, rend, rscore), axis=1)
return new_proposals
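# Hedged worked example (added for illustration; not part of the
# original file) for `temporal_iou`: proposal [2, 6] and groundtruth
# [4, 8] intersect for 2 units out of a union of 6, so IoU = 1/3.
if __name__ == '__main__':
    iou = temporal_iou(np.array([2.]), np.array([6.]), 4., 8.)
    assert np.isclose(iou[0], 1. / 3.)
    # `temporal_iop` instead divides by the proposal length: 2 / 4.
    iop = temporal_iop(np.array([2.]), np.array([6.]), 4., 8.)
    assert np.isclose(iop[0], 0.5)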
| 3,450
| 35.326316
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/localization/__init__.py
|
from .bsn_utils import generate_bsp_feature, generate_candidate_proposals
from .proposal_utils import soft_nms, temporal_iop, temporal_iou
from .ssn_utils import (eval_ap, load_localize_proposal_file,
perform_regression, temporal_nms)
__all__ = [
'generate_candidate_proposals', 'generate_bsp_feature', 'temporal_iop',
'temporal_iou', 'soft_nms', 'load_localize_proposal_file',
'perform_regression', 'temporal_nms', 'eval_ap'
]
| 465
| 41.363636
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/localization/ssn_utils.py
|
from itertools import groupby
import numpy as np
from ..core import average_precision_at_temporal_iou
from . import temporal_iou
def load_localize_proposal_file(filename):
"""Load the proposal file and split it into many parts which contain one
video's information separately.
Args:
filename(str): Path to the proposal file.
Returns:
list: List of all videos' information.
"""
lines = list(open(filename))
# Split the proposal file into many parts which contain one video's
# information separately.
groups = groupby(lines, lambda x: x.startswith('#'))
video_infos = [[x.strip() for x in list(g)] for k, g in groups if not k]
def parse_group(video_info):
"""Parse the video's information.
Template information of a video in a standard file:
# index
video_id
num_frames
fps
num_gts
label, start_frame, end_frame
label, start_frame, end_frame
...
num_proposals
label, best_iou, overlap_self, start_frame, end_frame
label, best_iou, overlap_self, start_frame, end_frame
...
Example of a standard annotation file:
.. code-block:: txt
# 0
video_validation_0000202
5666
1
3
8 130 185
8 832 1136
8 1303 1381
5
8 0.0620 0.0620 790 5671
8 0.1656 0.1656 790 2619
8 0.0833 0.0833 3945 5671
8 0.0960 0.0960 4173 5671
8 0.0614 0.0614 3327 5671
Args:
video_info (list): Information of the video.
Returns:
tuple[str, int, list, list]:
video_id (str): Name of the video.
num_frames (int): Number of frames in the video.
gt_boxes (list): List of the information of gt boxes.
proposal_boxes (list): List of the information of
proposal boxes.
"""
offset = 0
video_id = video_info[offset]
offset += 1
num_frames = int(float(video_info[1]) * float(video_info[2]))
num_gts = int(video_info[3])
offset = 4
gt_boxes = [x.split() for x in video_info[offset:offset + num_gts]]
offset += num_gts
num_proposals = int(video_info[offset])
offset += 1
proposal_boxes = [
x.split() for x in video_info[offset:offset + num_proposals]
]
return video_id, num_frames, gt_boxes, proposal_boxes
return [parse_group(video_info) for video_info in video_infos]
def perform_regression(detections):
"""Perform regression on detection results.
Args:
detections (list): Detection results before regression.
Returns:
list: Detection results after regression.
"""
starts = detections[:, 0]
ends = detections[:, 1]
centers = (starts + ends) / 2
durations = ends - starts
new_centers = centers + durations * detections[:, 3]
new_durations = durations * np.exp(detections[:, 4])
new_detections = np.concatenate(
(np.clip(new_centers - new_durations / 2, 0,
1)[:, None], np.clip(new_centers + new_durations / 2, 0,
1)[:, None], detections[:, 2:]),
axis=1)
return new_detections
def temporal_nms(detections, threshold):
"""Parse the video's information.
Args:
detections (list): Detection results before NMS.
threshold (float): Threshold of NMS.
Returns:
list: Detection results after NMS.
"""
starts = detections[:, 0]
ends = detections[:, 1]
scores = detections[:, 2]
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ious = temporal_iou(starts[order[1:]], ends[order[1:]], starts[i],
ends[i])
idxs = np.where(ious <= threshold)[0]
order = order[idxs + 1]
return detections[keep, :]
def eval_ap(detections, gt_by_cls, iou_range):
"""Evaluate average precisions.
Args:
detections (dict): Results of detections.
        gt_by_cls (dict): Information of groundtruth.
iou_range (list): Ranges of iou.
Returns:
list: Average precision values of classes at ious.
"""
ap_values = np.zeros((len(detections), len(iou_range)))
for iou_idx, min_overlap in enumerate(iou_range):
for class_idx, _ in enumerate(detections):
ap = average_precision_at_temporal_iou(gt_by_cls[class_idx],
detections[class_idx],
[min_overlap])
ap_values[class_idx, iou_idx] = ap
return ap_values
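# Hedged worked example (added for illustration; not part of the
# original file) for `perform_regression`: a detection spanning
# [0.2, 0.4] with center offset 0.1 and log-duration offset ln(2)
# shifts its center to 0.3 + 0.2 * 0.1 = 0.32 and doubles its width,
# yielding [0.12, 0.52]. The score column (0.9) is untouched.
if __name__ == '__main__':
    det = np.array([[0.2, 0.4, 0.9, 0.1, np.log(2.)]])
    new_det = perform_regression(det)
    assert np.allclose(new_det[0, :3], [0.12, 0.52, 0.9])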
| 4,902
| 28.011834
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/base.py
|
import copy
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import mmcv
import numpy as np
import torch
from mmcv.utils import print_log
from torch.utils.data import Dataset
from ..core import (mean_average_precision, mean_class_accuracy,
mmit_mean_average_precision, top_k_accuracy)
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets to process video should subclass it.
All subclasses should overwrite:
    - Methods:`load_annotations`, which loads information from an
    annotation file.
- Methods:`prepare_train_frames`, providing train data.
- Methods:`prepare_test_frames`, providing test data.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
multi_class (bool): Determines whether the dataset is a multi-class
dataset. Default: False.
num_classes (int | None): Number of classes of the dataset, used in
multi-class datasets. Default: None.
        start_index (int): Specify a start index for frames, to accommodate
            different filename formats. However, when taking videos as input,
            it should be set to 0, since frames loaded from videos count
            from 0. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow', 'Audio'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: 0.
dynamic_length (bool): If the dataset length is dynamic (used by
ClassSpecificDistributedSampler). Default: False.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=0,
dynamic_length=False):
super().__init__()
self.ann_file = ann_file
self.data_prefix = osp.realpath(
data_prefix) if data_prefix is not None and osp.isdir(
data_prefix) else data_prefix
self.test_mode = test_mode
self.multi_class = multi_class
self.num_classes = num_classes
self.start_index = start_index
self.modality = modality
self.sample_by_class = sample_by_class
self.power = power
self.dynamic_length = dynamic_length
assert not (self.multi_class and self.sample_by_class)
self.pipeline = Compose(pipeline)
self.video_infos = self.load_annotations()
if self.sample_by_class:
self.video_infos_by_class = self.parse_by_class()
class_prob = []
for _, samples in self.video_infos_by_class.items():
class_prob.append(len(samples) / len(self.video_infos))
class_prob = [x**self.power for x in class_prob]
summ = sum(class_prob)
class_prob = [x / summ for x in class_prob]
self.class_prob = dict(zip(self.video_infos_by_class, class_prob))
@abstractmethod
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
# json annotations already looks like video_infos, so for each dataset,
# this func should be the same
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename'
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
if self.multi_class:
assert self.num_classes is not None
else:
assert len(video_infos[i]['label']) == 1
video_infos[i]['label'] = video_infos[i]['label'][0]
return video_infos
def parse_by_class(self):
video_infos_by_class = defaultdict(list)
for item in self.video_infos:
label = item['label']
video_infos_by_class[label].append(item)
return video_infos_by_class
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
def evaluate(self,
results,
metrics='top_k_accuracy',
metric_options=dict(top_k_accuracy=dict(topk=(1, 5))),
logger=None,
**deprecated_kwargs):
"""Perform evaluation for common datasets.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'top_k_accuracy'.
metric_options (dict): Dict for metric options. Options are
``topk`` for ``top_k_accuracy``.
Default: ``dict(top_k_accuracy=dict(topk=(1, 5)))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics has been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['top_k_accuracy'] = dict(
metric_options['top_k_accuracy'], **deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = [
'top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision',
'mmit_mean_average_precision'
]
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = OrderedDict()
gt_labels = [ann['label'] for ann in self.video_infos]
for metric in metrics:
msg = f'Evaluating {metric} ...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'top_k_accuracy':
topk = metric_options.setdefault('top_k_accuracy',
{}).setdefault(
'topk', (1, 5))
if not isinstance(topk, (int, tuple)):
raise TypeError('topk must be int or tuple of int, '
f'but got {type(topk)}')
if isinstance(topk, int):
topk = (topk, )
top_k_acc = top_k_accuracy(results, gt_labels, topk)
log_msg = []
for k, acc in zip(topk, top_k_acc):
eval_results[f'top{k}_acc'] = acc
log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric == 'mean_class_accuracy':
mean_acc = mean_class_accuracy(results, gt_labels)
eval_results['mean_class_accuracy'] = mean_acc
log_msg = f'\nmean_acc\t{mean_acc:.4f}'
print_log(log_msg, logger=logger)
continue
if metric in [
'mean_average_precision', 'mmit_mean_average_precision'
]:
gt_labels = [
self.label2array(self.num_classes, label)
for label in gt_labels
]
if metric == 'mean_average_precision':
mAP = mean_average_precision(results, gt_labels)
eval_results['mean_average_precision'] = mAP
log_msg = f'\nmean_average_precision\t{mAP:.4f}'
elif metric == 'mmit_mean_average_precision':
mAP = mmit_mean_average_precision(results, gt_labels)
eval_results['mmit_mean_average_precision'] = mAP
log_msg = f'\nmmit_mean_average_precision\t{mAP:.4f}'
print_log(log_msg, logger=logger)
continue
return eval_results
@staticmethod
def dump_results(results, out):
"""Dump data to json/yaml/pickle strings or files."""
return mmcv.dump(results, out)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
# If HVU, type(results['label']) is dict
if self.multi_class and isinstance(results['label'], list):
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def __getitem__(self, idx):
"""Get the sample for either training or testing given index."""
if self.test_mode:
return self.prepare_test_frames(idx)
return self.prepare_train_frames(idx)
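# Hedged sketch (added for illustration; not part of the original file)
# of `BaseDataset.label2array`, the one-hot encoding applied to labels
# before the mAP-style metrics in `evaluate` are computed.
if __name__ == '__main__':
    onehot = BaseDataset.label2array(5, [0, 3])
    assert np.allclose(onehot, [1., 0., 0., 1., 0.])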
| 11,612
| 39.322917
| 105
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/rawvideo_dataset.py
|
import copy
import os.path as osp
import random
import mmcv
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawVideoDataset(BaseDataset):
"""RawVideo dataset for action recognition, used in the Project OmniSource.
    The dataset loads clips of raw videos and applies specified transforms to
    return a dict containing the frame tensors and other information. Note
    that for this dataset, `multi_class` should be False.
    The ann_file is a text file with multiple lines, and each line indicates
    a sample video with the filepath (without suffix), label, number of clips
    and indices of positive clips (starting from 0), which are separated by
    whitespace. Raw videos should first be trimmed into 10-second clips,
    organized in the following format:
.. code-block:: txt
some/path/D32_1gwq35E/part_0.mp4
some/path/D32_1gwq35E/part_1.mp4
......
some/path/D32_1gwq35E/part_n.mp4
    Example of an annotation file:
.. code-block:: txt
some/path/D32_1gwq35E 66 10 0 1 2
some/path/-G-5CJ0JkKY 254 5 3 4
some/path/T4h1bvOd9DA 33 1 0
some/path/4uZ27ivBl00 341 2 0 1
some/path/0LfESFkfBSw 186 234 7 9 11
some/path/-YIsNpBEx6c 169 100 9 10 11
The first line indicates that the raw video `some/path/D32_1gwq35E` has
action label `66`, consists of 10 clips (from `part_0.mp4` to
`part_9.mp4`). The 1st, 2nd and 3rd clips are positive clips.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
sampling_strategy (str): The strategy to sample clips from raw videos.
Choices are 'random' or 'positive'. Default: 'positive'.
clipname_tmpl (str): The template of clip name in the raw video.
Default: 'part_{}.mp4'.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='positive',
**kwargs):
super().__init__(ann_file, pipeline, start_index=0, **kwargs)
assert self.multi_class is False
self.sampling_strategy = sampling_strategy
self.clipname_tmpl = clipname_tmpl
# If positive, we should only keep those raw videos with positive
# clips
if self.sampling_strategy == 'positive':
self.video_infos = [
x for x in self.video_infos if len(x['positive_clip_inds'])
]
# do not support multi_class
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_dir = line_split[0]
label = int(line_split[1])
num_clips = int(line_split[2])
positive_clip_inds = [int(ind) for ind in line_split[3:]]
if self.data_prefix is not None:
video_dir = osp.join(self.data_prefix, video_dir)
video_infos.append(
dict(
video_dir=video_dir,
label=label,
num_clips=num_clips,
positive_clip_inds=positive_clip_inds))
return video_infos
# do not support multi_class
def load_json_annotations(self):
"""Load json annotation file to get video information."""
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
path_key = 'video_dir'
for i in range(num_videos):
if self.data_prefix is not None:
path_value = video_infos[i][path_key]
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
return video_infos
def sample_clip(self, results):
"""Sample a clip from the raw video given the sampling strategy."""
assert self.sampling_strategy in ['positive', 'random']
if self.sampling_strategy == 'positive':
assert results['positive_clip_inds']
ind = random.choice(results['positive_clip_inds'])
else:
ind = random.randint(0, results['num_clips'] - 1)
clipname = self.clipname_tmpl.format(ind)
# if the first char of self.clipname_tmpl is a letter, use osp.join;
# otherwise, directly concat them
if self.clipname_tmpl[0].isalpha():
filename = osp.join(results['video_dir'], clipname)
else:
filename = results['video_dir'] + clipname
results['filename'] = filename
return results
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results = self.sample_clip(results)
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results = self.sample_clip(results)
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
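# Hedged sketch (added for illustration; not part of the original file)
# of the clip-path rule in `sample_clip`: a template starting with a
# letter is joined as a file inside the video directory, while any other
# template is appended to the directory string directly. POSIX path
# separators are assumed.
if __name__ == '__main__':
    video_dir = 'some/path/D32_1gwq35E'
    assert osp.join(video_dir, 'part_{}.mp4'.format(3)) == \
        'some/path/D32_1gwq35E/part_3.mp4'
    assert video_dir + '_{}.mp4'.format(3) == 'some/path/D32_1gwq35E_3.mp4'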
| 5,635
| 37.340136
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/audio_visual_dataset.py
|
import os.path as osp
from .builder import DATASETS
from .rawframe_dataset import RawframeDataset
@DATASETS.register_module()
class AudioVisualDataset(RawframeDataset):
"""Dataset that reads both audio and visual data, supporting both rawframes
and videos. The annotation file is same as that of the rawframe dataset,
such as:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
audio_prefix (str): Directory of the audio files.
kwargs (dict): Other keyword args for `RawframeDataset`. `video_prefix`
is also allowed if pipeline is designed for videos.
"""
def __init__(self, ann_file, pipeline, audio_prefix, **kwargs):
self.audio_prefix = audio_prefix
self.video_prefix = kwargs.pop('video_prefix', None)
self.data_prefix = kwargs.get('data_prefix', None)
super().__init__(ann_file, pipeline, **kwargs)
def load_annotations(self):
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.audio_prefix is not None:
audio_path = osp.join(self.audio_prefix,
frame_dir + '.npy')
video_info['audio_path'] = audio_path
if self.video_prefix:
video_path = osp.join(self.video_prefix,
frame_dir + '.mp4')
video_info['filename'] = video_path
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert len(label) != 0, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
| 3,073
| 38.922078
| 79
|
py
|