| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/pose_dataset.py
|
import os.path as osp
import mmcv
import numpy as np
from ..utils import get_root_logger
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class PoseDataset(BaseDataset):
"""Pose dataset for action recognition.
The dataset loads pose annotations and applies specified transforms to
return a dict containing pose information.
The ann_file is a pickle file that contains a list of annotations; the
fields of an annotation include frame_dir(video_id), total_frames, label,
kp, kpscore.
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
valid_ratio (float | None): The valid_ratio for videos in KineticsPose.
For a video with n frames, it is a valid training sample only if
n * valid_ratio frames have human pose. None means not applicable
(only applicable to Kinetics Pose). Default: None.
box_thr (str | None): The threshold for human proposals. Only boxes
with confidence score larger than `box_thr` are kept. None means
not applicable (only applicable to Kinetics Pose [ours]). Allowed
choices are '0.5', '0.6', '0.7', '0.8', '0.9'. Default: None.
class_prob (dict | None): The per class sampling probability. If not
None, it will override the class_prob calculated in
BaseDataset.__init__(). Default: None.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
valid_ratio=None,
box_thr=None,
class_prob=None,
**kwargs):
modality = 'Pose'
super().__init__(
ann_file, pipeline, start_index=0, modality=modality, **kwargs)
# box_thr, which should be a string
self.box_thr = box_thr
if self.box_thr is not None:
assert box_thr in ['0.5', '0.6', '0.7', '0.8', '0.9']
# Thresholding Training Examples
self.valid_ratio = valid_ratio
if self.valid_ratio is not None:
assert isinstance(self.valid_ratio, float)
if self.box_thr is None:
self.video_infos = [
x for x in self.video_infos
if x['valid_frames'] / x['total_frames'] >= valid_ratio
]
else:
key = f'valid@{self.box_thr}'
self.video_infos = [
x for x in self.video_infos
if x[key] / x['total_frames'] >= valid_ratio
]
if self.box_thr != '0.5':
box_thr = float(self.box_thr)
for item in self.video_infos:
inds = [
i for i, score in enumerate(item['box_score'])
if score >= box_thr
]
item['anno_inds'] = np.array(inds)
if class_prob is not None:
self.class_prob = class_prob
logger = get_root_logger()
logger.info(f'{len(self)} videos remain after valid thresholding')
def load_annotations(self):
"""Load annotation file to get video information."""
assert self.ann_file.endswith('.pkl')
return self.load_pkl_annotations()
def load_pkl_annotations(self):
data = mmcv.load(self.ann_file)
for item in data:
# Sometimes we may need to load anno from the file
if 'filename' in item:
item['filename'] = osp.join(self.data_prefix, item['filename'])
return data
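# Illustrative usage sketch (not part of the original file). The annotation
# path is a hypothetical placeholder and the pipeline is left empty for
# brevity; `box_thr` must be one of the string choices asserted in `__init__`:
#
#     >>> dataset = PoseDataset(
#     ...     ann_file='kinetics_pose_train.pkl',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     valid_ratio=0.3,
#     ...     box_thr='0.5')
#     >>> # videos with fewer than 0.3 * total_frames pose-valid frames are
#     >>> # dropped during construction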
| 3,757
| 36.959596
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/audio_dataset.py
|
import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class AudioDataset(BaseDataset):
"""Audio dataset for video recognition. Extracts the audio feature on-the-
fly. Annotation file can be that of the rawframe dataset, or:
.. code-block:: txt
some/directory-1.wav 163 1
some/directory-2.wav 122 1
some/directory-3.wav 258 2
some/directory-4.wav 234 2
some/directory-5.wav 295 3
some/directory-6.wav 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio file. Default: '.wav'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.wav', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename + self.suffix)
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
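# Illustrative usage sketch (not part of the original file); file names are
# hypothetical placeholders. Each annotation line follows the
# `<path> <total_frames> <label...>` format documented above, and the '.wav'
# suffix is appended when an entry lacks it:
#
#     >>> dataset = AudioDataset(
#     ...     ann_file='train_audio.txt',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     data_prefix='data/audios')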
| 2,555
| 35.514286
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/image_dataset.py
|
from .builder import DATASETS
from .video_dataset import VideoDataset
@DATASETS.register_module()
class ImageDataset(VideoDataset):
"""Image dataset for action recognition, used in the Project OmniSource.
The dataset loads an image list and applies specified transforms to return
a dict containing the image tensors and other information. For the
ImageDataset, the ann_file is a text file with multiple lines, and each
line indicates
the image path and the image label, which are split with a whitespace.
Example of an annotation file:
.. code-block:: txt
path/to/image1.jpg 1
path/to/image2.jpg 1
path/to/image3.jpg 2
path/to/image4.jpg 2
path/to/image5.jpg 3
path/to/image6.jpg 3
Example of a multi-class annotation file:
.. code-block:: txt
path/to/image1.jpg 1 3 5
path/to/image2.jpg 1 2
path/to/image3.jpg 2
path/to/image4.jpg 2 4 6 8
path/to/image5.jpg 3
path/to/image6.jpg 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self, ann_file, pipeline, **kwargs):
super().__init__(ann_file, pipeline, start_index=None, **kwargs)
# use `start_index=None` to indicate it is for `ImageDataset`
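# Illustrative usage sketch (not part of the original file); the annotation
# path is a hypothetical placeholder. With `multi_class=True` and
# `num_classes` set, every column after the image path is parsed as one
# label id of the sample:
#
#     >>> dataset = ImageDataset(
#     ...     ann_file='image_anns.txt',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     multi_class=True,
#     ...     num_classes=10)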
| 1,420
| 29.891304
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/audio_feature_dataset.py
|
import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class AudioFeatureDataset(BaseDataset):
"""Audio feature dataset for video recognition. Reads the features
extracted off-line. Annotation file can be that of the rawframe dataset,
or:
.. code-block:: txt
some/directory-1.npy 163 1
some/directory-2.npy 122 1
some/directory-3.npy 258 2
some/directory-4.npy 234 2
some/directory-5.npy 295 3
some/directory-6.npy 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
suffix (str): The suffix of the audio feature file. Default: '.npy'.
kwargs (dict): Other keyword args for `BaseDataset`.
"""
def __init__(self, ann_file, pipeline, suffix='.npy', **kwargs):
self.suffix = suffix
super().__init__(ann_file, pipeline, modality='Audio', **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix is not None:
if not filename.endswith(self.suffix):
filename = osp.join(self.data_prefix,
filename) + self.suffix
else:
filename = osp.join(self.data_prefix, filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
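# Illustrative usage sketch (not part of the original file); the paths are
# hypothetical placeholders. Entries without the '.npy' suffix get it
# appended after joining with `data_prefix`:
#
#     >>> dataset = AudioFeatureDataset(
#     ...     ann_file='train_audio_features.txt',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     data_prefix='data/audio_features')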
| 2,581
| 35.366197
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/ava_dataset.py
|
import copy
import os
import os.path as osp
from collections import defaultdict
from datetime import datetime
import mmcv
import numpy as np
from mmcv.utils import print_log
from ..core.evaluation.ava_utils import ava_eval, read_labelmap, results2csv
from ..utils import get_root_logger
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class AVADataset(BaseDataset):
"""AVA dataset for spatial temporal detection.
Based on official AVA annotation files, the dataset loads raw frames,
bounding boxes, proposals and applies specified transformations to return
a dict containing the frame tensors and other information.
This dataset can load information from the following files:
.. code-block:: txt
ann_file -> ava_{train, val}_{v2.1, v2.2}.csv
exclude_file -> ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv
label_file -> ava_action_list_{v2.1, v2.2}.pbtxt /
ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt
proposal_file -> ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl
Particularly, the proposal_file is a pickle file which contains
``img_key`` (in format of ``{video_id},{timestamp}``). Example of a pickle
file:
.. code-block:: JSON
{
...
'0f39OWEqJ24,0902':
array([[0.011 , 0.157 , 0.655 , 0.983 , 0.998163]]),
'0f39OWEqJ24,0912':
array([[0.054 , 0.088 , 0.91 , 0.998 , 0.068273],
[0.016 , 0.161 , 0.519 , 0.974 , 0.984025],
[0.493 , 0.283 , 0.981 , 0.984 , 0.983621]]),
...
}
Args:
ann_file (str): Path to the annotation file like
``ava_{train, val}_{v2.1, v2.2}.csv``.
exclude_file (str): Path to the excluded timestamp file like
``ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv``.
pipeline (list[dict | callable]): A sequence of data transforms.
label_file (str): Path to the label file like
``ava_action_list_{v2.1, v2.2}.pbtxt`` or
``ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt``.
Default: None.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
proposal_file (str): Path to the proposal file like
``ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl``.
Default: None.
person_det_score_thr (float): The threshold of person detection scores;
bboxes with scores above the threshold will be used. Default: 0.9.
Note that 0 <= person_det_score_thr <= 1. If no proposal has a
detection score larger than the threshold, the one with the largest
detection score will be used.
num_classes (int): The number of classes of the dataset. Default: 81.
(AVA has 80 action classes, another 1-dim is added for potential
usage)
custom_classes (list[int]): A subset of class ids from origin dataset.
Please note that 0 should NOT be selected, and ``num_classes``
should be equal to ``len(custom_classes) + 1``
data_prefix (str): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
num_max_proposals (int): Max proposals number to store. Default: 1000.
timestamp_start (int): The start point of included timestamps. The
default value is taken from the official website. Default: 900.
timestamp_end (int): The end point of included timestamps. The
default value is taken from the official website. Default: 1800.
"""
_FPS = 30
def __init__(self,
ann_file,
exclude_file,
pipeline,
label_file=None,
filename_tmpl='img_{:05}.jpg',
proposal_file=None,
person_det_score_thr=0.9,
num_classes=81,
custom_classes=None,
data_prefix=None,
test_mode=False,
modality='RGB',
num_max_proposals=1000,
timestamp_start=900,
timestamp_end=1800):
# since it inherits from `BaseDataset`, some arguments
# should be assigned before performing `load_annotations()`
self.custom_classes = custom_classes
if custom_classes is not None:
assert num_classes == len(custom_classes) + 1
assert 0 not in custom_classes
_, class_whitelist = read_labelmap(open(label_file))
assert set(custom_classes).issubset(class_whitelist)
self.custom_classes = tuple([0] + custom_classes)
self.exclude_file = exclude_file
self.label_file = label_file
self.proposal_file = proposal_file
assert 0 <= person_det_score_thr <= 1, (
'The value of '
'person_det_score_thr should be in [0, 1]. ')
self.person_det_score_thr = person_det_score_thr
self.num_classes = num_classes
self.filename_tmpl = filename_tmpl
self.num_max_proposals = num_max_proposals
self.timestamp_start = timestamp_start
self.timestamp_end = timestamp_end
self.logger = get_root_logger()
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
modality=modality,
num_classes=num_classes)
if self.proposal_file is not None:
self.proposals = mmcv.load(self.proposal_file)
else:
self.proposals = None
if not test_mode:
valid_indexes = self.filter_exclude_file()
self.logger.info(
f'{len(valid_indexes)} out of {len(self.video_infos)} '
f'frames are valid.')
self.video_infos = [self.video_infos[i] for i in valid_indexes]
def parse_img_record(self, img_records):
"""Merge image records of the same entity at the same time.
Args:
img_records (list[dict]): List of img_records (lines in AVA
annotations).
Returns:
tuple(list): A tuple consisting of lists of bboxes, action labels and
entity_ids.
"""
bboxes, labels, entity_ids = [], [], []
while len(img_records) > 0:
img_record = img_records[0]
num_img_records = len(img_records)
selected_records = list(
filter(
lambda x: np.array_equal(x['entity_box'], img_record[
'entity_box']), img_records))
num_selected_records = len(selected_records)
img_records = list(
filter(
lambda x: not np.array_equal(x['entity_box'], img_record[
'entity_box']), img_records))
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
selected_record['label']
for selected_record in selected_records
])
# The format can be directly used by BCELossWithLogits
label = np.zeros(self.num_classes, dtype=np.float32)
label[valid_labels] = 1.
labels.append(label)
entity_ids.append(img_record['entity_id'])
bboxes = np.stack(bboxes)
labels = np.stack(labels)
entity_ids = np.stack(entity_ids)
return bboxes, labels, entity_ids
def filter_exclude_file(self):
"""Filter out records in the exclude_file."""
valid_indexes = []
if self.exclude_file is None:
valid_indexes = list(range(len(self.video_infos)))
else:
exclude_video_infos = [
x.strip().split(',') for x in open(self.exclude_file)
]
for i, video_info in enumerate(self.video_infos):
valid_indexes.append(i)
for video_id, timestamp in exclude_video_infos:
if (video_info['video_id'] == video_id
and video_info['timestamp'] == int(timestamp)):
valid_indexes.pop()
break
return valid_indexes
def load_annotations(self):
"""Load AVA annotations."""
video_infos = []
records_dict_by_img = defaultdict(list)
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split(',')
label = int(line_split[6])
if self.custom_classes is not None:
if label not in self.custom_classes:
continue
label = self.custom_classes.index(label)
video_id = line_split[0]
timestamp = int(line_split[1])
img_key = f'{video_id},{timestamp:04d}'
entity_box = np.array(list(map(float, line_split[2:6])))
entity_id = int(line_split[7])
shot_info = (0, (self.timestamp_end - self.timestamp_start) *
self._FPS)
video_info = dict(
video_id=video_id,
timestamp=timestamp,
entity_box=entity_box,
label=label,
entity_id=entity_id,
shot_info=shot_info)
records_dict_by_img[img_key].append(video_info)
for img_key in records_dict_by_img:
video_id, timestamp = img_key.split(',')
bboxes, labels, entity_ids = self.parse_img_record(
records_dict_by_img[img_key])
ann = dict(
gt_bboxes=bboxes, gt_labels=labels, entity_ids=entity_ids)
frame_dir = video_id
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info = dict(
frame_dir=frame_dir,
video_id=video_id,
timestamp=int(timestamp),
img_key=img_key,
shot_info=shot_info,
fps=self._FPS,
ann=ann)
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
img_key = results['img_key']
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
results['timestamp_start'] = self.timestamp_start
results['timestamp_end'] = self.timestamp_end
if self.proposals is not None:
if img_key not in self.proposals:
results['proposals'] = np.array([[0, 0, 1, 1]])
results['scores'] = np.array([1])
else:
proposals = self.proposals[img_key]
assert proposals.shape[-1] in [4, 5]
if proposals.shape[-1] == 5:
thr = min(self.person_det_score_thr, max(proposals[:, 4]))
positive_inds = (proposals[:, 4] >= thr)
proposals = proposals[positive_inds]
proposals = proposals[:self.num_max_proposals]
results['proposals'] = proposals[:, :4]
results['scores'] = proposals[:, 4]
else:
proposals = proposals[:self.num_max_proposals]
results['proposals'] = proposals
ann = results.pop('ann')
results['gt_bboxes'] = ann['gt_bboxes']
results['gt_labels'] = ann['gt_labels']
results['entity_ids'] = ann['entity_ids']
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
img_key = results['img_key']
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
results['timestamp_start'] = self.timestamp_start
results['timestamp_end'] = self.timestamp_end
if self.proposals is not None:
if img_key not in self.proposals:
results['proposals'] = np.array([[0, 0, 1, 1]])
results['scores'] = np.array([1])
else:
proposals = self.proposals[img_key]
assert proposals.shape[-1] in [4, 5]
if proposals.shape[-1] == 5:
thr = min(self.person_det_score_thr, max(proposals[:, 4]))
positive_inds = (proposals[:, 4] >= thr)
proposals = proposals[positive_inds]
proposals = proposals[:self.num_max_proposals]
results['proposals'] = proposals[:, :4]
results['scores'] = proposals[:, 4]
else:
proposals = proposals[:self.num_max_proposals]
results['proposals'] = proposals
ann = results.pop('ann')
# Follow the mmdet variable naming style.
results['gt_bboxes'] = ann['gt_bboxes']
results['gt_labels'] = ann['gt_labels']
results['entity_ids'] = ann['entity_ids']
return self.pipeline(results)
def dump_results(self, results, out):
"""Dump predictions into a csv file."""
assert out.endswith('csv')
results2csv(self, results, out, self.custom_classes)
def evaluate(self,
results,
metrics=('mAP', ),
metric_options=None,
logger=None):
"""Evaluate the prediction results and report mAP."""
assert len(metrics) == 1 and metrics[0] == 'mAP', (
'For evaluation on AVADataset, you need to use the metric "mAP". '
'See https://github.com/open-mmlab/mmaction2/pull/567 '
'for more info.')
time_now = datetime.now().strftime('%Y%m%d_%H%M%S')
temp_file = f'AVA_{time_now}_result.csv'
results2csv(self, results, temp_file, self.custom_classes)
ret = {}
for metric in metrics:
msg = f'Evaluating {metric} ...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
eval_result = ava_eval(
temp_file,
metric,
self.label_file,
self.ann_file,
self.exclude_file,
custom_classes=self.custom_classes)
log_msg = []
for k, v in eval_result.items():
log_msg.append(f'\n{k}\t{v: .4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
ret.update(eval_result)
os.remove(temp_file)
return ret
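# Illustrative usage sketch (not part of the original file); the paths are
# hypothetical placeholders that follow the naming patterns listed in the
# class docstring. `evaluate` expects one prediction per loaded frame:
#
#     >>> dataset = AVADataset(
#     ...     ann_file='ava_train_v2.1.csv',
#     ...     exclude_file='ava_train_excluded_timestamps_v2.1.csv',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     label_file='ava_action_list_v2.1.pbtxt',
#     ...     proposal_file='ava_dense_proposals_train.FAIR.recall_93.9.pkl')
#     >>> # after inference: dataset.evaluate(results, metrics=('mAP', ))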
| 15,430
| 39.289817
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/blending_utils.py
|
from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from torch.distributions.beta import Beta
from .builder import BLENDINGS
__all__ = ['BaseMiniBatchBlending', 'MixupBlending', 'CutmixBlending', 'LabelSmoothing']
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
class BaseMiniBatchBlending(metaclass=ABCMeta):
"""Base class for Image Aliasing."""
def __init__(self, num_classes, smoothing=0.):
self.num_classes = num_classes
self.off_value = smoothing / self.num_classes
self.on_value = 1. - smoothing + self.off_value
@abstractmethod
def do_blending(self, imgs, label, **kwargs):
pass
def __call__(self, imgs, label, **kwargs):
"""Blending data in a mini-batch.
Images are float tensors with the shape of (B, N, C, H, W) for 2D
recognizers or (B, N, C, T, H, W) for 3D recognizers.
Besides, labels are converted from hard labels to soft labels.
Hard labels are integer tensors with the shape of (B, 1) and all of the
elements are in the range [0, num_classes - 1].
Soft labels (probability distribution over classes) are float tensors
with the shape of (B, 1, num_classes) and all of the elements are in
the range [0, 1].
Args:
imgs (torch.Tensor): Model input images, float tensor with the
shape of (B, N, C, H, W) or (B, N, C, T, H, W).
label (torch.Tensor): Hard labels, integer tensor with the shape
of (B, 1) and all elements are in range [0, num_classes).
kwargs (dict, optional): Other keyword argument to be used to
blending imgs and labels in a mini-batch.
Returns:
mixed_imgs (torch.Tensor): Blending images, float tensor with the
same shape of the input imgs.
mixed_label (torch.Tensor): Blended soft labels, float tensor with
the shape of (B, 1, num_classes) and all elements are in range
[0, 1].
"""
one_hot_label = one_hot(label, num_classes=self.num_classes, on_value=self.on_value, off_value=self.off_value, device=label.device)
mixed_imgs, mixed_label = self.do_blending(imgs, one_hot_label,
**kwargs)
return mixed_imgs, mixed_label
@BLENDINGS.register_module()
class MixupBlending(BaseMiniBatchBlending):
"""Implementing Mixup in a mini-batch.
This module is proposed in `mixup: Beyond Empirical Risk Minimization
<https://arxiv.org/abs/1710.09412>`_.
Code Reference https://github.com/open-mmlab/mmclassification/blob/master/mmcls/models/utils/mixup.py # noqa
Args:
num_classes (int): The number of classes.
alpha (float): Parameters for Beta distribution.
"""
def __init__(self, num_classes, alpha=.2, smoothing=0.):
super().__init__(num_classes=num_classes, smoothing=smoothing)
self.beta = Beta(alpha, alpha)
def do_blending(self, imgs, label, **kwargs):
"""Blending images with mixup."""
assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'
lam = self.beta.sample()
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index, :]
mixed_label = lam * label + (1 - lam) * label[rand_index, :]
return mixed_imgs, mixed_label
@BLENDINGS.register_module()
class CutmixBlending(BaseMiniBatchBlending):
"""Implementing Cutmix in a mini-batch.
This module is proposed in `CutMix: Regularization Strategy to Train Strong
Classifiers with Localizable Features <https://arxiv.org/abs/1905.04899>`_.
Code Reference https://github.com/clovaai/CutMix-PyTorch
Args:
num_classes (int): The number of classes.
alpha (float): Parameters for Beta distribution.
"""
def __init__(self, num_classes, alpha=.2, smoothing=0.):
super().__init__(num_classes=num_classes, smoothing=smoothing)
self.beta = Beta(alpha, alpha)
@staticmethod
def rand_bbox(img_size, lam):
"""Generate a random boudning box."""
w = img_size[-1]
h = img_size[-2]
cut_rat = torch.sqrt(1. - lam)
cut_w = torch.tensor(int(w * cut_rat))
cut_h = torch.tensor(int(h * cut_rat))
# uniform
cx = torch.randint(w, (1, ))[0]
cy = torch.randint(h, (1, ))[0]
bbx1 = torch.clamp(cx - cut_w // 2, 0, w)
bby1 = torch.clamp(cy - cut_h // 2, 0, h)
bbx2 = torch.clamp(cx + cut_w // 2, 0, w)
bby2 = torch.clamp(cy + cut_h // 2, 0, h)
return bbx1, bby1, bbx2, bby2
def do_blending(self, imgs, label, **kwargs):
"""Blending images with cutmix."""
assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'
batch_size = imgs.size(0)
rand_index = torch.randperm(batch_size)
lam = self.beta.sample()
bbx1, bby1, bbx2, bby2 = self.rand_bbox(imgs.size(), lam)
imgs[:, ..., bby1:bby2, bbx1:bbx2] = imgs[rand_index, ..., bby1:bby2,
bbx1:bbx2]
lam = 1 - (1.0 * (bbx2 - bbx1) * (bby2 - bby1) /
(imgs.size()[-1] * imgs.size()[-2]))
label = lam * label + (1 - lam) * label[rand_index, :]
return imgs, label
@BLENDINGS.register_module()
class LabelSmoothing(BaseMiniBatchBlending):
"""Label smoothing without mixing images. The smoothing itself is applied
by the one-hot conversion in ``BaseMiniBatchBlending.__call__``, so this
subclass simply passes the inputs through."""
def do_blending(self, imgs, label, **kwargs):
return imgs, label
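# Illustrative sketch (not part of the original file) of how a blending is
# applied to a mini-batch. The tensors are random placeholders; shapes follow
# the `__call__` docstring, and the soft labels come out as (B, num_classes):
#
#     >>> imgs = torch.randn(2, 1, 3, 8, 32, 32)  # (B, N, C, T, H, W)
#     >>> label = torch.tensor([0, 3])             # hard labels
#     >>> blending = MixupBlending(num_classes=5, alpha=0.2, smoothing=0.1)
#     >>> mixed_imgs, mixed_label = blending(imgs, label)
#     >>> mixed_imgs.shape, mixed_label.shape
#     (torch.Size([2, 1, 3, 8, 32, 32]), torch.Size([2, 5]))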
| 5,741
| 36.529412
| 139
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/rawframe_dataset.py
|
import copy
import os.path as osp
import torch
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
"""Rawframe dataset for action recognition.
The dataset loads raw frames and apply specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
the directory to frames of a video, total frames of the video and
the label of a video, which are split with a whitespace.
Example of an annotation file:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Example of a multi-class annotation file:
.. code-block:: txt
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
some/directory-4 234 2 4 6 8
some/directory-5 295 3
some/directory-6 121 3
Example of a with_offset annotation file (clips from long videos), each
line indicates the directory to frames of a video, the index of the start
frame, total frames of the video clip and the label of a video clip, which
are split with a whitespace.
.. code-block:: txt
some/directory-1 12 163 3
some/directory-2 213 122 4
some/directory-3 100 258 5
some/directory-4 98 234 2
some/directory-5 0 295 3
some/directory-6 50 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
with_offset (bool): Determines whether the offset information is in
ann_file. Default: False.
multi_class (bool): Determines whether it is a multi-class
recognition dataset. Default: False.
num_classes (int | None): Number of classes in the dataset.
Default: None.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float): The sampling probability of each sample is proportional
to a power of its label frequency (freq ^ power). `power == 1`
indicates uniformly sampling all data; `power == 0` indicates
uniformly sampling all classes. Default: 0.
dynamic_length (bool): If the dataset length is dynamic (used by
ClassSpecificDistributedSampler). Default: False.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
filename_tmpl='img_{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=0.,
dynamic_length=False):
self.filename_tmpl = filename_tmpl
self.with_offset = with_offset
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
multi_class,
num_classes,
start_index,
modality,
sample_by_class=sample_by_class,
power=power,
dynamic_length=dynamic_length)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
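# Illustrative usage sketch (not part of the original file); the annotation
# path is a hypothetical placeholder. With `with_offset=True` each line is
# parsed as `<frame_dir> <offset> <total_frames> <label...>`:
#
#     >>> dataset = RawframeDataset(
#     ...     ann_file='train_with_offset.txt',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     data_prefix='data/rawframes',
#     ...     with_offset=True)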
| 6,689
| 35.358696
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/dataset_wrappers.py
|
from .builder import DATASETS
@DATASETS.register_module()
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be ``times`` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
"""Get data."""
return self.dataset[idx % self._ori_len]
def __len__(self):
"""Length after repetition."""
return self.times * self._ori_len
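# Illustrative sketch (not part of the original file): any object with
# `__len__` and `__getitem__` can be wrapped, so a plain list is enough to
# see the modulo indexing:
#
#     >>> wrapped = RepeatDataset(dataset=['a', 'b', 'c'], times=4)
#     >>> len(wrapped)
#     12
#     >>> [wrapped[i] for i in range(5)]
#     ['a', 'b', 'c', 'a', 'b']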
| 833
| 25.903226
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/video_dataset.py
|
import os.path as osp
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class VideoDataset(BaseDataset):
"""Video dataset for action recognition.
The dataset loads raw videos and applies specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a text file with multiple lines, and each line indicates
a sample video with the filepath and label, which are split with a
whitespace. Example of an annotation file:
.. code-block:: txt
some/path/000.mp4 1
some/path/001.mp4 1
some/path/002.mp4 2
some/path/003.mp4 2
some/path/004.mp4 3
some/path/005.mp4 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
start_index (int): Specify a start index for frames in consideration of
different filename format. However, when taking videos as input,
it should be set to 0, since frames loaded from videos count
from 0. Default: 0.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
if ',' in line:
line_split = line.strip().split(",")
else:
line_split = line.strip().split()
if self.multi_class:
assert self.num_classes is not None
filename, label = line_split[0], line_split[1:]
label = list(map(int, label))
else:
filename, label = line_split
label = int(label)
if self.data_prefix is not None:
filename = osp.join(self.data_prefix, filename)
video_infos.append(dict(filename=filename, label=label))
return video_infos
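# Illustrative usage sketch (not part of the original file); the annotation
# path is a hypothetical placeholder. Lines may be comma- or
# whitespace-separated, as handled in `load_annotations`:
#
#     >>> dataset = VideoDataset(
#     ...     ann_file='train_videos.txt',
#     ...     pipeline=[],  # transform configs omitted in this sketch
#     ...     data_prefix='data/videos')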
| 2,323
| 35.3125
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/hvu_dataset.py
|
import copy
import os.path as osp
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from ..core import mean_average_precision
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class HVUDataset(BaseDataset):
"""HVU dataset, which supports the recognition tags of multiple categories.
Accept both video annotation files or rawframe annotation files.
The dataset loads videos or raw frames and applies specified transforms to
return a dict containing the frame tensors and other information.
The ann_file is a json file with multiple dictionaries, and each dictionary
indicates a sample video with the filename and tags; the tags are organized
into different categories. Example of a video dictionary:
.. code-block:: txt
{
'filename': 'gD_G1b0wV5I_001015_001035.mp4',
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Example of a rawframe dictionary:
.. code-block:: txt
{
'frame_dir': 'gD_G1b0wV5I_001015_001035',
'total_frames': 61,
'label': {
'concept': [250, 131, 42, 51, 57, 155, 122],
'object': [1570, 508],
'event': [16],
'action': [180],
'scene': [206]
}
}
Args:
ann_file (str): Path to the annotation file, should be a json file.
pipeline (list[dict | callable]): A sequence of data transforms.
tag_categories (list[str]): List of category names of tags.
tag_category_nums (list[int]): List of number of tags in each category.
filename_tmpl (str | None): Template for each filename. If set to None,
video dataset is used. Default: None.
**kwargs: Keyword arguments for ``BaseDataset``.
"""
def __init__(self,
ann_file,
pipeline,
tag_categories,
tag_category_nums,
filename_tmpl=None,
**kwargs):
assert len(tag_categories) == len(tag_category_nums)
self.tag_categories = tag_categories
self.tag_category_nums = tag_category_nums
self.filename_tmpl = filename_tmpl
self.num_categories = len(self.tag_categories)
self.num_tags = sum(self.tag_category_nums)
self.category2num = dict(zip(tag_categories, tag_category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] +
self.tag_category_nums[i])
self.category2startidx = dict(zip(tag_categories, self.start_idx))
self.start_index = kwargs.pop('start_index', 0)
self.dataset_type = None
super().__init__(
ann_file, pipeline, start_index=self.start_index, **kwargs)
def load_annotations(self):
"""Load annotation file to get video information."""
assert self.ann_file.endswith('.json')
return self.load_json_annotations()
def load_json_annotations(self):
video_infos = mmcv.load(self.ann_file)
num_videos = len(video_infos)
video_info0 = video_infos[0]
assert ('filename' in video_info0) != ('frame_dir' in video_info0)
path_key = 'filename' if 'filename' in video_info0 else 'frame_dir'
self.dataset_type = 'video' if path_key == 'filename' else 'rawframe'
if self.dataset_type == 'rawframe':
assert self.filename_tmpl is not None
for i in range(num_videos):
path_value = video_infos[i][path_key]
if self.data_prefix is not None:
path_value = osp.join(self.data_prefix, path_value)
video_infos[i][path_key] = path_value
# We will convert label to torch tensors in the pipeline
video_infos[i]['categories'] = self.tag_categories
video_infos[i]['category_nums'] = self.tag_category_nums
if self.dataset_type == 'rawframe':
video_infos[i]['filename_tmpl'] = self.filename_tmpl
video_infos[i]['start_index'] = self.start_index
video_infos[i]['modality'] = self.modality
return video_infos
@staticmethod
def label2array(num, label):
arr = np.zeros(num, dtype=np.float32)
arr[label] = 1.
return arr
def evaluate(self,
results,
metrics='mean_average_precision',
metric_options=None,
logger=None):
"""Evaluation in HVU Video Dataset. We only support evaluating mAP for
each tag categories. Since some tag categories are missing for some
videos, we can not evaluate mAP for all tags.
Args:
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mean_average_precision'.
metric_options (dict | None): Dict for metric options.
Default: None.
logger (logging.Logger | None): Logger for recording.
Default: None.
Returns:
dict: Evaluation results dict.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
# There should be only one metric in the metrics list:
# 'mean_average_precision'
assert len(metrics) == 1
metric = metrics[0]
assert metric == 'mean_average_precision'
gt_labels = [ann['label'] for ann in self.video_infos]
eval_results = OrderedDict()
for category in self.tag_categories:
start_idx = self.category2startidx[category]
num = self.category2num[category]
preds = [
result[start_idx:start_idx + num]
for video_idx, result in enumerate(results)
if category in gt_labels[video_idx]
]
gts = [
gt_label[category] for gt_label in gt_labels
if category in gt_label
]
gts = [self.label2array(num, item) for item in gts]
mAP = mean_average_precision(preds, gts)
eval_results[f'{category}_mAP'] = mAP
log_msg = f'\n{category}_mAP\t{mAP:.4f}'
print_log(log_msg, logger=logger)
return eval_results
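# Illustrative sketch (not part of the original file): `label2array` turns a
# sparse list of tag ids into the dense multi-hot vector that
# `mean_average_precision` consumes as the ground truth:
#
#     >>> HVUDataset.label2array(6, [0, 3, 5])
#     array([1., 0., 0., 1., 0., 1.], dtype=float32)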
| 7,052
| 35.734375
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/ssn_dataset.py
|
import copy
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..core import softmax
from ..localization import (eval_ap, load_localize_proposal_file,
perform_regression, temporal_iou, temporal_nms)
from ..utils import get_root_logger
from .base import BaseDataset
from .builder import DATASETS
class SSNInstance:
"""Proposal instance of SSN.
Args:
start_frame (int): Index of the proposal's start frame.
end_frame (int): Index of the proposal's end frame.
num_video_frames (int): Total frames of the video.
label (int | None): The category label of the proposal. Default: None.
best_iou (float): The highest IOU with the groundtruth instance.
Default: 0.
overlap_self (float): Percent of the proposal's own span contained
in a groundtruth instance. Default: 0.
"""
def __init__(self,
start_frame,
end_frame,
num_video_frames,
label=None,
best_iou=0,
overlap_self=0):
self.start_frame = start_frame
self.end_frame = min(end_frame, num_video_frames)
self.num_video_frames = num_video_frames
self.label = label if label is not None else -1
self.coverage = (end_frame - start_frame) / num_video_frames
self.best_iou = best_iou
self.overlap_self = overlap_self
self.loc_reg = None
self.size_reg = None
self.regression_targets = [0., 0.]
def compute_regression_targets(self, gt_list):
"""Compute regression targets of positive proposals.
Args:
gt_list (list): The list of groundtruth instances.
"""
# Find the groundtruth instance with the highest IOU.
ious = [
temporal_iou(self.start_frame, self.end_frame, gt.start_frame,
gt.end_frame) for gt in gt_list
]
best_gt = gt_list[np.argmax(ious)]
# interval: [start_frame, end_frame)
proposal_center = (self.start_frame + self.end_frame - 1) / 2
gt_center = (best_gt.start_frame + best_gt.end_frame - 1) / 2
proposal_size = self.end_frame - self.start_frame
gt_size = best_gt.end_frame - best_gt.start_frame
# Get regression targets:
# (1). Localization regression target:
# center shift proportional to the proposal duration
# (2). Duration/Size regression target:
# logarithm of the groundtruth duration over proposal duration
self.loc_reg = (gt_center - proposal_center) / proposal_size
self.size_reg = np.log(gt_size / proposal_size)
self.regression_targets = ([self.loc_reg, self.size_reg]
if self.loc_reg is not None else [0., 0.])
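# Illustrative sketch (not part of the original file): a worked example of the
# regression targets above with hand-picked frame numbers. For a proposal
# [40, 80) whose best-matching groundtruth is [50, 90):
#
#     >>> gt = SSNInstance(50, 90, num_video_frames=100, label=1, best_iou=1.)
#     >>> prop = SSNInstance(40, 80, num_video_frames=100, label=1,
#     ...                    best_iou=0.6)
#     >>> prop.compute_regression_targets([gt])
#     >>> prop.loc_reg  # (69.5 - 59.5) / 40: center shift over proposal size
#     0.25
#     >>> prop.size_reg  # log(40 / 40): equal durations
#     0.0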
@DATASETS.register_module()
class SSNDataset(BaseDataset):
"""Proposal frame dataset for Structured Segment Networks.
Based on proposal information, the dataset loads raw frames and applies
specified transforms to return a dict containing the frame tensors and
other information.
The ann_file is a text file with multiple lines and each
video's information takes up several lines. This file can be a normalized
file with percentages or a standard file with specific frame indexes. If
the file is a normalized file, it will be converted into a standard file
first.
Template information of a video in a standard file:
.. code-block:: txt
# index
video_id
num_frames
fps
num_gts
label, start_frame, end_frame
label, start_frame, end_frame
...
num_proposals
label, best_iou, overlap_self, start_frame, end_frame
label, best_iou, overlap_self, start_frame, end_frame
...
Example of a standard annotation file:
.. code-block:: txt
# 0
video_validation_0000202
5666
1
3
8 130 185
8 832 1136
8 1303 1381
5
8 0.0620 0.0620 790 5671
8 0.1656 0.1656 790 2619
8 0.0833 0.0833 3945 5671
8 0.0960 0.0960 4173 5671
8 0.0614 0.0614 3327 5671
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
train_cfg (dict): Config for training.
test_cfg (dict): Config for testing.
data_prefix (str): Path to a directory where videos are held.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
start_index (int): Specify a start index for frames in consideration of
different filename format. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
video_centric (bool): Whether to sample proposals just from
this video or sample proposals randomly from the entire dataset.
Default: True.
reg_normalize_constants (list): Regression target normalized constants,
including mean and standard deviation of location and duration.
body_segments (int): Number of segments in course period.
Default: 5.
aug_segments (list[int]): Number of segments in starting and
ending period. Default: (2, 2).
aug_ratio (int | float | tuple[int | float]): The ratio of the length
of augmentation to that of the proposal. Default: (0.5, 0.5).
clip_len (int): Frames of each sampled output clip.
Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
filter_gt (bool): Whether to filter videos with no annotation
during training. Default: True.
use_regression (bool): Whether to perform regression. Default: True.
verbose (bool): Whether to print full information or not.
Default: False.
"""
def __init__(self,
ann_file,
pipeline,
train_cfg,
test_cfg,
data_prefix,
test_mode=False,
filename_tmpl='img_{:05d}.jpg',
start_index=1,
modality='RGB',
video_centric=True,
reg_normalize_constants=None,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=(0.5, 0.5),
clip_len=1,
frame_interval=1,
filter_gt=True,
use_regression=True,
verbose=False):
self.logger = get_root_logger()
super().__init__(
ann_file,
pipeline,
data_prefix=data_prefix,
test_mode=test_mode,
start_index=start_index,
modality=modality)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.assigner = train_cfg.ssn.assigner
self.sampler = train_cfg.ssn.sampler
self.evaluater = test_cfg.ssn.evaluater
self.verbose = verbose
self.filename_tmpl = filename_tmpl
if filter_gt or not test_mode:
valid_inds = [
i for i, video_info in enumerate(self.video_infos)
if len(video_info['gts']) > 0
]
self.logger.info(f'{len(valid_inds)} out of {len(self.video_infos)} '
f'videos are valid.')
self.video_infos = [self.video_infos[i] for i in valid_inds]
# construct three pools:
# 1. Positive(Foreground)
# 2. Background
# 3. Incomplete
self.positive_pool = []
self.background_pool = []
self.incomplete_pool = []
self.construct_proposal_pools()
if reg_normalize_constants is None:
self.reg_norm_consts = self._compute_reg_normalize_constants()
else:
self.reg_norm_consts = reg_normalize_constants
self.video_centric = video_centric
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
raise TypeError(f'aug_ratio should be int, float, '
f'or a tuple of int and float, '
f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
total_ratio = (
self.sampler.positive_ratio + self.sampler.background_ratio +
self.sampler.incomplete_ratio)
self.positive_per_video = int(
self.sampler.num_per_video *
(self.sampler.positive_ratio / total_ratio))
self.background_per_video = int(
self.sampler.num_per_video *
(self.sampler.background_ratio / total_ratio))
self.incomplete_per_video = (
self.sampler.num_per_video - self.positive_per_video -
self.background_per_video)
self.test_interval = self.test_cfg.ssn.sampler.test_interval
# number of consecutive frames
self.clip_len = clip_len
# number of steps (sparse sampling for efficiency of io)
self.frame_interval = frame_interval
# test mode or not
self.filter_gt = filter_gt
self.use_regression = use_regression
self.test_mode = test_mode
# yapf: disable
if self.verbose:
self.logger.info(f"""
SSNDataset: proposal file {self.proposal_file} parsed.
There are {len(self.positive_pool) + len(self.background_pool) +
len(self.incomplete_pool)} usable proposals from {len(self.video_infos)} videos.
{len(self.positive_pool)} positive proposals
{len(self.incomplete_pool)} incomplete proposals
{len(self.background_pool)} background proposals
Sample config:
FG/BG/INCOMP: {self.positive_per_video}/{self.background_per_video}/{self.incomplete_per_video} # noqa:E501
Video Centric: {self.video_centric}
Regression Normalization Constants:
Location: mean {self.reg_norm_consts[0][0]:.05f} std {self.reg_norm_consts[1][0]:.05f} # noqa: E501
Duration: mean {self.reg_norm_consts[0][1]:.05f} std {self.reg_norm_consts[1][1]:.05f} # noqa: E501
""")
# yapf: enable
else:
self.logger.info(
f'SSNDataset: proposal file {self.proposal_file} parsed.')
def load_annotations(self):
"""Load annotation file to get video information."""
video_infos = []
if 'normalized_' in self.ann_file:
self.proposal_file = self.ann_file.replace('normalized_', '')
if not osp.exists(self.proposal_file):
raise Exception(f'Please refer to `$MMACTION2/tools/data` to '
f'denormalize {self.ann_file}.')
else:
self.proposal_file = self.ann_file
proposal_infos = load_localize_proposal_file(self.proposal_file)
# proposal_info:[video_id, num_frames, gt_list, proposal_list]
# gt_list member: [label, start_frame, end_frame]
# proposal_list member: [label, best_iou, overlap_self,
# start_frame, end_frame]
for proposal_info in proposal_infos:
frame_dir = proposal_info[0]
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
num_frames = int(proposal_info[1])
# gts:start, end, num_frames, class_label, tIoU=1
gts = []
for x in proposal_info[2]:
if int(x[2]) > int(x[1]) and int(x[1]) < num_frames:
ssn_instance = SSNInstance(
int(x[1]),
int(x[2]),
num_frames,
label=int(x[0]),
best_iou=1.0)
gts.append(ssn_instance)
# proposals:start, end, num_frames, class_label
# tIoU=best_iou, overlap_self
proposals = []
for x in proposal_info[3]:
if int(x[4]) > int(x[3]) and int(x[3]) < num_frames:
ssn_instance = SSNInstance(
int(x[3]),
int(x[4]),
num_frames,
label=int(x[0]),
best_iou=float(x[1]),
overlap_self=float(x[2]))
proposals.append(ssn_instance)
video_infos.append(
dict(
frame_dir=frame_dir,
video_id=proposal_info[0],
total_frames=num_frames,
gts=gts,
proposals=proposals))
return video_infos
def results_to_detections(self, results, top_k=2000, **kwargs):
"""Convert prediction results into detections.
Args:
results (list): Prediction results.
top_k (int): Number of top results. Default: 2000.
Returns:
list: Detection results.
"""
num_classes = results[0]['activity_scores'].shape[1] - 1
detections = [dict() for _ in range(num_classes)]
for idx in range(len(self)):
video_id = self.video_infos[idx]['video_id']
relative_proposals = results[idx]['relative_proposal_list']
if len(relative_proposals[0].shape) == 3:
relative_proposals = np.squeeze(relative_proposals, 0)
activity_scores = results[idx]['activity_scores']
completeness_scores = results[idx]['completeness_scores']
regression_scores = results[idx]['bbox_preds']
if regression_scores is None:
regression_scores = np.zeros(
(len(relative_proposals), num_classes, 2),
dtype=np.float32)
regression_scores = regression_scores.reshape((-1, num_classes, 2))
if top_k <= 0:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
for i in range(num_classes):
center_scores = regression_scores[:, i, 0][:, None]
duration_scores = regression_scores[:, i, 1][:, None]
detections[i][video_id] = np.concatenate(
(relative_proposals, combined_scores[:, i][:, None],
center_scores, duration_scores),
axis=1)
else:
combined_scores = (
softmax(activity_scores[:, 1:], dim=1) *
np.exp(completeness_scores))
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
for k in keep_idx:
class_idx = k % num_classes
proposal_idx = k // num_classes
new_item = [
relative_proposals[proposal_idx, 0],
relative_proposals[proposal_idx,
1], combined_scores[proposal_idx,
class_idx],
regression_scores[proposal_idx, class_idx,
0], regression_scores[proposal_idx,
class_idx, 1]
]
if video_id not in detections[class_idx]:
detections[class_idx][video_id] = np.array([new_item])
else:
detections[class_idx][video_id] = np.vstack(
[detections[class_idx][video_id], new_item])
return detections
def evaluate(self,
results,
metrics='mAP',
metric_options=dict(mAP=dict(eval_dataset='thumos14')),
logger=None,
**deprecated_kwargs):
"""Evaluation in SSN proposal dataset.
Args:
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mAP'.
metric_options (dict): Dict for metric options. Options are
``eval_dataset`` for ``mAP``.
Default: ``dict(mAP=dict(eval_dataset='thumos14'))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results for evaluation metrics.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
'Option arguments for metrics has been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['mAP'] = dict(metric_options['mAP'],
**deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['mAP']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
detections = self.results_to_detections(results, **self.evaluater)
if self.use_regression:
self.logger.info('Performing location regression')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: perform_regression(v)
for k, v in detections[class_idx].items()
}
self.logger.info('Regression finished')
self.logger.info('Performing NMS')
for class_idx, _ in enumerate(detections):
detections[class_idx] = {
k: temporal_nms(v, self.evaluater.nms)
for k, v in detections[class_idx].items()
}
self.logger.info('NMS finished')
# get gts
all_gts = self.get_all_gts()
for class_idx, _ in enumerate(detections):
if class_idx not in all_gts:
all_gts[class_idx] = dict()
# get predictions
plain_detections = {}
for class_idx, _ in enumerate(detections):
detection_list = []
for video, dets in detections[class_idx].items():
detection_list.extend([[video, class_idx] + x[:3]
for x in dets.tolist()])
plain_detections[class_idx] = detection_list
eval_results = OrderedDict()
for metric in metrics:
if metric == 'mAP':
eval_dataset = metric_options.setdefault('mAP', {}).setdefault(
'eval_dataset', 'thumos14')
if eval_dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, .1)
ap_values = eval_ap(plain_detections, all_gts, iou_range)
map_ious = ap_values.mean(axis=0)
self.logger.info('Evaluation finished')
for iou, map_iou in zip(iou_range, map_ious):
eval_results[f'mAP@{iou:.02f}'] = map_iou
return eval_results
def construct_proposal_pools(self):
"""Construct positve proposal pool, incomplete proposal pool and
background proposal pool of the entire dataset."""
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
self.positive_pool.extend([(video_info['video_id'], proposal)
for proposal in positives])
incompletes, backgrounds = self.get_negatives(
video_info['proposals'],
self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
self.incomplete_pool.extend([(video_info['video_id'], proposal)
for proposal in incompletes])
self.background_pool.extend([(video_info['video_id'], proposal)
for proposal in backgrounds])
def get_all_gts(self):
"""Fetch groundtruth instances of the entire dataset."""
gts = {}
for video_info in self.video_infos:
video = video_info['video_id']
for gt in video_info['gts']:
class_idx = gt.label - 1
# gt_info: [relative_start, relative_end]
gt_info = [
gt.start_frame / video_info['total_frames'],
gt.end_frame / video_info['total_frames']
]
gts.setdefault(class_idx, {}).setdefault(video,
[]).append(gt_info)
return gts
@staticmethod
def get_positives(gts, proposals, positive_threshold, with_gt=True):
"""Get positive/foreground proposals.
Args:
gts (list): List of groundtruth instances(:obj:`SSNInstance`).
proposals (list): List of proposal instances(:obj:`SSNInstance`).
positive_threshold (float): Minimum threshold of overlap of
positive/foreground proposals and groundtruths.
with_gt (bool): Whether to include groundtruth instances in
positive proposals. Default: True.
Returns:
list[:obj:`SSNInstance`]: (positives), positives is a list
comprised of positive proposal instances.
"""
positives = [
proposal for proposal in proposals
if proposal.best_iou > positive_threshold
]
if with_gt:
positives.extend(gts)
for proposal in positives:
proposal.compute_regression_targets(gts)
return positives
@staticmethod
def get_negatives(proposals,
incomplete_iou_threshold,
background_iou_threshold,
background_coverage_threshold=0.01,
incomplete_overlap_threshold=0.7):
"""Get negative proposals, including incomplete proposals and
background proposals.
Args:
proposals (list): List of proposal instances(:obj:`SSNInstance`).
incomplete_iou_threshold (float): Maximum threshold of overlap
of incomplete proposals and groundtruths.
background_iou_threshold (float): Maximum threshold of overlap
of background proposals and groundtruths.
background_coverage_threshold (float): Minimum coverage
of background proposals in video duration. Default: 0.01.
incomplete_overlap_threshold (float): Minimum percent of incomplete
proposals' own span contained in a groundtruth instance.
Default: 0.7.
Returns:
list[:obj:`SSNInstance`]: (incompletes, backgrounds), incompletes
and backgrounds are lists comprised of incomplete
proposal instances and background proposal instances.
"""
incompletes = []
backgrounds = []
for proposal in proposals:
if (proposal.best_iou < incomplete_iou_threshold
and proposal.overlap_self > incomplete_overlap_threshold):
incompletes.append(proposal)
elif (proposal.best_iou < background_iou_threshold
and proposal.coverage > background_coverage_threshold):
backgrounds.append(proposal)
return incompletes, backgrounds
def _video_centric_sampling(self, record):
"""Sample proposals from the this video instance.
Args:
record (dict): Information of the video instance(video_info[idx]).
key: frame_dir, video_id, total_frames,
gts: List of groundtruth instances(:obj:`SSNInstance`).
proposals: List of proposal instances(:obj:`SSNInstance`).
"""
positives = self.get_positives(record['gts'], record['proposals'],
self.assigner.positive_iou_threshold,
self.sampler.add_gt_as_proposals)
incompletes, backgrounds = self.get_negatives(
record['proposals'], self.assigner.incomplete_iou_threshold,
self.assigner.background_iou_threshold,
self.assigner.background_coverage_threshold,
self.assigner.incomplete_overlap_threshold)
def sample_video_proposals(proposal_type, video_id, video_pool,
num_requested_proposals, dataset_pool):
"""This method will sample proposals from the this video pool. If
the video pool is empty, it will fetch from the dataset pool
(collect proposal of the entire dataset).
Args:
proposal_type (int): Type id of proposal.
Positive/Foreground: 0
Negative:
Incomplete: 1
Background: 2
video_id (str): Name of the video.
video_pool (list): Pool comprised of proposals in this video.
num_requested_proposals (int): Number of proposals
to be sampled.
dataset_pool (list): Proposals of the entire dataset.
Returns:
list[(str, :obj:`SSNInstance`), int]:
video_id (str): Name of the video.
:obj:`SSNInstance`: Instance of class SSNInstance.
proposal_type (int): Type of proposal.
"""
if len(video_pool) == 0:
idx = np.random.choice(
len(dataset_pool), num_requested_proposals, replace=False)
return [(dataset_pool[x], proposal_type) for x in idx]
replicate = len(video_pool) < num_requested_proposals
idx = np.random.choice(
len(video_pool), num_requested_proposals, replace=replicate)
return [((video_id, video_pool[x]), proposal_type) for x in idx]
out_proposals = []
out_proposals.extend(
sample_video_proposals(0, record['video_id'], positives,
self.positive_per_video,
self.positive_pool))
out_proposals.extend(
sample_video_proposals(1, record['video_id'], incompletes,
self.incomplete_per_video,
self.incomplete_pool))
out_proposals.extend(
sample_video_proposals(2, record['video_id'], backgrounds,
self.background_per_video,
self.background_pool))
return out_proposals
def _random_sampling(self):
"""Randomly sample proposals from the entire dataset."""
out_proposals = []
positive_idx = np.random.choice(
len(self.positive_pool),
self.positive_per_video,
replace=len(self.positive_pool) < self.positive_per_video)
out_proposals.extend([(self.positive_pool[x], 0)
for x in positive_idx])
incomplete_idx = np.random.choice(
len(self.incomplete_pool),
self.incomplete_per_video,
replace=len(self.incomplete_pool) < self.incomplete_per_video)
out_proposals.extend([(self.incomplete_pool[x], 1)
for x in incomplete_idx])
background_idx = np.random.choice(
len(self.background_pool),
self.background_per_video,
replace=len(self.background_pool) < self.background_per_video)
out_proposals.extend([(self.background_pool[x], 2)
for x in background_idx])
return out_proposals
def _get_stage(self, proposal, num_frames):
"""Fetch the scale factor of starting and ending stage and get the
stage split.
Args:
proposal (:obj:`SSNInstance`): Proposal instance.
num_frames (int): Total frames of the video.
Returns:
tuple[float, float, list]: (starting_scale_factor,
ending_scale_factor, stage_split), starting_scale_factor is
the ratio of the effective sampling length to augment length
in starting stage, ending_scale_factor is the ratio of the
effective sampling length to augment length in ending stage,
stage_split is ending segment id of starting, course and
ending stage.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
starting_scale_factor = ((valid_starting_length + ori_clip_len + 1) /
(duration * self.aug_ratio[0]))
ending_scale_factor = (valid_ending_length + ori_clip_len + 1) / (
duration * self.aug_ratio[1])
aug_start, aug_end = self.aug_segments
stage_split = [
aug_start, aug_start + self.body_segments,
aug_start + self.body_segments + aug_end
]
return starting_scale_factor, ending_scale_factor, stage_split
def _compute_reg_normalize_constants(self):
"""Compute regression target normalized constants."""
if self.verbose:
self.logger.info('Compute regression target normalized constants')
targets = []
for video_info in self.video_infos:
positives = self.get_positives(
video_info['gts'], video_info['proposals'],
self.assigner.positive_iou_threshold, False)
for positive in positives:
targets.append(list(positive.regression_targets))
return np.array((np.mean(targets, axis=0), np.std(targets, axis=0)))
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
if self.video_centric:
# yapf: disable
results['out_proposals'] = self._video_centric_sampling(self.video_infos[idx]) # noqa: E501
# yapf: enable
else:
results['out_proposals'] = self._random_sampling()
out_proposal_scale_factor = []
out_proposal_type = []
out_proposal_labels = []
out_proposal_reg_targets = []
for _, proposal in enumerate(results['out_proposals']):
# proposal: [(video_id, SSNInstance), proposal_type]
num_frames = proposal[0][1].num_video_frames
(starting_scale_factor, ending_scale_factor,
_) = self._get_stage(proposal[0][1], num_frames)
# proposal[1]: Type id of proposal.
# Positive/Foreground: 0
# Negative:
# Incomplete: 1
# Background: 2
            # Positive/Foreground proposal
if proposal[1] == 0:
label = proposal[0][1].label
# Incomplete proposal
elif proposal[1] == 1:
label = proposal[0][1].label
# Background proposal
elif proposal[1] == 2:
label = 0
else:
                raise ValueError(f'Proposal type should be 0, 1, or 2, '
                                 f'but got {proposal[1]}')
out_proposal_scale_factor.append(
[starting_scale_factor, ending_scale_factor])
if not isinstance(label, int):
                raise TypeError(f'proposal_label must be an int, '
                                f'but got {type(label)}')
out_proposal_labels.append(label)
out_proposal_type.append(proposal[1])
reg_targets = proposal[0][1].regression_targets
if proposal[1] == 0:
# Normalize regression targets of positive proposals.
reg_targets = ((reg_targets[0] - self.reg_norm_consts[0][0]) /
self.reg_norm_consts[1][0],
(reg_targets[1] - self.reg_norm_consts[0][1]) /
self.reg_norm_consts[1][1])
out_proposal_reg_targets.append(reg_targets)
results['reg_targets'] = np.array(
out_proposal_reg_targets, dtype=np.float32)
results['proposal_scale_factor'] = np.array(
out_proposal_scale_factor, dtype=np.float32)
results['proposal_labels'] = np.array(out_proposal_labels)
results['proposal_type'] = np.array(out_proposal_type)
return self.pipeline(results)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
proposals = results['proposals']
num_frames = results['total_frames']
ori_clip_len = self.clip_len * self.frame_interval
frame_ticks = np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=int) + 1
num_sampled_frames = len(frame_ticks)
if len(proposals) == 0:
proposals.append(SSNInstance(0, num_frames - 1, num_frames))
relative_proposal_list = []
proposal_tick_list = []
scale_factor_list = []
for proposal in proposals:
relative_proposal = (proposal.start_frame / num_frames,
proposal.end_frame / num_frames)
relative_duration = relative_proposal[1] - relative_proposal[0]
relative_starting_duration = relative_duration * self.aug_ratio[0]
relative_ending_duration = relative_duration * self.aug_ratio[1]
relative_starting = (
relative_proposal[0] - relative_starting_duration)
relative_ending = relative_proposal[1] + relative_ending_duration
real_relative_starting = max(0.0, relative_starting)
real_relative_ending = min(1.0, relative_ending)
starting_scale_factor = (
(relative_proposal[0] - real_relative_starting) /
relative_starting_duration)
ending_scale_factor = (
(real_relative_ending - relative_proposal[1]) /
relative_ending_duration)
proposal_ranges = (real_relative_starting, *relative_proposal,
real_relative_ending)
proposal_ticks = (np.array(proposal_ranges) *
num_sampled_frames).astype(np.int32)
relative_proposal_list.append(relative_proposal)
proposal_tick_list.append(proposal_ticks)
scale_factor_list.append(
(starting_scale_factor, ending_scale_factor))
results['relative_proposal_list'] = np.array(
relative_proposal_list, dtype=np.float32)
results['scale_factor_list'] = np.array(
scale_factor_list, dtype=np.float32)
results['proposal_tick_list'] = np.array(
proposal_tick_list, dtype=np.int32)
results['reg_norm_consts'] = self.reg_norm_consts
return self.pipeline(results)
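# Hedged worked example (editor's addition, not part of the upstream file):
# the z-score normalization applied to positive-proposal regression targets
# in `prepare_train_frames` above. The constants are illustrative
# assumptions, not values computed from a real dataset.
def _example_normalize_reg_targets():
    import numpy as np
    # rows: per-dimension (mean, std), as produced by
    # `_compute_reg_normalize_constants`
    reg_norm_consts = np.array([[0.05, 0.10], [0.20, 0.40]])
    reg_targets = (0.45, 0.50)
    normalized = (
        (reg_targets[0] - reg_norm_consts[0][0]) / reg_norm_consts[1][0],
        (reg_targets[1] - reg_norm_consts[0][1]) / reg_norm_consts[1][1])
    return normalized  # -> (2.0, 1.0)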
| 37,498
| 41.515873
| 120
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/__init__.py
|
from .activitynet_dataset import ActivityNetDataset
from .audio_dataset import AudioDataset
from .audio_feature_dataset import AudioFeatureDataset
from .audio_visual_dataset import AudioVisualDataset
from .ava_dataset import AVADataset
from .base import BaseDataset
from .blending_utils import (BaseMiniBatchBlending, CutmixBlending,
MixupBlending, LabelSmoothing)
from .builder import (BLENDINGS, DATASETS, PIPELINES, build_dataloader,
build_dataset)
from .dataset_wrappers import RepeatDataset
from .hvu_dataset import HVUDataset
from .image_dataset import ImageDataset
from .pose_dataset import PoseDataset
from .rawframe_dataset import RawframeDataset
from .rawvideo_dataset import RawVideoDataset
from .ssn_dataset import SSNDataset
from .video_dataset import VideoDataset
__all__ = [
'VideoDataset', 'build_dataloader', 'build_dataset', 'RepeatDataset',
'RawframeDataset', 'BaseDataset', 'ActivityNetDataset', 'SSNDataset',
'HVUDataset', 'AudioDataset', 'AudioFeatureDataset', 'ImageDataset',
'RawVideoDataset', 'AVADataset', 'AudioVisualDataset',
'BaseMiniBatchBlending', 'CutmixBlending', 'MixupBlending', 'LabelSmoothing', 'DATASETS',
'PIPELINES', 'BLENDINGS', 'PoseDataset'
]
| 1,262
| 44.107143
| 93
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/builder.py
|
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import ClassSpecificDistributedSampler, DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
BLENDINGS = Registry('blending')
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict | None, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
if cfg['type'] == 'RepeatDataset':
from .dataset_wrappers import RepeatDataset
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
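# Hedged usage sketch (editor's addition): a config dict that `build_dataset`
# expands recursively -- the inner dataset is built first, then wrapped in
# `RepeatDataset`. The dataset type and paths are illustrative assumptions.
def _example_repeat_dataset_cfg():
    return dict(
        type='RepeatDataset',
        times=10,
        dataset=dict(
            type='VideoDataset',
            ann_file='data/kinetics400/train_list.txt',
            data_prefix='data/kinetics400/videos_train',
            pipeline=[]))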
def build_dataloader(dataset,
videos_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (:obj:`Dataset`): A PyTorch dataset.
videos_per_gpu (int): Number of videos on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data
loading for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed
training. Default: 1.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
kwargs (dict, optional): Any keyword argument to be used to initialize
DataLoader.
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
sample_by_class = getattr(dataset, 'sample_by_class', False)
if dist:
if sample_by_class:
dynamic_length = getattr(dataset, 'dynamic_length', True)
sampler = ClassSpecificDistributedSampler(
dataset,
world_size,
rank,
dynamic_length=dynamic_length,
shuffle=shuffle,
seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle, seed=seed)
shuffle = False
batch_size = videos_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * videos_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=videos_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Init the random seed for various workers."""
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
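# Hedged worked example (editor's addition): the per-worker seeds produced by
# `worker_init_fn` for an assumed setup of 4 workers on rank 1 with user seed
# 42; seeds are disjoint across both workers and ranks.
def _example_worker_seeds(num_workers=4, rank=1, seed=42):
    return [num_workers * rank + worker_id + seed
            for worker_id in range(num_workers)]  # -> [46, 47, 48, 49]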
| 4,449
| 32.458647
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/activitynet_dataset.py
|
import copy
import os
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from ..core import average_recall_at_avg_proposals
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class ActivityNetDataset(BaseDataset):
"""ActivityNet dataset for temporal action localization.
The dataset loads raw features and apply specified transforms to return a
dict containing the frame tensors and other information.
The ann_file is a json file with multiple objects, and each object has a
key of the name of a video, and value of total frames of the video, total
seconds of the video, annotations of a video, feature frames (frames
covered by features) of the video, fps and rfps. Example of a
annotation file:
.. code-block:: JSON
{
"v_--1DO2V4K74": {
"duration_second": 211.53,
"duration_frame": 6337,
"annotations": [
{
"segment": [
30.025882995319815,
205.2318595943838
],
"label": "Rock climbing"
}
],
"feature_frame": 6336,
"fps": 30.0,
"rfps": 29.9579255898
},
"v_--6bJUbfpnQ": {
"duration_second": 26.75,
"duration_frame": 647,
"annotations": [
{
"segment": [
2.578755070202808,
24.914101404056165
],
"label": "Drinking beer"
}
],
"feature_frame": 624,
"fps": 24.0,
"rfps": 24.1869158879
},
...
}
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super().__init__(ann_file, pipeline, data_prefix, test_mode)
def load_annotations(self):
"""Load the annotation according to ann_file into video_infos."""
video_infos = []
anno_database = mmcv.load(self.ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['data_prefix'] = self.data_prefix
return self.pipeline(results)
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['data_prefix'] = self.data_prefix
return self.pipeline(results)
def __len__(self):
"""Get the size of the dataset."""
return len(self.video_infos)
def _import_ground_truth(self):
"""Read ground truth data from video_infos."""
ground_truth = {}
for video_info in self.video_infos:
video_id = video_info['video_name'][2:]
this_video_ground_truths = []
for ann in video_info['annotations']:
t_start, t_end = ann['segment']
label = ann['label']
this_video_ground_truths.append([t_start, t_end, label])
ground_truth[video_id] = np.array(this_video_ground_truths)
return ground_truth
@staticmethod
def proposals2json(results, show_progress=False):
"""Convert all proposals to a final dict(json) format.
Args:
results (list[dict]): All proposals.
show_progress (bool): Whether to show the progress bar.
Defaults: False.
Returns:
dict: The final result dict. E.g.
.. code-block:: Python
            dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
dict(segment=[50.1, 129.3], score=0.6)])
"""
result_dict = {}
print('Convert proposals to json format')
if show_progress:
prog_bar = mmcv.ProgressBar(len(results))
for result in results:
video_name = result['video_name']
result_dict[video_name[2:]] = result['proposal_list']
if show_progress:
prog_bar.update()
return result_dict
@staticmethod
def _import_proposals(results):
"""Read predictions from results."""
proposals = {}
num_proposals = 0
for result in results:
video_id = result['video_name'][2:]
this_video_proposals = []
for proposal in result['proposal_list']:
t_start, t_end = proposal['segment']
score = proposal['score']
this_video_proposals.append([t_start, t_end, score])
num_proposals += 1
proposals[video_id] = np.array(this_video_proposals)
return proposals, num_proposals
def dump_results(self, results, out, output_format, version='VERSION 1.3'):
"""Dump data to json/csv files."""
if output_format == 'json':
result_dict = self.proposals2json(results)
output_dict = {
'version': version,
'results': result_dict,
'external_data': {}
}
mmcv.dump(output_dict, out)
elif output_format == 'csv':
# TODO: add csv handler to mmcv and use mmcv.dump
os.makedirs(out, exist_ok=True)
header = 'action,start,end,tmin,tmax'
for result in results:
video_name, outputs = result
output_path = osp.join(out, video_name + '.csv')
np.savetxt(
output_path,
outputs,
header=header,
delimiter=',',
comments='')
else:
raise ValueError(
f'The output format {output_format} is not supported.')
def evaluate(
self,
results,
metrics='AR@AN',
metric_options={
'AR@AN':
dict(
max_avg_proposals=100,
temporal_iou_thresholds=np.linspace(0.5, 0.95, 10))
},
logger=None,
**deprecated_kwargs):
"""Evaluation in feature dataset.
Args:
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'AR@AN'.
metric_options (dict): Dict for metric options. Options are
``max_avg_proposals``, ``temporal_iou_thresholds`` for
``AR@AN``.
default: ``{'AR@AN': dict(max_avg_proposals=100,
temporal_iou_thresholds=np.linspace(0.5, 0.95, 10))}``.
logger (logging.Logger | None): Training logger. Defaults: None.
deprecated_kwargs (dict): Used for containing deprecated arguments.
See 'https://github.com/open-mmlab/mmaction2/pull/286'.
Returns:
dict: Evaluation results for evaluation metrics.
"""
# Protect ``metric_options`` since it uses mutable value as default
metric_options = copy.deepcopy(metric_options)
if deprecated_kwargs != {}:
warnings.warn(
                'Option arguments for metrics have been changed to '
"`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501
'for more details')
metric_options['AR@AN'] = dict(metric_options['AR@AN'],
**deprecated_kwargs)
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
f'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics]
allowed_metrics = ['AR@AN']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
eval_results = OrderedDict()
ground_truth = self._import_ground_truth()
proposal, num_proposals = self._import_proposals(results)
for metric in metrics:
if metric == 'AR@AN':
temporal_iou_thresholds = metric_options.setdefault(
'AR@AN', {}).setdefault('temporal_iou_thresholds',
np.linspace(0.5, 0.95, 10))
max_avg_proposals = metric_options.setdefault(
'AR@AN', {}).setdefault('max_avg_proposals', 100)
if isinstance(temporal_iou_thresholds, list):
temporal_iou_thresholds = np.array(temporal_iou_thresholds)
recall, _, _, auc = (
average_recall_at_avg_proposals(
ground_truth,
proposal,
num_proposals,
max_avg_proposals=max_avg_proposals,
temporal_iou_thresholds=temporal_iou_thresholds))
eval_results['auc'] = auc
eval_results['AR@1'] = np.mean(recall[:, 0])
eval_results['AR@5'] = np.mean(recall[:, 4])
eval_results['AR@10'] = np.mean(recall[:, 9])
eval_results['AR@100'] = np.mean(recall[:, 99])
return eval_results
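# Hedged usage sketch (editor's addition): the shape of one entry of
# `results` as consumed by `proposals2json` and `_import_proposals`. The
# video name, segments and scores are illustrative assumptions.
def _example_proposal_result():
    return dict(
        video_name='v_--1DO2V4K74',
        proposal_list=[
            dict(segment=[30.0, 205.2], score=0.9),
            dict(segment=[50.1, 129.3], score=0.6),
        ])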
| 10,279
| 37.074074
| 105
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/samplers/distributed_sampler.py
|
import math
from collections import defaultdict
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
"""DistributedSampler inheriting from
``torch.utils.data.DistributedSampler``.
    In older versions of PyTorch, there is no ``shuffle`` argument. This child
    class ports one to DistributedSampler.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
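# Hedged worked example (editor's addition): the pad-then-stride subsampling
# performed in `__iter__` above, for an assumed dataset of 10 samples split
# across 4 replicas.
def _example_distributed_split(dataset_len=10, num_replicas=4, rank=1):
    import math
    num_samples = math.ceil(dataset_len / num_replicas)  # 3 per replica
    total_size = num_samples * num_replicas              # 12, so pad by 2
    indices = list(range(dataset_len))
    indices += indices[:total_size - len(indices)]       # [0..9, 0, 1]
    return indices[rank:total_size:num_replicas]         # rank 1 -> [1, 5, 9]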
class ClassSpecificDistributedSampler(_DistributedSampler):
"""ClassSpecificDistributedSampler inheriting from
``torch.utils.data.DistributedSampler``.
Samples are sampled with a class specific probability, which should be an
attribute of the dataset (dataset.class_prob, which is a dictionary that
map label index to the prob). This sampler is only applicable to single
class recognition dataset. This sampler is also compatible with
RepeatDataset.
    The default value of dynamic_length is True, which means we use
    oversampling / subsampling, and the dataset length may change. If
    dynamic_length is set to False, the dataset length is fixed.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
dynamic_length=True,
shuffle=True,
seed=0):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
if type(dataset).__name__ == 'RepeatDataset':
dataset = dataset.dataset
assert hasattr(dataset, 'class_prob')
self.class_prob = dataset.class_prob
self.dynamic_length = dynamic_length
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
class_indices = defaultdict(list)
# To be compatible with RepeatDataset
times = 1
dataset = self.dataset
if type(dataset).__name__ == 'RepeatDataset':
times = dataset.times
dataset = dataset.dataset
for i, item in enumerate(dataset.video_infos):
class_indices[item['label']].append(i)
if self.dynamic_length:
indices = []
for k, prob in self.class_prob.items():
prob = prob * times
for i in range(int(prob // 1)):
indices.extend(class_indices[k])
                rem = int((prob % 1) * len(class_indices[k]))
                # map the sampled permutation positions back to dataset
                # indices of this class
                rem_pos = torch.randperm(
                    len(class_indices[k]), generator=g).tolist()[:rem]
                indices.extend([class_indices[k][pos] for pos in rem_pos])
if self.shuffle:
shuffle = torch.randperm(len(indices), generator=g).tolist()
indices = [indices[i] for i in shuffle]
# re-calc num_samples & total_size
self.num_samples = math.ceil(len(indices) / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
else:
# We want to keep the dataloader length same as original
video_labels = [x['label'] for x in dataset.video_infos]
probs = [
self.class_prob[lb] / len(class_indices[lb])
for lb in video_labels
]
indices = torch.multinomial(
torch.Tensor(probs),
self.total_size,
replacement=True,
generator=g)
indices = indices.data.numpy().tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# retrieve indices for current process
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
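# Hedged worked example (editor's addition): how `dynamic_length=True` grows
# a single class under an assumed sampling probability of 2.5 -- two full
# copies of the class indices plus a random half-copy.
def _example_dynamic_class_length(class_prob=2.5, class_size=4):
    full_copies = int(class_prob // 1)              # 2 full passes
    remainder = int((class_prob % 1) * class_size)  # 2 extra samples
    return full_copies * class_size + remainder     # -> 10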
| 4,908
| 35.362963
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/samplers/__init__.py
|
from .distributed_sampler import (ClassSpecificDistributedSampler,
DistributedSampler)
__all__ = ['DistributedSampler', 'ClassSpecificDistributedSampler']
| 190
| 37.2
| 67
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/pipelines/loading.py
|
import io
import os
import os.path as osp
import shutil
import warnings
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from torch.nn.modules.utils import _pair
from ...utils import get_random_string, get_shm_dir, get_thread_id
from ..builder import PIPELINES
import random
@PIPELINES.register_module()
class LoadHVULabel:
"""Convert the HVU label from dictionaries to torch tensors.
Required keys are "label", "categories", "category_nums", added or modified
keys are "label", "mask" and "category_mask".
"""
def __init__(self, **kwargs):
self.hvu_initialized = False
self.kwargs = kwargs
def init_hvu_info(self, categories, category_nums):
assert len(categories) == len(category_nums)
self.categories = categories
self.category_nums = category_nums
self.num_categories = len(self.categories)
self.num_tags = sum(self.category_nums)
self.category2num = dict(zip(categories, category_nums))
self.start_idx = [0]
for i in range(self.num_categories - 1):
self.start_idx.append(self.start_idx[-1] + self.category_nums[i])
self.category2startidx = dict(zip(categories, self.start_idx))
self.hvu_initialized = True
def __call__(self, results):
"""Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not self.hvu_initialized:
self.init_hvu_info(results['categories'], results['category_nums'])
onehot = torch.zeros(self.num_tags)
onehot_mask = torch.zeros(self.num_tags)
category_mask = torch.zeros(self.num_categories)
for category, tags in results['label'].items():
category_mask[self.categories.index(category)] = 1.
start_idx = self.category2startidx[category]
category_num = self.category2num[category]
tags = [idx + start_idx for idx in tags]
onehot[tags] = 1.
onehot_mask[start_idx:category_num + start_idx] = 1.
results['label'] = onehot
results['mask'] = onehot_mask
results['category_mask'] = category_mask
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'hvu_initialized={self.hvu_initialized})')
return repr_str
@PIPELINES.register_module()
class SampleFrames:
"""Sample frames from the video.
Required keys are "total_frames", "start_index" , added or modified keys
are "frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
twice_sample (bool): Whether to use twice sample when testing.
If set to True, it will sample frames with and without fixed shift,
which is commonly used for testing in TSM model. Default: False.
out_of_bound_opt (str): The way to deal with out of bounds frame
indexes. Available options are 'loop', 'repeat_last'.
Default: 'loop'.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
start_index (None): This argument is deprecated and moved to dataset
            class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
twice_sample=False,
out_of_bound_opt='loop',
test_mode=False,
start_index=None,
frame_uniform=False):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.temporal_jitter = temporal_jitter
self.twice_sample = twice_sample
self.out_of_bound_opt = out_of_bound_opt
self.test_mode = test_mode
self.frame_uniform = frame_uniform
assert self.out_of_bound_opt in ['loop', 'repeat_last']
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
        If the total number of frames is smaller than the number of clips or
        the original clip length, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
        Calculate the average interval for selected frames, and shift them
        by a fixed offset of avg_interval/2. If twice_sample is set to True,
        it will additionally sample frames without the fixed shift. If the
        total number of frames is not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _sample_clips(self, num_frames):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
clip_offsets = self._get_train_clips(num_frames)
return clip_offsets
def get_seq_frames(self, num_frames):
"""
Modified from https://github.com/facebookresearch/SlowFast/blob/64abcc90ccfdcbb11cf91d6e525bed60e92a8796/slowfast/datasets/ssv2.py#L159
Given the video index, return the list of sampled frame indexes.
Args:
num_frames (int): Total number of frame in the video.
Returns:
            seq (list): the indexes of the frames sampled from the video.
"""
seg_size = float(num_frames - 1) / self.clip_len
seq = []
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
end = int(np.round(seg_size * (i + 1)))
if not self.test_mode:
seq.append(random.randint(start, end))
else:
seq.append((start + end) // 2)
return np.array(seq)
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
if self.frame_uniform: # sthv2 sampling strategy
assert results['start_index'] == 0
frame_inds = self.get_seq_frames(total_frames)
else:
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'temporal_jitter={self.temporal_jitter}, '
f'twice_sample={self.twice_sample}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
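# Hedged worked example (editor's addition): the train-mode offset arithmetic
# of `SampleFrames._get_train_clips` for assumed values -- one clip of 8
# frames at stride 4 drawn uniformly from a 300-frame video.
def _example_train_clip_inds(num_frames=300, clip_len=8, frame_interval=4,
                             num_clips=1, seed=0):
    rng = np.random.RandomState(seed)
    ori_clip_len = clip_len * frame_interval                     # 32
    avg_interval = (num_frames - ori_clip_len + 1) // num_clips  # 269
    base_offsets = np.arange(num_clips) * avg_interval
    clip_offsets = base_offsets + rng.randint(avg_interval, size=num_clips)
    return (clip_offsets[:, None] +
            np.arange(clip_len)[None, :] * frame_interval)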
@PIPELINES.register_module()
class UntrimmedSampleFrames:
"""Sample frames from the untrimmed video.
Required keys are "filename", "total_frames", added or modified keys are
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): The length of sampled clips. Default: 1.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 16.
start_index (None): This argument is deprecated and moved to dataset
            class (``BaseDataset``, ``VideoDataset``, ``RawframeDataset``, etc),
see this: https://github.com/open-mmlab/mmaction2/pull/89.
"""
def __init__(self, clip_len=1, frame_interval=16, start_index=None):
self.clip_len = clip_len
self.frame_interval = frame_interval
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
start_index = results['start_index']
clip_centers = np.arange(self.frame_interval // 2, total_frames,
self.frame_interval)
num_clips = clip_centers.shape[0]
frame_inds = clip_centers[:, None] + np.arange(
-(self.clip_len // 2), self.clip_len -
(self.clip_len // 2))[None, :]
# clip frame_inds to legal range
frame_inds = np.clip(frame_inds, 0, total_frames - 1)
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval})')
return repr_str
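# Hedged worked example (editor's addition): the clip centers produced by
# `UntrimmedSampleFrames` for an assumed 100-frame untrimmed video.
def _example_untrimmed_clip_centers(total_frames=100, frame_interval=16):
    centers = np.arange(frame_interval // 2, total_frames, frame_interval)
    return centers  # -> [ 8 24 40 56 72 88], one clip per 16-frame window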
@PIPELINES.register_module()
class DenseSampleFrames(SampleFrames):
"""Select frames from the video by dense sample strategy.
Required keys are "filename", added or modified keys are "total_frames",
"frame_inds", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
num_clips (int): Number of clips to be sampled. Default: 1.
sample_range (int): Total sample range for dense sample.
Default: 64.
        num_sample_positions (int): Number of sample start positions, which is
only used in test mode. Default: 10. That is to say, by default,
there are at least 10 clips for one input sample in test mode.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
"""
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
sample_range=64,
num_sample_positions=10,
temporal_jitter=False,
out_of_bound_opt='loop',
test_mode=False):
super().__init__(
clip_len,
frame_interval,
num_clips,
temporal_jitter,
out_of_bound_opt=out_of_bound_opt,
test_mode=test_mode)
self.sample_range = sample_range
self.num_sample_positions = num_sample_positions
def _get_train_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in train mode.
        It will calculate a sample position and sample interval, setting the
        start index to 0 when sample_position == 1 or randomly choosing it
        from [0, sample_position - 1]. Then it will shift the start index by
        each base offset.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frame in the video.
Returns:
            np.ndarray: Sampled frame indices in test mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'sample_range={self.sample_range}, '
f'num_sample_positions={self.num_sample_positions}, '
f'temporal_jitter={self.temporal_jitter}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
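# Hedged worked example (editor's addition): dense-sample clip offsets in
# test mode for assumed values -- 2 start positions, each expanded into 4
# clips spaced by sample_range // num_clips frames.
def _example_dense_test_offsets(num_frames=300, sample_range=64, num_clips=4,
                                num_sample_positions=2):
    sample_position = max(1, 1 + num_frames - sample_range)        # 237
    interval = sample_range // num_clips                           # 16
    start_list = np.linspace(0, sample_position - 1,
                             num=num_sample_positions, dtype=int)  # [0, 236]
    base_offsets = np.arange(num_clips) * interval                 # [0 16 32 48]
    return np.concatenate(
        [(base_offsets + start) % num_frames for start in start_list])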
@PIPELINES.register_module()
class SampleAVAFrames(SampleFrames):
def __init__(self, clip_len, frame_interval=2, test_mode=False):
super().__init__(clip_len, frame_interval, test_mode=test_mode)
def _get_clips(self, center_index, skip_offsets, shot_info):
start = center_index - (self.clip_len // 2) * self.frame_interval
end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval
frame_inds = list(range(start, end, self.frame_interval))
if not self.test_mode:
frame_inds = frame_inds + skip_offsets
frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1)
return frame_inds
def __call__(self, results):
fps = results['fps']
timestamp = results['timestamp']
timestamp_start = results['timestamp_start']
shot_info = results['shot_info']
center_index = fps * (timestamp - timestamp_start) + 1
skip_offsets = np.random.randint(
-self.frame_interval // 2, (self.frame_interval + 1) // 2,
size=self.clip_len)
frame_inds = self._get_clips(center_index, skip_offsets, shot_info)
results['frame_inds'] = np.array(frame_inds, dtype=np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = 1
results['crop_quadruple'] = np.array([0, 0, 1, 1], dtype=np.float32)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'test_mode={self.test_mode})')
return repr_str
@PIPELINES.register_module()
class SampleProposalFrames(SampleFrames):
"""Sample frames from proposals in the video.
Required keys are "total_frames" and "out_proposals", added or
modified keys are "frame_inds", "frame_interval", "num_clips",
    "clip_len" and "num_proposals".
Args:
clip_len (int): Frames of each sampled output clip.
body_segments (int): Number of segments in course period.
aug_segments (list[int]): Number of segments in starting and
ending period.
aug_ratio (int | float | tuple[int | float]): The ratio
of the length of augmentation to that of the proposal.
frame_interval (int): Temporal interval of adjacent sampled frames.
Default: 1.
test_interval (int): Temporal interval of adjacent sampled frames
in test mode. Default: 6.
temporal_jitter (bool): Whether to apply temporal jittering.
Default: False.
mode (str): Choose 'train', 'val' or 'test' mode.
Default: 'train'.
"""
def __init__(self,
clip_len,
body_segments,
aug_segments,
aug_ratio,
frame_interval=1,
test_interval=6,
temporal_jitter=False,
mode='train'):
super().__init__(
clip_len,
frame_interval=frame_interval,
temporal_jitter=temporal_jitter)
self.body_segments = body_segments
self.aug_segments = aug_segments
self.aug_ratio = _pair(aug_ratio)
if not mmcv.is_tuple_of(self.aug_ratio, (int, float)):
            raise TypeError(f'aug_ratio should be int, float '
                            f'or tuple of int and float, '
                            f'but got {type(aug_ratio)}')
assert len(self.aug_ratio) == 2
assert mode in ['train', 'val', 'test']
self.mode = mode
self.test_interval = test_interval
@staticmethod
def _get_train_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in train mode.
It will calculate the average interval for each segment,
and randomly shift them within offsets between [0, average_duration].
If the total number of frames is smaller than num segments, it will
return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
avg_interval = (valid_length + 1) // num_segments
if avg_interval > 0:
base_offsets = np.arange(num_segments) * avg_interval
offsets = base_offsets + np.random.randint(
avg_interval, size=num_segments)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
@staticmethod
def _get_val_indices(valid_length, num_segments):
"""Get indices of different stages of proposals in validation mode.
It will calculate the average interval for each segment.
If the total number of valid length is smaller than num segments,
it will return all zero indices.
Args:
valid_length (int): The length of the starting point's
valid interval.
num_segments (int): Total number of segments.
Returns:
np.ndarray: Sampled frame indices in validation mode.
"""
if valid_length >= num_segments:
avg_interval = valid_length / float(num_segments)
base_offsets = np.arange(num_segments) * avg_interval
offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
else:
offsets = np.zeros((num_segments, ), dtype=np.int)
return offsets
def _get_proposal_clips(self, proposal, num_frames):
"""Get clip offsets in train mode.
It will calculate sampled frame indices in the proposal's three
stages: starting, course and ending stage.
Args:
proposal (obj): The proposal object.
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
# proposal interval: [start_frame, end_frame)
start_frame = proposal.start_frame
end_frame = proposal.end_frame
ori_clip_len = self.clip_len * self.frame_interval
duration = end_frame - start_frame
assert duration != 0
valid_length = duration - ori_clip_len
valid_starting = max(0,
start_frame - int(duration * self.aug_ratio[0]))
valid_ending = min(num_frames - ori_clip_len + 1,
end_frame - 1 + int(duration * self.aug_ratio[1]))
valid_starting_length = start_frame - valid_starting - ori_clip_len
valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len
if self.mode == 'train':
starting_offsets = self._get_train_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_train_indices(valid_length,
self.body_segments)
ending_offsets = self._get_train_indices(valid_ending_length,
self.aug_segments[1])
elif self.mode == 'val':
starting_offsets = self._get_val_indices(valid_starting_length,
self.aug_segments[0])
course_offsets = self._get_val_indices(valid_length,
self.body_segments)
ending_offsets = self._get_val_indices(valid_ending_length,
self.aug_segments[1])
starting_offsets += valid_starting
course_offsets += start_frame
ending_offsets += end_frame
offsets = np.concatenate(
(starting_offsets, course_offsets, ending_offsets))
return offsets
def _get_train_clips(self, num_frames, proposals):
"""Get clip offsets in train mode.
It will calculate sampled frame indices of each proposal, and then
assemble them.
Args:
num_frames (int): Total number of frame in the video.
proposals (list): Proposals fetched.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
clip_offsets = []
for proposal in proposals:
proposal_clip_offsets = self._get_proposal_clips(
proposal[0][1], num_frames)
clip_offsets = np.concatenate(
[clip_offsets, proposal_clip_offsets])
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
It will calculate sampled frame indices based on test interval.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
return np.arange(
0, num_frames - ori_clip_len, self.test_interval, dtype=np.int)
def _sample_clips(self, num_frames, proposals):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
proposals (list | None): Proposals fetched.
It is set to None in test mode.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.mode == 'test':
clip_offsets = self._get_test_clips(num_frames)
else:
assert proposals is not None
clip_offsets = self._get_train_clips(num_frames, proposals)
return clip_offsets
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
out_proposals = results.get('out_proposals', None)
clip_offsets = self._sample_clips(total_frames, out_proposals)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
start_index = results['start_index']
frame_inds = np.mod(frame_inds, total_frames) + start_index
results['frame_inds'] = np.array(frame_inds).astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = (
self.body_segments + self.aug_segments[0] + self.aug_segments[1])
if self.mode in ['train', 'val']:
results['num_proposals'] = len(results['out_proposals'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'body_segments={self.body_segments}, '
f'aug_segments={self.aug_segments}, '
f'aug_ratio={self.aug_ratio}, '
f'frame_interval={self.frame_interval}, '
f'test_interval={self.test_interval}, '
f'temporal_jitter={self.temporal_jitter}, '
f'mode={self.mode})')
return repr_str
@PIPELINES.register_module()
class PyAVInit:
"""Using pyav to initialize the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "filename",
added or modified keys are "video_reader", and "total_frames".
Args:
io_backend (str): io backend where frames are store.
Default: 'disk'.
kwargs (dict): Args for file client.
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results
def __repr__(self):
        repr_str = f'{self.__class__.__name__}(io_backend={self.io_backend})'
return repr_str
@PIPELINES.register_module()
class PyAVDecode:
"""Using pyav to decode the video.
PyAV: https://github.com/mikeboers/PyAV
Required keys are "video_reader" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
def __init__(self, multi_thread=False):
self.multi_thread = multi_thread
def __call__(self, results):
"""Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
        # set max index to enable early stop
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
results['video_reader'] = None
del container
        # the number of frames available in pyav may be less than the video
        # length, which may raise an error
results['imgs'] = [imgs[i % len(imgs)] for i in results['frame_inds']]
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(multi_thread={self.multi_thread})'
return repr_str
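# Hedged usage sketch (editor's addition): chaining `PyAVInit` and
# `PyAVDecode` by hand outside a full pipeline. The file path is an
# assumption and PyAV must be installed for this to run.
def _example_pyav_decode(path='demo.mp4'):
    results = dict(filename=path, frame_inds=np.array([0, 10, 20]))
    results = PyAVInit()(results)
    results = PyAVDecode()(results)
    return len(results['imgs'])  # -> 3 decoded RGB frames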
@PIPELINES.register_module()
class PyAVDecodeMotionVector(PyAVDecode):
"""Using pyav to decode the motion vectors from video.
Reference: https://github.com/PyAV-Org/PyAV/
blob/main/tests/test_decode.py
Required keys are "video_reader" and "frame_inds",
added or modified keys are "motion_vectors", "frame_inds".
Args:
multi_thread (bool): If set to True, it will apply multi
thread processing. Default: False.
"""
@staticmethod
def _parse_vectors(mv, vectors, height, width):
"""Parse the returned vectors."""
(w, h, src_x, src_y, dst_x,
dst_y) = (vectors['w'], vectors['h'], vectors['src_x'],
vectors['src_y'], vectors['dst_x'], vectors['dst_y'])
val_x = dst_x - src_x
val_y = dst_y - src_y
start_x = dst_x - w // 2
start_y = dst_y - h // 2
end_x = start_x + w
end_y = start_y + h
for sx, ex, sy, ey, vx, vy in zip(start_x, end_x, start_y, end_y,
val_x, val_y):
if (sx >= 0 and ex < width and sy >= 0 and ey < height):
mv[sy:ey, sx:ex] = (vx, vy)
return mv
def __call__(self, results):
"""Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
        # set max index to enable early stop
max_idx = max(results['frame_inds'])
i = 0
stream = container.streams.video[0]
codec_context = stream.codec_context
codec_context.options = {'flags2': '+export_mvs'}
for packet in container.demux(stream):
for frame in packet.decode():
if i > max_idx + 1:
break
i += 1
height = frame.height
width = frame.width
mv = np.zeros((height, width, 2), dtype=np.int8)
vectors = frame.side_data.get('MOTION_VECTORS')
if frame.key_frame:
                    # Key frames don't have motion vectors
assert vectors is None
if vectors is not None and len(vectors) > 0:
mv = self._parse_vectors(mv, vectors.to_ndarray(), height,
width)
imgs.append(mv)
results['video_reader'] = None
del container
        # the number of frames available in pyav may be less than the video
        # length, which may raise an error
results['motion_vectors'] = np.array(
[imgs[i % len(imgs)] for i in results['frame_inds']])
return results
@PIPELINES.register_module()
class DecordInit:
"""Using decord to initialize the video_reader.
Decord: https://github.com/dmlc/decord
Required keys are "filename",
added or modified keys are "video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', num_threads=1, **kwargs):
self.io_backend = io_backend
self.num_threads = num_threads
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the Decord initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import decord
except ImportError:
raise ImportError(
'Please run "pip install decord" to install Decord first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = decord.VideoReader(file_obj, num_threads=self.num_threads)
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'num_threads={self.num_threads})')
return repr_str
@PIPELINES.register_module()
class DecordDecode:
"""Using decord to decode the video.
Decord: https://github.com/dmlc/decord
Required keys are "video_reader", "filename" and "frame_inds",
added or modified keys are "imgs" and "original_shape".
"""
def __call__(self, results):
"""Perform the Decord decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
# Generate frame index mapping in order
frame_dict = {
idx: container[idx].asnumpy()
for idx in np.unique(frame_inds)
}
imgs = [frame_dict[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
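# Hedged usage sketch (editor's addition): the decord counterpart of the
# PyAV example above; decord must be installed and the file path is an
# assumption.
def _example_decord_decode(path='demo.mp4'):
    results = dict(filename=path, frame_inds=np.array([0, 10, 20]))
    results = DecordInit()(results)
    results = DecordDecode()(results)
    return results['original_shape']  # (height, width) of decoded frames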
@PIPELINES.register_module()
class OpenCVInit:
"""Using OpenCV to initialize the video_reader.
Required keys are "filename", added or modified keys are "new_path",
"video_reader" and "total_frames".
"""
def __init__(self, io_backend='disk', **kwargs):
self.io_backend = io_backend
self.kwargs = kwargs
self.file_client = None
self.tmp_folder = None
if self.io_backend != 'disk':
random_string = get_random_string()
thread_id = get_thread_id()
self.tmp_folder = osp.join(get_shm_dir(),
f'{random_string}_{thread_id}')
os.mkdir(self.tmp_folder)
def __call__(self, results):
"""Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
            # save files from the same thread to the same place
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results
def __del__(self):
if self.tmp_folder and osp.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend})')
return repr_str
@PIPELINES.register_module()
class OpenCVDecode:
"""Using OpenCV to decode the video.
Required keys are "video_reader", "filename" and "frame_inds", added or
modified keys are "imgs", "img_shape" and "original_shape".
"""
def __call__(self, results):
"""Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
            # the last frame(s) may be None in OpenCV
            while cur_frame is None:
                frame_ind -= 1
                cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class RawFrameDecode:
"""Load and decode frames with given indices.
Required keys are "frame_dir", "filename_tmpl" and "frame_inds",
added or modified keys are "imgs", "img_shape" and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
for frame_idx in results['frame_inds']:
frame_idx += offset
if modality == 'RGB':
filepath = osp.join(directory, filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.extend([x_frame, y_frame])
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'decoding_backend={self.decoding_backend})')
return repr_str
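# Illustrative sketch (not part of the original file) of how filename_tmpl
# is resolved per modality; the directory and template below are
# hypothetical.
#
#   results = dict(
#       frame_dir='data/video_0001',
#       filename_tmpl='img_{:05}.jpg',  # -> img_00001.jpg, img_00002.jpg
#       modality='RGB',
#       frame_inds=np.array([1, 9, 17]))
#   results = RawFrameDecode()(results)
#
# For Flow, filename_tmpl takes two fields (e.g. '{}_{:05d}.jpg') and is
# formatted with ('x', idx) and ('y', idx) for the two flow components.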
@PIPELINES.register_module()
class ImageDecode:
"""Load and decode images.
Required key is "filename", added or modified keys are "imgs", "img_shape"
and "original_shape".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
decoding_backend (str): Backend used for image decoding.
Default: 'cv2'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
self.io_backend = io_backend
self.decoding_backend = decoding_backend
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
filename = results['filename']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(img)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
@PIPELINES.register_module()
class AudioDecodeInit:
"""Using librosa to initialize the audio reader.
Required keys are "audio_path", added or modified keys are "length",
"sample_rate", "audios".
Args:
        io_backend (str): IO backend where the audio is stored.
            Default: 'disk'.
sample_rate (int): Audio sampling times per second. Default: 16000.
"""
def __init__(self,
io_backend='disk',
sample_rate=16000,
pad_method='zero',
**kwargs):
self.io_backend = io_backend
self.sample_rate = sample_rate
if pad_method in ['random', 'zero']:
self.pad_method = pad_method
else:
raise NotImplementedError
self.kwargs = kwargs
self.file_client = None
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
        # librosa loads raw audio as values in [-1, 1]
        return np.random.rand(shape).astype(np.float32) * 2 - 1
def __call__(self, results):
"""Perform the librosa initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Please install librosa first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
if osp.exists(results['audio_path']):
file_obj = io.BytesIO(self.file_client.get(results['audio_path']))
y, sr = librosa.load(file_obj, sr=self.sample_rate)
else:
# Generate a random dummy 10s input
pad_func = getattr(self, f'_{self.pad_method}_pad')
y = pad_func(int(round(10.0 * self.sample_rate)))
sr = self.sample_rate
results['length'] = y.shape[0]
results['sample_rate'] = sr
results['audios'] = y
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'sample_rate={self.sample_rate}, '
f'pad_method={self.pad_method})')
return repr_str
@PIPELINES.register_module()
class LoadAudioFeature:
"""Load offline extracted audio features.
Required keys are "audio_path", added or modified keys are "length",
audios".
"""
def __init__(self, pad_method='zero'):
if pad_method not in ['zero', 'random']:
raise NotImplementedError
self.pad_method = pad_method
@staticmethod
def _zero_pad(shape):
return np.zeros(shape, dtype=np.float32)
@staticmethod
def _random_pad(shape):
        # the spectrogram is normalized to values in [0, 1]; unpack the
        # shape tuple, since np.random.rand takes dimensions as separate
        # arguments
        return np.random.rand(*shape).astype(np.float32)
def __call__(self, results):
"""Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if osp.exists(results['audio_path']):
feature_map = np.load(results['audio_path'])
else:
# Generate a random dummy 10s input
# Some videos do not have audio stream
pad_func = getattr(self, f'_{self.pad_method}_pad')
feature_map = pad_func((640, 80))
results['length'] = feature_map.shape[0]
results['audios'] = feature_map
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'pad_method={self.pad_method})')
return repr_str
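# Illustrative sketch (not part of the original file) of the fallback path
# above: when the .npy feature file is missing, a dummy 10-second
# spectrogram of shape (640, 80) is generated, so downstream transforms
# always receive a 2-D feature map.
#
#   loader = LoadAudioFeature(pad_method='zero')
#   results = loader(dict(audio_path='missing.npy'))  # hypothetical path
#   assert results['audios'].shape == (640, 80)
#   assert results['length'] == 640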
@PIPELINES.register_module()
class AudioDecode:
"""Sample the audio w.r.t. the frames selected.
Args:
        fixed_length (int): Since the audio clips selected by the sampled
            frames may not be exactly the same length, `fixed_length` will
            truncate or pad them into the same size. Default: 32000.
Required keys are "frame_inds", "num_clips", "total_frames", "length",
added or modified keys are "audios", "audios_shape".
"""
def __init__(self, fixed_length=32000):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioDecode`` to pick audio clips."""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0])),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
@PIPELINES.register_module()
class BuildPseudoClip:
"""Build pseudo clips with one single image by repeating it n times.
Required key is "imgs", added or modified key is "imgs", "num_clips",
"clip_len".
Args:
clip_len (int): Frames of the generated pseudo clips.
"""
def __init__(self, clip_len):
self.clip_len = clip_len
def __call__(self, results):
# the input should be one single image
assert len(results['imgs']) == 1
im = results['imgs'][0]
for _ in range(1, self.clip_len):
results['imgs'].append(np.copy(im))
results['clip_len'] = self.clip_len
results['num_clips'] = 1
return results
def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'clip_len={self.clip_len})')
return repr_str
@PIPELINES.register_module()
class FrameSelector(RawFrameDecode):
"""Deprecated class for ``RawFrameDecode``."""
def __init__(self, *args, **kwargs):
warnings.warn('"FrameSelector" is deprecated, please switch to'
'"RawFrameDecode"')
super().__init__(*args, **kwargs)
@PIPELINES.register_module()
class AudioFeatureSelector:
"""Sample the audio feature w.r.t. the frames selected.
Required keys are "audios", "frame_inds", "num_clips", "length",
"total_frames", added or modified keys are "audios", "audios_shape".
Args:
        fixed_length (int): Since the features selected by the sampled
            frames may not be exactly the same length, `fixed_length` will
            truncate or pad them into the same size. Default: 128.
"""
def __init__(self, fixed_length=128):
self.fixed_length = fixed_length
def __call__(self, results):
"""Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx, :]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length, :]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
                    f'fixed_length={self.fixed_length})')
return repr_str
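# Worked example (illustrative, not part of the original file) of the
# frame-to-feature index mapping above: a clip covering frames 8..15 of a
# 32-frame video with a 128-row feature map selects
#   start_idx = round((8 + 1) / 32 * 128)  = 36
#   end_idx   = round((15 + 1) / 32 * 128) = 64
# i.e. the feature rows proportional to the clip's temporal extent, which
# are then truncated or zero-padded to `fixed_length` rows.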
@PIPELINES.register_module()
class LoadLocalizationFeature:
"""Load Video features for localizer with given video_name list.
Required keys are "video_name" and "data_prefix", added or modified keys
are "raw_feature".
Args:
raw_feature_ext (str): Raw feature file extension. Default: '.csv'.
"""
def __init__(self, raw_feature_ext='.csv'):
valid_raw_feature_ext = ('.csv', )
if raw_feature_ext not in valid_raw_feature_ext:
raise NotImplementedError
self.raw_feature_ext = raw_feature_ext
def __call__(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
data_prefix = results['data_prefix']
data_path = osp.join(data_prefix, video_name + self.raw_feature_ext)
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'raw_feature_ext={self.raw_feature_ext})')
return repr_str
@PIPELINES.register_module()
class GenerateLocalizationLabels:
"""Load video label for localizer with given video_name list.
Required keys are "duration_frame", "duration_second", "feature_frame",
"annotations", added or modified keys are "gt_bbox".
"""
def __call__(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results
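# Worked example (illustrative, not part of the original file) of the label
# normalization above: for duration_frame=300, duration_second=10.0 and
# feature_frame=288,
#   corrected_second = 288 / 300 * 10.0 = 9.6
# so an annotated segment [2.4, 7.2] becomes the normalized gt_bbox
# [0.25, 0.75], clipped to [0, 1].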
@PIPELINES.register_module()
class LoadProposals:
"""Loading proposals with given proposal results.
Required keys are "video_name", added or modified keys are 'bsp_feature',
'tmin', 'tmax', 'tmin_score', 'tmax_score' and 'reference_temporal_iou'.
Args:
top_k (int): The top k proposals to be loaded.
pgm_proposals_dir (str): Directory to load proposals.
pgm_features_dir (str): Directory to load proposal features.
proposal_ext (str): Proposal file extension. Default: '.csv'.
feature_ext (str): Feature file extension. Default: '.npy'.
"""
def __init__(self,
top_k,
pgm_proposals_dir,
pgm_features_dir,
proposal_ext='.csv',
feature_ext='.npy'):
self.top_k = top_k
self.pgm_proposals_dir = pgm_proposals_dir
self.pgm_features_dir = pgm_features_dir
valid_proposal_ext = ('.csv', )
if proposal_ext not in valid_proposal_ext:
raise NotImplementedError
self.proposal_ext = proposal_ext
valid_feature_ext = ('.npy', )
if feature_ext not in valid_feature_ext:
raise NotImplementedError
self.feature_ext = feature_ext
def __call__(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'top_k={self.top_k}, '
f'pgm_proposals_dir={self.pgm_proposals_dir}, '
f'pgm_features_dir={self.pgm_features_dir}, '
f'proposal_ext={self.proposal_ext}, '
f'feature_ext={self.feature_ext})')
return repr_str
# STTS-main/VideoSwin/mmaction/datasets/pipelines/compose.py
from collections.abc import Sequence
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose a data pipeline with a sequence of transforms.
Args:
transforms (list[dict | callable]):
Either config dicts of transforms or transform objects.
"""
def __init__(self, transforms):
assert isinstance(transforms, Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError(f'transform must be callable or a dict, '
f'but got {type(transform)}')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
data (dict): A result dict contains the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
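# Illustrative usage sketch (not part of the original file): building a
# Compose from config dicts; each dict's 'type' is looked up in the
# PIPELINES registry. The filename and sampler settings are hypothetical.
#
#   pipeline = Compose([
#       dict(type='DecordInit'),
#       dict(type='SampleFrames', clip_len=32, frame_interval=2,
#            num_clips=1),
#       dict(type='DecordDecode'),
#   ])
#   results = pipeline(dict(filename='demo.mp4', start_index=0,
#                           modality='RGB'))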
# STTS-main/VideoSwin/mmaction/datasets/pipelines/pose_loading.py
import copy as cp
import pickle
import numpy as np
from mmcv.fileio import FileClient
from scipy.stats import mode
from ..builder import PIPELINES
from .augmentations import Flip
@PIPELINES.register_module()
class UniformSampleFrames:
"""Uniformly sample frames from the video.
    To sample an n-frame clip from the video, UniformSampleFrames basically
    divides the video into n segments of equal length and randomly samples
    one frame from each segment. To make the testing results reproducible, a
    random seed is set during testing to make the sampling deterministic.
    Required keys are "total_frames", "start_index", added or modified keys
are "frame_inds", "clip_len", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
num_clips (int): Number of clips to be sampled. Default: 1.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
seed (int): The random seed used during test time. Default: 255.
"""
def __init__(self, clip_len, num_clips=1, test_mode=False, seed=255):
self.clip_len = clip_len
self.num_clips = num_clips
self.test_mode = test_mode
self.seed = seed
def _get_train_clips(self, num_frames, clip_len):
"""Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
"""
assert self.num_clips == 1
if num_frames < clip_len:
start = np.random.randint(0, num_frames)
inds = np.arange(start, start + clip_len)
elif clip_len <= num_frames < 2 * clip_len:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int64)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
return inds
def _get_test_clips(self, num_frames, clip_len):
"""Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
"""
np.random.seed(self.seed)
if num_frames < clip_len:
# Then we use a simple strategy
if num_frames < self.num_clips:
start_inds = list(range(self.num_clips))
else:
start_inds = [
i * num_frames // self.num_clips
for i in range(self.num_clips)
]
inds = np.concatenate(
[np.arange(i, i + clip_len) for i in start_inds])
elif clip_len <= num_frames < clip_len * 2:
all_inds = []
for i in range(self.num_clips):
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int64)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
all_inds.append(inds)
inds = np.concatenate(all_inds)
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
all_inds = []
for i in range(self.num_clips):
offset = np.random.randint(bsize)
all_inds.append(bst + offset)
inds = np.concatenate(all_inds)
return inds
def __call__(self, results):
num_frames = results['total_frames']
if self.test_mode:
inds = self._get_test_clips(num_frames, self.clip_len)
else:
inds = self._get_train_clips(num_frames, self.clip_len)
inds = np.mod(inds, num_frames)
start_index = results['start_index']
inds = inds + start_index
        results['frame_inds'] = inds.astype(np.int64)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'num_clips={self.num_clips}, '
f'test_mode={self.test_mode}, '
f'seed={self.seed})')
return repr_str
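# Worked example (illustrative, not part of the original file): for
# total_frames=100 and clip_len=10, _get_train_clips builds the segment
# boundaries bids = [0, 10, 20, ..., 100] and draws one random offset per
# segment, e.g. inds = [3, 17, 22, 38, 45, 51, 67, 74, 83, 96] -- one
# frame sampled uniformly from each of the 10 equal-length segments.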
@PIPELINES.register_module()
class PoseDecode:
"""Load and decode pose with given indices.
Required keys are "keypoint", "frame_inds" (optional), "keypoint_score"
(optional), added or modified keys are "keypoint", "keypoint_score" (if
applicable).
"""
@staticmethod
def _load_kp(kp, frame_inds):
"""Load keypoints given frame indices.
Args:
kp (np.ndarray): The keypoint coordinates.
frame_inds (np.ndarray): The frame indices.
"""
return [x[frame_inds].astype(np.float32) for x in kp]
@staticmethod
def _load_kpscore(kpscore, frame_inds):
"""Load keypoint scores given frame indices.
Args:
kpscore (np.ndarray): The confidence scores of keypoints.
frame_inds (np.ndarray): The frame indices.
"""
return [x[frame_inds].astype(np.float32) for x in kpscore]
def __call__(self, results):
if 'frame_inds' not in results:
results['frame_inds'] = np.arange(results['total_frames'])
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
frame_inds = results['frame_inds'] + offset
if 'keypoint_score' in results:
kpscore = results['keypoint_score']
results['keypoint_score'] = kpscore[:,
frame_inds].astype(np.float32)
if 'keypoint' in results:
results['keypoint'] = results['keypoint'][:, frame_inds].astype(
np.float32)
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}()'
return repr_str
@PIPELINES.register_module()
class LoadKineticsPose:
"""Load Kinetics Pose given filename (The format should be pickle)
Required keys are "filename", "total_frames", "img_shape", "frame_inds",
"anno_inds" (for mmpose source, optional), added or modified keys are
"keypoint", "keypoint_score".
Args:
io_backend (str): IO backend where frames are stored. Default: 'disk'.
squeeze (bool): Whether to remove frames with no human pose.
Default: True.
max_person (int): The max number of persons in a frame. Default: 10.
keypoint_weight (dict): The weight of keypoints. We set the confidence
score of a person as the weighted sum of confidence scores of each
joint. Persons with low confidence scores are dropped (if exceed
max_person). Default: dict(face=1, torso=2, limb=3).
source (str): The sources of the keypoints used. Choices are 'mmpose'
and 'openpose'. Default: 'mmpose'.
kwargs (dict, optional): Arguments for FileClient.
"""
def __init__(self,
io_backend='disk',
squeeze=True,
max_person=100,
keypoint_weight=dict(face=1, torso=2, limb=3),
source='mmpose',
**kwargs):
self.io_backend = io_backend
self.squeeze = squeeze
self.max_person = max_person
self.keypoint_weight = cp.deepcopy(keypoint_weight)
self.source = source
if source == 'openpose':
self.kpsubset = dict(
face=[0, 14, 15, 16, 17],
torso=[1, 2, 8, 5, 11],
limb=[3, 4, 6, 7, 9, 10, 12, 13])
elif source == 'mmpose':
self.kpsubset = dict(
face=[0, 1, 2, 3, 4],
torso=[5, 6, 11, 12],
limb=[7, 8, 9, 10, 13, 14, 15, 16])
else:
raise NotImplementedError('Unknown source of Kinetics Pose')
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
assert 'filename' in results
filename = results.pop('filename')
# only applicable to source == 'mmpose'
anno_inds = None
if 'anno_inds' in results:
assert self.source == 'mmpose'
anno_inds = results.pop('anno_inds')
results.pop('box_score', None)
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
        buf = self.file_client.get(filename)
        # only the kp array is in the pickle file; each kp includes x, y
        # and a confidence score.
        kps = pickle.loads(buf)
total_frames = results['total_frames']
frame_inds = results.pop('frame_inds')
if anno_inds is not None:
kps = kps[anno_inds]
frame_inds = frame_inds[anno_inds]
frame_inds = list(frame_inds)
def mapinds(inds):
uni = np.unique(inds)
mapp = {x: i for i, x in enumerate(uni)}
inds = [mapp[x] for x in inds]
return np.array(inds, dtype=np.int16)
if self.squeeze:
frame_inds = mapinds(frame_inds)
total_frames = np.max(frame_inds) + 1
# write it back
results['total_frames'] = total_frames
h, w = results['img_shape']
if self.source == 'openpose':
kps[:, :, 0] *= w
kps[:, :, 1] *= h
num_kp = kps.shape[1]
num_person = mode(frame_inds)[-1][0]
new_kp = np.zeros([num_person, total_frames, num_kp, 2],
dtype=np.float16)
new_kpscore = np.zeros([num_person, total_frames, num_kp],
dtype=np.float16)
        # int16 suffices: the per-frame person count stays far below 32768
        num_person_frame = np.zeros([total_frames], dtype=np.int16)
for frame_ind, kp in zip(frame_inds, kps):
person_ind = num_person_frame[frame_ind]
new_kp[person_ind, frame_ind] = kp[:, :2]
new_kpscore[person_ind, frame_ind] = kp[:, 2]
num_person_frame[frame_ind] += 1
kpgrp = self.kpsubset
weight = self.keypoint_weight
results['num_person'] = num_person
if num_person > self.max_person:
for i in range(total_frames):
np_frame = num_person_frame[i]
val = new_kpscore[:np_frame, i]
val = (
np.sum(val[:, kpgrp['face']], 1) * weight['face'] +
np.sum(val[:, kpgrp['torso']], 1) * weight['torso'] +
np.sum(val[:, kpgrp['limb']], 1) * weight['limb'])
inds = sorted(range(np_frame), key=lambda x: -val[x])
new_kpscore[:np_frame, i] = new_kpscore[inds, i]
new_kp[:np_frame, i] = new_kp[inds, i]
results['num_person'] = self.max_person
results['keypoint'] = new_kp[:self.max_person]
results['keypoint_score'] = new_kpscore[:self.max_person]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'io_backend={self.io_backend}, '
f'squeeze={self.squeeze}, '
f'max_person={self.max_person}, '
f'keypoint_weight={self.keypoint_weight}, '
f'source={self.source}, '
f'kwargs={self.kwargs})')
return repr_str
@PIPELINES.register_module()
class GeneratePoseTarget:
"""Generate pseudo heatmaps based on joint coordinates and confidence.
Required keys are "keypoint", "img_shape", "keypoint_score" (optional),
added or modified keys are "imgs".
Args:
sigma (float): The sigma of the generated gaussian map. Default: 0.6.
use_score (bool): Use the confidence score of keypoints as the maximum
of the gaussian maps. Default: True.
with_kp (bool): Generate pseudo heatmaps for keypoints. Default: True.
with_limb (bool): Generate pseudo heatmaps for limbs. At least one of
'with_kp' and 'with_limb' should be True. Default: False.
skeletons (tuple[tuple]): The definition of human skeletons.
Default: ((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), (7, 9),
(0, 6), (6, 8), (8, 10), (5, 11), (11, 13), (13, 15),
(6, 12), (12, 14), (14, 16), (11, 12)),
which is the definition of COCO-17p skeletons.
double (bool): Output both original heatmaps and flipped heatmaps.
Default: False.
left_kp (tuple[int]): Indexes of left keypoints, which is used when
flipping heatmaps. Default: (1, 3, 5, 7, 9, 11, 13, 15),
which is left keypoints in COCO-17p.
right_kp (tuple[int]): Indexes of right keypoints, which is used when
flipping heatmaps. Default: (2, 4, 6, 8, 10, 12, 14, 16),
which is right keypoints in COCO-17p.
"""
def __init__(self,
sigma=0.6,
use_score=True,
with_kp=True,
with_limb=False,
skeletons=((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7),
(7, 9), (0, 6), (6, 8), (8, 10), (5, 11), (11, 13),
(13, 15), (6, 12), (12, 14), (14, 16), (11, 12)),
double=False,
left_kp=(1, 3, 5, 7, 9, 11, 13, 15),
right_kp=(2, 4, 6, 8, 10, 12, 14, 16)):
self.sigma = sigma
self.use_score = use_score
self.with_kp = with_kp
self.with_limb = with_limb
self.double = double
# an auxiliary const
self.eps = 1e-4
assert self.with_kp or self.with_limb, (
'At least one of "with_limb" '
'and "with_kp" should be set as True.')
self.left_kp = left_kp
self.right_kp = right_kp
self.skeletons = skeletons
def generate_a_heatmap(self, img_h, img_w, centers, sigma, max_values):
"""Generate pseudo heatmap for one keypoint in one frame.
Args:
img_h (int): The height of the heatmap.
img_w (int): The width of the heatmap.
centers (np.ndarray): The coordinates of corresponding keypoints
(of multiple persons).
sigma (float): The sigma of generated gaussian.
max_values (np.ndarray): The max values of each keypoint.
Returns:
np.ndarray: The generated pseudo heatmap.
"""
heatmap = np.zeros([img_h, img_w], dtype=np.float32)
for center, max_value in zip(centers, max_values):
mu_x, mu_y = center[0], center[1]
if max_value < self.eps:
continue
st_x = max(int(mu_x - 3 * sigma), 0)
ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)
st_y = max(int(mu_y - 3 * sigma), 0)
ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)
x = np.arange(st_x, ed_x, 1, np.float32)
y = np.arange(st_y, ed_y, 1, np.float32)
            # skip if the gaussian patch falls completely outside the heatmap
if not (len(x) and len(y)):
continue
y = y[:, None]
patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)
patch = patch * max_value
heatmap[st_y:ed_y,
st_x:ed_x] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x],
patch)
return heatmap
def generate_a_limb_heatmap(self, img_h, img_w, starts, ends, sigma,
start_values, end_values):
"""Generate pseudo heatmap for one limb in one frame.
Args:
img_h (int): The height of the heatmap.
img_w (int): The width of the heatmap.
starts (np.ndarray): The coordinates of one keypoint in the
corresponding limbs (of multiple persons).
ends (np.ndarray): The coordinates of the other keypoint in the
corresponding limbs (of multiple persons).
sigma (float): The sigma of generated gaussian.
start_values (np.ndarray): The max values of one keypoint in the
corresponding limbs.
end_values (np.ndarray): The max values of the other keypoint in
the corresponding limbs.
Returns:
np.ndarray: The generated pseudo heatmap.
"""
heatmap = np.zeros([img_h, img_w], dtype=np.float32)
for start, end, start_value, end_value in zip(starts, ends,
start_values,
end_values):
value_coeff = min(start_value, end_value)
if value_coeff < self.eps:
continue
min_x, max_x = min(start[0], end[0]), max(start[0], end[0])
min_y, max_y = min(start[1], end[1]), max(start[1], end[1])
min_x = max(int(min_x - 3 * sigma), 0)
max_x = min(int(max_x + 3 * sigma) + 1, img_w)
min_y = max(int(min_y - 3 * sigma), 0)
max_y = min(int(max_y + 3 * sigma) + 1, img_h)
x = np.arange(min_x, max_x, 1, np.float32)
y = np.arange(min_y, max_y, 1, np.float32)
if not (len(x) and len(y)):
continue
y = y[:, None]
x_0 = np.zeros_like(x)
y_0 = np.zeros_like(y)
            # squared distance to the start keypoint
            d2_start = ((x - start[0])**2 + (y - start[1])**2)
            # squared distance to the end keypoint
            d2_end = ((x - end[0])**2 + (y - end[1])**2)
            # squared distance between the start and end keypoints
            d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)
if d2_ab < 1:
full_map = self.generate_a_heatmap(img_h, img_w, [start],
sigma, [start_value])
heatmap = np.maximum(heatmap, full_map)
continue
coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab
a_dominate = coeff <= 0
b_dominate = coeff >= 1
seg_dominate = 1 - a_dominate - b_dominate
position = np.stack([x + y_0, y + x_0], axis=-1)
projection = start + np.stack([coeff, coeff], axis=-1) * (
end - start)
d2_line = position - projection
d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2
d2_seg = (
a_dominate * d2_start + b_dominate * d2_end +
seg_dominate * d2_line)
patch = np.exp(-d2_seg / 2. / sigma**2)
patch = patch * value_coeff
heatmap[min_y:max_y, min_x:max_x] = np.maximum(
heatmap[min_y:max_y, min_x:max_x], patch)
return heatmap
def generate_heatmap(self, img_h, img_w, kps, sigma, max_values):
"""Generate pseudo heatmap for all keypoints and limbs in one frame (if
needed).
Args:
img_h (int): The height of the heatmap.
img_w (int): The width of the heatmap.
kps (np.ndarray): The coordinates of keypoints in this frame.
sigma (float): The sigma of generated gaussian.
max_values (np.ndarray): The confidence score of each keypoint.
Returns:
np.ndarray: The generated pseudo heatmap.
"""
heatmaps = []
if self.with_kp:
num_kp = kps.shape[1]
for i in range(num_kp):
heatmap = self.generate_a_heatmap(img_h, img_w, kps[:, i],
sigma, max_values[:, i])
heatmaps.append(heatmap)
if self.with_limb:
for limb in self.skeletons:
start_idx, end_idx = limb
starts = kps[:, start_idx]
ends = kps[:, end_idx]
start_values = max_values[:, start_idx]
end_values = max_values[:, end_idx]
heatmap = self.generate_a_limb_heatmap(img_h, img_w, starts,
ends, sigma,
start_values,
end_values)
heatmaps.append(heatmap)
return np.stack(heatmaps, axis=-1)
def gen_an_aug(self, results):
"""Generate pseudo heatmaps for all frames.
Args:
results (dict): The dictionary that contains all info of a sample.
Returns:
list[np.ndarray]: The generated pseudo heatmaps.
"""
all_kps = results['keypoint']
kp_shape = all_kps.shape
if 'keypoint_score' in results:
all_kpscores = results['keypoint_score']
else:
all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)
img_h, img_w = results['img_shape']
num_frame = kp_shape[1]
imgs = []
for i in range(num_frame):
sigma = self.sigma
kps = all_kps[:, i]
kpscores = all_kpscores[:, i]
max_values = np.ones(kpscores.shape, dtype=np.float32)
if self.use_score:
max_values = kpscores
hmap = self.generate_heatmap(img_h, img_w, kps, sigma, max_values)
imgs.append(hmap)
return imgs
def __call__(self, results):
if not self.double:
results['imgs'] = np.stack(self.gen_an_aug(results))
else:
results_ = cp.deepcopy(results)
flip = Flip(
flip_ratio=1, left_kp=self.left_kp, right_kp=self.right_kp)
results_ = flip(results_)
results['imgs'] = np.concatenate(
[self.gen_an_aug(results),
self.gen_an_aug(results_)])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'sigma={self.sigma}, '
f'use_score={self.use_score}, '
f'with_kp={self.with_kp}, '
f'with_limb={self.with_limb}, '
f'skeletons={self.skeletons}, '
f'double={self.double}, '
f'left_kp={self.left_kp}, '
f'right_kp={self.right_kp})')
return repr_str
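# Worked example (illustrative, not part of the original file) of the
# gaussian used in generate_a_heatmap: a keypoint at (mu_x, mu_y) with
# confidence c contributes, to each pixel (x, y) inside its 3-sigma window,
#   c * exp(-((x - mu_x)**2 + (y - mu_y)**2) / (2 * sigma**2))
# and overlapping persons are merged with an element-wise maximum. A pixel
# exactly one sigma away from a keypoint with c = 1.0 thus receives
# exp(-0.5) ~= 0.607.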
# STTS-main/VideoSwin/mmaction/datasets/pipelines/augmentations.py
import random
import warnings
from collections.abc import Sequence
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
import timm.data as tdata
import torch
from ..builder import PIPELINES
def _combine_quadruple(a, b):
return (a[0] + a[2] * b[0], a[1] + a[3] * b[1], a[2] * b[2], a[3] * b[3])
def _flip_quadruple(a):
return (1 - a[0] - a[2], a[1], a[2], a[3])
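# Worked example (illustrative, not part of the original file) of
# _combine_quadruple: quadruples are (x, y, w, h) crops in normalized
# coordinates. Composing a first crop a = (0.1, 0.1, 0.5, 0.5) with a
# second crop b = (0.2, 0.2, 0.5, 0.5) taken inside it yields
#   (0.1 + 0.5 * 0.2, 0.1 + 0.5 * 0.2, 0.5 * 0.5, 0.5 * 0.5)
#   = (0.2, 0.2, 0.25, 0.25)
# i.e. the second crop expressed in the original image's coordinate frame.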
def _init_lazy_if_proper(results, lazy):
"""Initialize lazy operation properly.
Make sure that a lazy operation is properly initialized,
and avoid a non-lazy operation accidentally getting mixed in.
Required keys in results are "imgs" if "img_shape" not in results,
otherwise, Required keys in results are "img_shape", add or modified keys
are "img_shape", "lazy".
Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip",
"flip_direction", "interpolation".
Args:
results (dict): A dict stores data pipeline result.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
if 'img_shape' not in results:
results['img_shape'] = results['imgs'][0].shape[:2]
if lazy:
if 'lazy' not in results:
img_h, img_w = results['img_shape']
lazyop = dict()
lazyop['original_shape'] = results['img_shape']
lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h],
dtype=np.float32)
lazyop['flip'] = False
lazyop['flip_direction'] = None
lazyop['interpolation'] = None
results['lazy'] = lazyop
else:
assert 'lazy' not in results, 'Use Fuse after lazy operations'
@PIPELINES.register_module()
class PoseCompact:
"""Convert the coordinates of keypoints to make it more compact.
    Specifically, it first finds a tight bounding box that surrounds all
    joints in each frame, then expands the tight box by a given padding
    ratio. For example, if 'padding == 0.25', the expanded box has an
    unchanged center and 1.25x width and height.
Required keys in results are "img_shape", "keypoint", add or modified keys
are "img_shape", "keypoint", "crop_quadruple".
Args:
padding (float): The padding size. Default: 0.25.
threshold (int): The threshold for the tight bounding box. If the width
or height of the tight bounding box is smaller than the threshold,
we do not perform the compact operation. Default: 10.
hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded
box. Float indicates the specific ratio and tuple indicates a
ratio range. If set as None, it means there is no requirement on
hw_ratio. Default: None.
allow_imgpad (bool): Whether to allow expanding the box outside the
image to meet the hw_ratio requirement. Default: True.
"""
def __init__(self,
padding=0.25,
threshold=10,
hw_ratio=None,
allow_imgpad=True):
self.padding = padding
self.threshold = threshold
if hw_ratio is not None:
hw_ratio = _pair(hw_ratio)
self.hw_ratio = hw_ratio
self.allow_imgpad = allow_imgpad
assert self.padding >= 0
def __call__(self, results):
img_shape = results['img_shape']
h, w = img_shape
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
kp_x = kp[..., 0]
kp_y = kp[..., 1]
        min_x = np.min(kp_x[kp_x != 0], initial=np.inf)
        min_y = np.min(kp_y[kp_y != 0], initial=np.inf)
        max_x = np.max(kp_x[kp_x != 0], initial=-np.inf)
        max_y = np.max(kp_y[kp_y != 0], initial=-np.inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return results
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# hot update
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
# the order is x, y, w, h (in [0, 1]), a tuple
crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))
new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,
(max_y - min_y) / h)
crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)
results['crop_quadruple'] = crop_quadruple
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '
f'threshold={self.threshold}, '
f'hw_ratio={self.hw_ratio}, '
f'allow_imgpad={self.allow_imgpad})')
return repr_str
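# Worked example (illustrative, not part of the original file) of the
# padding step above: a tight keypoint box spanning x in [40, 140] has
# center 90 and half-width 50; with padding=0.25 the half-width grows to
# 50 * 1.25 = 62.5, so the compact crop spans x in [27.5, 152.5] before
# the optional hw_ratio adjustment and integer rounding.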
class EntityBoxRescale:
def __init__(self, scale_factor):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class EntityBoxCrop:
def __init__(self, crop_bbox):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class EntityBoxFlip:
def __init__(self, img_shape):
raise NotImplementedError(
'This component should not be used in the '
'data pipeline and is removed in PR #782. Details see '
'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class Imgaug:
"""Imgaug augmentation.
Adds custom transformations from imgaug library.
Please visit `https://imgaug.readthedocs.io/en/latest/index.html`
to get more information. Two demo configs could be found in tsn and i3d
config folder.
It's better to use uint8 images as inputs since imgaug works best with
numpy dtype uint8 and isn't well tested with other dtypes. It should be
noted that not all of the augmenters have the same input and output dtype,
which may cause unexpected results.
Required keys are "imgs", "img_shape"(if "gt_bboxes" is not None) and
"modality", added or modified keys are "imgs", "img_shape", "gt_bboxes"
and "proposals".
It is worth mentioning that `Imgaug` will NOT create custom keys like
"interpolation", "crop_bbox", "flip_direction", etc. So when using
`Imgaug` along with other mmaction2 pipelines, we should pay more attention
to required keys.
Two steps to use `Imgaug` pipeline:
1. Create initialization parameter `transforms`. There are three ways
to create `transforms`.
1) string: only support `default` for now.
e.g. `transforms='default'`
2) list[dict]: create a list of augmenters by a list of dicts, each
dict corresponds to one augmenter. Every dict MUST contain a key
named `type`. `type` should be a string(iaa.Augmenter's name) or
an iaa.Augmenter subclass.
e.g. `transforms=[dict(type='Rotate', rotate=(-20, 20))]`
e.g. `transforms=[dict(type=iaa.Rotate, rotate=(-20, 20))]`
3) iaa.Augmenter: create an imgaug.Augmenter object.
e.g. `transforms=iaa.Rotate(rotate=(-20, 20))`
2. Add `Imgaug` in dataset pipeline. It is recommended to insert imgaug
pipeline before `Normalize`. A demo pipeline is listed as follows.
```
pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=16,
),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Imgaug', transforms='default'),
# dict(type='Imgaug', transforms=[
# dict(type='Rotate', rotate=(-20, 20))
# ]),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
```
Args:
transforms (str | list[dict] | :obj:`iaa.Augmenter`): Three different
ways to create imgaug augmenter.
"""
def __init__(self, transforms):
import imgaug.augmenters as iaa
if transforms == 'default':
self.transforms = self.default_transforms()
elif isinstance(transforms, list):
assert all(isinstance(trans, dict) for trans in transforms)
self.transforms = transforms
elif isinstance(transforms, iaa.Augmenter):
self.aug = self.transforms = transforms
else:
raise ValueError('transforms must be `default` or a list of dicts'
' or iaa.Augmenter object')
if not isinstance(transforms, iaa.Augmenter):
self.aug = iaa.Sequential(
[self.imgaug_builder(t) for t in self.transforms])
@staticmethod
def default_transforms():
"""Default transforms for imgaug.
Implement RandAugment by imgaug.
        Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyper parameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
        One augmenter, ``SolarizeAdd``, is missing since imgaug does not
        support it.
Returns:
dict: The constructed RandAugment transforms.
"""
# RandAugment hyper params
num_augmenters = 2
cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
return [
dict(
type='SomeOf',
n=num_augmenters,
children=[
dict(
type='ShearX',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='ShearY',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateX',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateY',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='Rotate',
rotate=30 * cur_level * random.choice([-1, 1])),
dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
dict(type='Solarize', threshold=256 * cur_level),
dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
dict(
type='EnhanceBrightness', factor=1.8 * cur_level + .1),
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
dict(type='Autocontrast', cutoff=0),
dict(type='Equalize'),
dict(type='Invert', p=1.),
dict(
type='Cutout',
nb_iterations=1,
size=0.2 * cur_level,
squared=True)
])
]
def imgaug_builder(self, cfg):
"""Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
"""
import imgaug.augmenters as iaa
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
else getattr(iaa.pillike, obj_type)
elif issubclass(obj_type, iaa.Augmenter):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'children' in args:
args['children'] = [
self.imgaug_builder(child) for child in args['children']
]
return obj_cls(**args)
def __repr__(self):
repr_str = self.__class__.__name__ + f'(transforms={self.aug})'
return repr_str
def __call__(self, results):
assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'
in_type = results['imgs'][0].dtype.type
cur_aug = self.aug.to_deterministic()
results['imgs'] = [
cur_aug.augment_image(frame) for frame in results['imgs']
]
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype.type
assert in_type == out_type, \
('Imgaug input dtype and output dtype are not the same. ',
f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
from imgaug.augmentables import bbs
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['gt_bboxes']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['gt_bboxes'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
if 'proposals' in results:
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['proposals']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['proposals'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
results['img_shape'] = (img_h, img_w)
return results
@PIPELINES.register_module()
class RandomErasing(tdata.random_erasing.RandomErasing):
def __init__(self, device='cpu', **args):
super().__init__(device=device, **args)
def __call__(self, results):
in_type = results['imgs'][0].dtype.type
rand_state = random.getstate()
torchrand_state = torch.get_rng_state()
numpyrand_state = np.random.get_state()
        # not using cuda to preserve determinism
out_frame = []
for frame in results['imgs']:
random.setstate(rand_state)
torch.set_rng_state(torchrand_state)
np.random.set_state(numpyrand_state)
frame = super().__call__(torch.from_numpy(frame).permute(2, 0, 1)).permute(1, 2, 0).numpy()
out_frame.append(frame)
results['imgs'] = out_frame
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype.type
assert in_type == out_type, \
            ('RandomErasing input dtype and output dtype are not the same. ',
f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
raise NotImplementedError('only support recognition now')
assert results['img_shape'] == (img_h, img_w)
return results
@PIPELINES.register_module()
class Fuse:
"""Fuse lazy operations.
Fusion order:
crop -> resize -> flip
Required keys are "imgs", "img_shape" and "lazy", added or modified keys
are "imgs", "lazy".
Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction".
"""
def __call__(self, results):
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results
@PIPELINES.register_module()
class RandomScale:
"""Resize images by a random scale.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy",
"scale", "resize_size". Required keys in "lazy" is None, added or
modified key is "interpolation".
Args:
scales (tuple[int]): Tuple of scales to be chosen for resize.
mode (str): Selection mode for choosing the scale. Options are "range"
and "value". If set to "range", The short edge will be randomly
chosen from the range of minimum and maximum on the shorter one
in all tuples. Otherwise, the longer edge will be randomly chosen
from the range of minimum and maximum on the longer one in all
tuples. Default: 'range'.
"""
def __init__(self, scales, mode='range', **kwargs):
warnings.warn('"RandomScale" is deprecated and will be removed in '
'later versions. It is currently not used in MMAction2')
self.mode = mode
if self.mode not in ['range', 'value']:
raise ValueError(f"mode should be 'range' or 'value', "
f'but got {self.mode}')
self.scales = scales
self.kwargs = kwargs
def select_scale(self, scales):
num_scales = len(scales)
if num_scales == 1:
# specify a fixed scale
scale = scales[0]
elif num_scales == 2:
if self.mode == 'range':
scale_long = [max(s) for s in scales]
scale_short = [min(s) for s in scales]
long_edge = np.random.randint(
min(scale_long),
max(scale_long) + 1)
short_edge = np.random.randint(
min(scale_short),
max(scale_short) + 1)
scale = (long_edge, short_edge)
elif self.mode == 'value':
scale = random.choice(scales)
else:
if self.mode != 'value':
raise ValueError("Only 'value' mode supports more than "
'2 image scales')
scale = random.choice(scales)
return scale
def __call__(self, results):
scale = self.select_scale(self.scales)
results['scale'] = scale
resize = Resize(scale, **self.kwargs)
results = resize(results)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scales={self.scales}, mode={self.mode})')
return repr_str
@PIPELINES.register_module()
class RandomCrop:
"""Vanilla square random crop that specifics the output size.
Required keys in results are "img_shape", "keypoint" (optional), "imgs"
(optional), added or modified keys are "keypoint", "imgs", "lazy"; Required
keys in "lazy" are "flip", "crop_bbox", added or modified key is
"crop_bbox".
Args:
size (int): The output size of the images.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, size, lazy=False):
if not isinstance(size, int):
raise TypeError(f'Size must be an int, but got {type(size)}')
self.size = size
self.lazy = lazy
@staticmethod
def _crop_kps(kps, crop_bbox):
return kps - crop_bbox[:2]
@staticmethod
def _crop_imgs(imgs, crop_bbox):
x1, y1, x2, y2 = crop_bbox
return [img[y1:y2, x1:x2] for img in imgs]
@staticmethod
def _box_crop(box, crop_bbox):
"""Crop the bounding boxes according to the crop_bbox.
Args:
box (np.ndarray): The bounding boxes.
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
x1, y1, x2, y2 = crop_bbox
img_w, img_h = x2 - x1, y2 - y1
box_ = box.copy()
box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
return box_
def _all_box_crop(self, results, crop_bbox):
"""Crop the gt_bboxes and proposals in results according to crop_bbox.
Args:
results (dict): All information about the sample, which contain
'gt_bboxes' and 'proposals' (optional).
crop_bbox(np.ndarray): The bbox used to crop the original image.
"""
results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_crop(results['proposals'],
crop_bbox)
return results
def __call__(self, results):
"""Performs the RandomCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
assert self.size <= img_h and self.size <= img_w
y_offset = 0
x_offset = 0
if img_h > self.size:
y_offset = int(np.random.randint(0, img_h - self.size))
if img_w > self.size:
x_offset = int(np.random.randint(0, img_w - self.size))
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = self.size / img_w, self.size / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
new_h, new_w = self.size, self.size
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
# Process entity boxes
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(size={self.size}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class RandomResizedCrop(RandomCrop):
"""Random crop that specifics the area and height-weight ratio range.
Required keys in results are "img_shape", "crop_bbox", "imgs" (optional),
"keypoint" (optional), added or modified keys are "imgs", "keypoint",
"crop_bbox" and "lazy"; Required keys in "lazy" are "flip", "crop_bbox",
added or modified key is "crop_bbox".
Args:
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of
output cropped images. Default: (3 / 4, 4 / 3).
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
area_range=(0.08, 1.0),
aspect_ratio_range=(3 / 4, 4 / 3),
lazy=False):
self.area_range = area_range
self.aspect_ratio_range = aspect_ratio_range
self.lazy = lazy
if not mmcv.is_tuple_of(self.area_range, float):
raise TypeError(f'Area_range must be a tuple of float, '
f'but got {type(area_range)}')
if not mmcv.is_tuple_of(self.aspect_ratio_range, float):
raise TypeError(f'Aspect_ratio_range must be a tuple of float, '
f'but got {type(aspect_ratio_range)}')
@staticmethod
def get_crop_bbox(img_shape,
area_range,
aspect_ratio_range,
max_attempts=10):
"""Get a crop bbox given the area range and aspect ratio range.
Args:
img_shape (Tuple[int]): Image shape
area_range (Tuple[float]): The candidate area scales range of
output cropped images. Default: (0.08, 1.0).
aspect_ratio_range (Tuple[float]): The candidate aspect
ratio range of output cropped images. Default: (3 / 4, 4 / 3).
            max_attempts (int): The maximum number of attempts to generate a
                random candidate bounding box. If no qualified box is found
                within `max_attempts`, the center crop will be used.
                Default: 10.
Returns:
            tuple[int]: A random crop bbox (left, top, right, bottom) within
            the area range and aspect ratio range.
"""
assert 0 < area_range[0] <= area_range[1] <= 1
assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
img_h, img_w = img_shape
area = img_h * img_w
min_ar, max_ar = aspect_ratio_range
aspect_ratios = np.exp(
np.random.uniform(
np.log(min_ar), np.log(max_ar), size=max_attempts))
target_areas = np.random.uniform(*area_range, size=max_attempts) * area
candidate_crop_w = np.round(np.sqrt(target_areas *
aspect_ratios)).astype(np.int32)
candidate_crop_h = np.round(np.sqrt(target_areas /
aspect_ratios)).astype(np.int32)
for i in range(max_attempts):
crop_w = candidate_crop_w[i]
crop_h = candidate_crop_h[i]
if crop_h <= img_h and crop_w <= img_w:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h
# Fallback
crop_size = min(img_h, img_w)
x_offset = (img_w - crop_size) // 2
y_offset = (img_h - crop_size) // 2
return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size
def __call__(self, results):
"""Performs the RandomResizeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
left, top, right, bottom = self.get_crop_bbox(
(img_h, img_w), self.area_range, self.aspect_ratio_range)
new_h, new_w = bottom - top, right - left
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'area_range={self.area_range}, '
f'aspect_ratio_range={self.aspect_ratio_range}, '
f'lazy={self.lazy})')
return repr_str
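# Illustrative sketch, not part of the original file: `get_crop_bbox` can be
# probed directly; with the default ranges the sampled box always lies
# inside the image (the 240x320 shape is an assumption for the example).
#   >>> left, top, right, bottom = RandomResizedCrop.get_crop_bbox(
#   ...     (240, 320), (0.08, 1.0), (3 / 4, 4 / 3))
#   >>> 0 <= left < right <= 320 and 0 <= top < bottom <= 240
#   True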
@PIPELINES.register_module()
class MultiScaleCrop(RandomCrop):
"""Crop images with a list of randomly selected scales.
Randomly select the w and h scales from a list of scales. Scale of 1 means
    the base size, which is the minimum of image width and height. The scale
level of w and h is controlled to be smaller than a certain value to
prevent too large or small aspect ratio.
Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
added or modified keys are "imgs", "crop_bbox", "img_shape", "lazy" and
"scales". Required keys in "lazy" are "crop_bbox", added or modified key is
"crop_bbox".
Args:
input_size (int | tuple[int]): (w, h) of network input.
scales (tuple[float]): width and height scales to be selected.
max_wh_scale_gap (int): Maximum gap of w and h scale levels.
Default: 1.
random_crop (bool): If set to True, the cropping bbox will be randomly
            sampled, otherwise it will be sampled from fixed regions.
Default: False.
num_fixed_crops (int): If set to 5, the cropping bbox will keep 5
basic fixed regions: "upper left", "upper right", "lower left",
"lower right", "center". If set to 13, the cropping bbox will
append another 8 fix regions: "center left", "center right",
"lower center", "upper center", "upper left quarter",
"upper right quarter", "lower left quarter", "lower right quarter".
Default: 5.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
input_size,
scales=(1, ),
max_wh_scale_gap=1,
random_crop=False,
num_fixed_crops=5,
lazy=False):
self.input_size = _pair(input_size)
if not mmcv.is_tuple_of(self.input_size, int):
raise TypeError(f'Input_size must be int or tuple of int, '
f'but got {type(input_size)}')
if not isinstance(scales, tuple):
raise TypeError(f'Scales must be tuple, but got {type(scales)}')
if num_fixed_crops not in [5, 13]:
            raise ValueError(f'Num_fixed_crops must be in {[5, 13]}, '
f'but got {num_fixed_crops}')
self.scales = scales
self.max_wh_scale_gap = max_wh_scale_gap
self.random_crop = random_crop
self.num_fixed_crops = num_fixed_crops
self.lazy = lazy
def __call__(self, results):
"""Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
base_size = min(img_h, img_w)
crop_sizes = [int(base_size * s) for s in self.scales]
candidate_sizes = []
for i, h in enumerate(crop_sizes):
for j, w in enumerate(crop_sizes):
if abs(i - j) <= self.max_wh_scale_gap:
candidate_sizes.append([w, h])
crop_size = random.choice(candidate_sizes)
for i in range(2):
if abs(crop_size[i] - self.input_size[i]) < 3:
crop_size[i] = self.input_size[i]
crop_w, crop_h = crop_size
if self.random_crop:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
else:
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
candidate_offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
if self.num_fixed_crops == 13:
extra_candidate_offsets = [
(0, 2 * h_step), # center left
(4 * w_step, 2 * h_step), # center right
(2 * w_step, 4 * h_step), # lower center
(2 * w_step, 0 * h_step), # upper center
(1 * w_step, 1 * h_step), # upper left quarter
(3 * w_step, 1 * h_step), # upper right quarter
(1 * w_step, 3 * h_step), # lower left quarter
(3 * w_step, 3 * h_step) # lower right quarter
]
candidate_offsets.extend(extra_candidate_offsets)
x_offset, y_offset = random.choice(candidate_offsets)
new_h, new_w = crop_h, crop_w
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
results['scales'] = self.scales
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'input_size={self.input_size}, scales={self.scales}, '
f'max_wh_scale_gap={self.max_wh_scale_gap}, '
f'random_crop={self.random_crop}, '
f'num_fixed_crops={self.num_fixed_crops}, '
f'lazy={self.lazy})')
return repr_str
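# Illustrative sketch, not part of the original file: how candidate crop
# sizes are enumerated for scales=(1, 0.875, 0.75) with max_wh_scale_gap=1
# on an assumed 256-pixel base size, mirroring the loop in __call__.
#   >>> sizes = [int(256 * s) for s in (1, 0.875, 0.75)]
#   >>> [(w, h) for i, h in enumerate(sizes) for j, w in enumerate(sizes)
#   ...  if abs(i - j) <= 1]
#   [(256, 256), (224, 256), (256, 224), (224, 224), (192, 224), (224, 192), (192, 192)]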
@PIPELINES.register_module()
class Resize:
"""Resize images to a specific size.
Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
(optional), added or modified keys are "imgs", "img_shape", "keep_ratio",
"scale_factor", "lazy", "resize_size". Required keys in "lazy" is None,
added or modified key is "interpolation".
Args:
scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling
factor or maximum size:
If it is a float number, the image will be rescaled by this
factor, else if it is a tuple of 2 integers, the image will
be rescaled as large as possible within the scale.
Otherwise, it serves as (w, h) of output size.
keep_ratio (bool): If set to True, Images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Default: True.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
scale,
keep_ratio=True,
interpolation='bilinear',
lazy=False):
if isinstance(scale, float):
if scale <= 0:
raise ValueError(f'Invalid scale {scale}, must be positive.')
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
# assign np.inf to long edge for rescaling short edge later.
scale = (np.inf, max_long_edge)
else:
raise TypeError(
f'Scale must be float or tuple of int, but got {type(scale)}')
self.scale = scale
self.keep_ratio = keep_ratio
self.interpolation = interpolation
self.lazy = lazy
def _resize_imgs(self, imgs, new_w, new_h):
return [
mmcv.imresize(
img, (new_w, new_h), interpolation=self.interpolation)
for img in imgs
]
@staticmethod
def _resize_kps(kps, scale_factor):
return kps * scale_factor
@staticmethod
def _box_resize(box, scale_factor):
"""Rescale the bounding boxes according to the scale_factor.
Args:
box (np.ndarray): The bounding boxes.
scale_factor (np.ndarray): The scale factor used for rescaling.
"""
assert len(scale_factor) == 2
scale_factor = np.concatenate([scale_factor, scale_factor])
return box * scale_factor
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
if 'scale_factor' not in results:
results['scale_factor'] = np.array([1, 1], dtype=np.float32)
img_h, img_w = results['img_shape']
if self.keep_ratio:
new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
else:
new_w, new_h = self.scale
self.scale_factor = np.array([new_w / img_w, new_h / img_h],
dtype=np.float32)
results['img_shape'] = (new_h, new_w)
results['keep_ratio'] = self.keep_ratio
results['scale_factor'] = results['scale_factor'] * self.scale_factor
if not self.lazy:
if 'imgs' in results:
results['imgs'] = self._resize_imgs(results['imgs'], new_w,
new_h)
if 'keypoint' in results:
results['keypoint'] = self._resize_kps(results['keypoint'],
self.scale_factor)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
lazyop['interpolation'] = self.interpolation
if 'gt_bboxes' in results:
assert not self.lazy
results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],
self.scale_factor)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_resize(
results['proposals'], self.scale_factor)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'scale={self.scale}, keep_ratio={self.keep_ratio}, '
f'interpolation={self.interpolation}, '
f'lazy={self.lazy})')
return repr_str
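# Illustrative sketch, not part of the original file: with scale=(-1, 256)
# and keep_ratio=True, `scale` is stored as (np.inf, 256) and the short edge
# is rescaled to 256 via mmcv.rescale_size (input size assumed for the
# example).
#   >>> mmcv.rescale_size((480, 270), (np.inf, 256))
#   (455, 256)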
@PIPELINES.register_module()
class RandomRescale:
"""Randomly resize images so that the short_edge is resized to a specific
    size in a given range. The aspect ratio is unchanged after resizing.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "resize_size",
"short_edge".
Args:
scale_range (tuple[int]): The range of short edge length. A closed
interval.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear". Default: "bilinear".
"""
def __init__(self, scale_range, interpolation='bilinear'):
self.scale_range = scale_range
# make sure scale_range is legal, first make sure the type is OK
assert mmcv.is_tuple_of(scale_range, int)
assert len(scale_range) == 2
assert scale_range[0] < scale_range[1]
assert np.all([x > 0 for x in scale_range])
self.keep_ratio = True
self.interpolation = interpolation
def __call__(self, results):
"""Performs the Resize augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
short_edge = np.random.randint(self.scale_range[0],
self.scale_range[1] + 1)
resize = Resize((-1, short_edge),
keep_ratio=True,
interpolation=self.interpolation,
lazy=False)
results = resize(results)
results['short_edge'] = short_edge
return results
def __repr__(self):
scale_range = self.scale_range
repr_str = (f'{self.__class__.__name__}('
f'scale_range=({scale_range[0]}, {scale_range[1]}), '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class Flip:
"""Flip the input images with a probability.
Reverse the order of elements in the given imgs with a specific direction.
The shape of the imgs is preserved, but the elements are reordered.
Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
(optional), added or modified keys are "imgs", "keypoint", "lazy" and
"flip_direction". Required keys in "lazy" is None, added or modified key
are "flip" and "flip_direction". The Flip augmentation should be placed
after any cropping / reshaping augmentations, to make sure crop_quadruple
is calculated properly.
Args:
flip_ratio (float): Probability of implementing flip. Default: 0.5.
direction (str): Flip imgs horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
flip_label_map (Dict[int, int] | None): Transform the label of the
flipped image with the specific label. Default: None.
left_kp (list[int]): Indexes of left keypoints, used to flip keypoints.
Default: None.
        right_kp (list[int]): Indexes of right keypoints, used to flip
keypoints. Default: None.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
_directions = ['horizontal', 'vertical']
def __init__(self,
flip_ratio=0.5,
direction='horizontal',
flip_label_map=None,
left_kp=None,
right_kp=None,
lazy=False):
if direction not in self._directions:
raise ValueError(f'Direction {direction} is not supported. '
f'Currently support ones are {self._directions}')
self.flip_ratio = flip_ratio
self.direction = direction
self.flip_label_map = flip_label_map
self.left_kp = left_kp
self.right_kp = right_kp
self.lazy = lazy
def _flip_imgs(self, imgs, modality):
_ = [mmcv.imflip_(img, self.direction) for img in imgs]
lt = len(imgs)
if modality == 'Flow':
# The 1st frame of each 2 frames is flow-x
for i in range(0, lt, 2):
imgs[i] = mmcv.iminvert(imgs[i])
return imgs
def _flip_kps(self, kps, kpscores, img_width):
kp_x = kps[..., 0]
kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0]
new_order = list(range(kps.shape[2]))
if self.left_kp is not None and self.right_kp is not None:
for left, right in zip(self.left_kp, self.right_kp):
new_order[left] = right
new_order[right] = left
kps = kps[:, :, new_order]
if kpscores is not None:
kpscores = kpscores[:, :, new_order]
return kps, kpscores
@staticmethod
def _box_flip(box, img_width):
"""Flip the bounding boxes given the width of the image.
Args:
box (np.ndarray): The bounding boxes.
img_width (int): The img width.
"""
box_ = box.copy()
box_[..., 0::4] = img_width - box[..., 2::4]
box_[..., 2::4] = img_width - box[..., 0::4]
return box_
def __call__(self, results):
"""Performs the Flip augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
assert self.direction == 'horizontal', (
                'Only horizontal flips are '
'supported for human keypoints')
modality = results['modality']
if modality == 'Flow':
assert self.direction == 'horizontal'
flip = np.random.rand() < self.flip_ratio
results['flip'] = flip
results['flip_direction'] = self.direction
img_width = results['img_shape'][1]
if self.flip_label_map is not None and flip:
results['label'] = self.flip_label_map.get(results['label'],
results['label'])
if not self.lazy:
if flip:
if 'imgs' in results:
results['imgs'] = self._flip_imgs(results['imgs'],
modality)
if 'keypoint' in results:
kp = results['keypoint']
kpscore = results.get('keypoint_score', None)
kp, kpscore = self._flip_kps(kp, kpscore, img_width)
results['keypoint'] = kp
if 'keypoint_score' in results:
results['keypoint_score'] = kpscore
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Use one Flip please')
lazyop['flip'] = flip
lazyop['flip_direction'] = self.direction
if 'gt_bboxes' in results and flip:
assert not self.lazy and self.direction == 'horizontal'
width = results['img_shape'][1]
results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)
if 'proposals' in results and results['proposals'] is not None:
assert results['proposals'].shape[1] == 4
results['proposals'] = self._box_flip(results['proposals'],
width)
return results
def __repr__(self):
repr_str = (
f'{self.__class__.__name__}('
f'flip_ratio={self.flip_ratio}, direction={self.direction}, '
f'flip_label_map={self.flip_label_map}, lazy={self.lazy})')
return repr_str
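# Illustrative sketch, not part of the original file: `_box_flip` mirrors
# boxes horizontally around the image width (values assumed for the example).
#   >>> Flip._box_flip(np.array([[10., 20., 50., 60.]]), img_width=100)
#   array([[50., 20., 90., 60.]])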
@PIPELINES.register_module()
class Normalize:
"""Normalize images with the given mean and std value.
Required keys are "imgs", "img_shape", "modality", added or modified
keys are "imgs" and "img_norm_cfg". If modality is 'Flow', additional
keys "scale_factor" is required
Args:
mean (Sequence[float]): Mean values of different channels.
std (Sequence[float]): Std values of different channels.
to_bgr (bool): Whether to convert channels from RGB to BGR.
Default: False.
adjust_magnitude (bool): Indicate whether to adjust the flow magnitude
on 'scale_factor' when modality is 'Flow'. Default: False.
"""
def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):
if not isinstance(mean, Sequence):
raise TypeError(
f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'
)
if not isinstance(std, Sequence):
raise TypeError(
f'Std must be list, tuple or np.ndarray, but got {type(std)}')
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_bgr = to_bgr
self.adjust_magnitude = adjust_magnitude
def __call__(self, results):
modality = results['modality']
if modality == 'RGB':
n = len(results['imgs'])
h, w, c = results['imgs'][0].shape
imgs = np.empty((n, h, w, c), dtype=np.float32)
for i, img in enumerate(results['imgs']):
imgs[i] = img
for img in imgs:
mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr)
results['imgs'] = imgs
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_bgr=self.to_bgr)
return results
if modality == 'Flow':
num_imgs = len(results['imgs'])
assert num_imgs % 2 == 0
assert self.mean.shape[0] == 2
assert self.std.shape[0] == 2
n = num_imgs // 2
h, w = results['imgs'][0].shape
x_flow = np.empty((n, h, w), dtype=np.float32)
y_flow = np.empty((n, h, w), dtype=np.float32)
for i in range(n):
x_flow[i] = results['imgs'][2 * i]
y_flow[i] = results['imgs'][2 * i + 1]
x_flow = (x_flow - self.mean[0]) / self.std[0]
y_flow = (y_flow - self.mean[1]) / self.std[1]
if self.adjust_magnitude:
x_flow = x_flow * results['scale_factor'][0]
y_flow = y_flow * results['scale_factor'][1]
imgs = np.stack([x_flow, y_flow], axis=-1)
results['imgs'] = imgs
args = dict(
mean=self.mean,
std=self.std,
to_bgr=self.to_bgr,
adjust_magnitude=self.adjust_magnitude)
results['img_norm_cfg'] = args
return results
raise NotImplementedError
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'mean={self.mean}, '
f'std={self.std}, '
f'to_bgr={self.to_bgr}, '
f'adjust_magnitude={self.adjust_magnitude})')
return repr_str
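# Illustrative sketch, not part of the original file: a typical RGB
# normalization config for ImageNet-pretrained backbones (the mean/std
# values are the common mm-style defaults, assumed here for illustration).
#   >>> normalize = Normalize(
#   ...     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375],
#   ...     to_bgr=False)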
@PIPELINES.register_module()
class ColorJitter:
"""Randomly distort the brightness, contrast, saturation and hue of images,
and add PCA based noise into images.
Note: The input images should be in RGB channel order.
Code Reference:
https://gluon-cv.mxnet.io/_modules/gluoncv/data/transforms/experimental/image.html
https://mxnet.apache.org/api/python/docs/_modules/mxnet/image/image.html#LightingAug
If specified to apply color space augmentation, it will distort the image
color space by changing brightness, contrast and saturation. Then, it will
    add some random distortion to the images in different color channels.
Note that the input images should be in original range [0, 255] and in RGB
channel sequence.
Required keys are "imgs", added or modified keys are "imgs", "eig_val",
"eig_vec", "alpha_std" and "color_space_aug".
Args:
color_space_aug (bool): Whether to apply color space augmentations. If
specified, it will change the brightness, contrast, saturation and
hue of images, then add PCA based noise to images. Otherwise, it
will directly add PCA based noise to images. Default: False.
alpha_std (float): Std in the normal Gaussian distribution of alpha.
eig_val (np.ndarray | None): Eigenvalues of [1 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvalues. Default: None.
eig_vec (np.ndarray | None): Eigenvectors of [3 x 3] size for RGB
channel jitter. If set to None, it will use the default
eigenvectors. Default: None.
"""
def __init__(self,
color_space_aug=False,
alpha_std=0.1,
eig_val=None,
eig_vec=None):
if eig_val is None:
# note that the data range should be [0, 255]
self.eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
else:
self.eig_val = eig_val
if eig_vec is None:
self.eig_vec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype=np.float32)
else:
self.eig_vec = eig_vec
self.alpha_std = alpha_std
self.color_space_aug = color_space_aug
@staticmethod
def brightness(img, delta):
"""Brightness distortion.
Args:
img (np.ndarray): An input image.
delta (float): Delta value to distort brightness.
                It lies in the range [-32, 32).
Returns:
np.ndarray: A brightness distorted image.
"""
if np.random.rand() > 0.5:
img = img + np.float32(delta)
return img
@staticmethod
def contrast(img, alpha):
"""Contrast distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort contrast.
                It lies in the range [0.6, 1.4).
Returns:
np.ndarray: A contrast distorted image.
"""
if np.random.rand() > 0.5:
img = img * np.float32(alpha)
return img
@staticmethod
def saturation(img, alpha):
"""Saturation distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to distort the saturation.
                It lies in the range [0.6, 1.4).
Returns:
np.ndarray: A saturation distorted image.
"""
if np.random.rand() > 0.5:
gray = img * np.array([0.299, 0.587, 0.114], dtype=np.float32)
gray = np.sum(gray, 2, keepdims=True)
gray *= (1.0 - alpha)
img = img * alpha
img = img + gray
return img
@staticmethod
def hue(img, alpha):
"""Hue distortion.
Args:
img (np.ndarray): An input image.
alpha (float): Alpha value to control the degree of rotation
                for hue. It lies in the range [-18, 18).
Returns:
np.ndarray: A hue distorted image.
"""
if np.random.rand() > 0.5:
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]],
dtype=np.float32)
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]],
dtype=np.float32)
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]],
dtype=np.float32)
t = np.dot(np.dot(ityiq, bt), tyiq).T
t = np.array(t, dtype=np.float32)
img = np.dot(img, t)
return img
def __call__(self, results):
imgs = results['imgs']
out = []
if self.color_space_aug:
bright_delta = np.random.uniform(-32, 32)
contrast_alpha = np.random.uniform(0.6, 1.4)
saturation_alpha = np.random.uniform(0.6, 1.4)
hue_alpha = np.random.uniform(-18, 18)
jitter_coin = np.random.rand()
for img in imgs:
img = self.brightness(img, delta=bright_delta)
if jitter_coin > 0.5:
img = self.contrast(img, alpha=contrast_alpha)
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
else:
img = self.saturation(img, alpha=saturation_alpha)
img = self.hue(img, alpha=hue_alpha)
img = self.contrast(img, alpha=contrast_alpha)
out.append(img)
else:
out = imgs
# Add PCA based noise
alpha = np.random.normal(0, self.alpha_std, size=(3, ))
rgb = np.array(
np.dot(self.eig_vec * alpha, self.eig_val), dtype=np.float32)
rgb = rgb[None, None, ...]
results['imgs'] = [img + rgb for img in out]
results['eig_val'] = self.eig_val
results['eig_vec'] = self.eig_vec
results['alpha_std'] = self.alpha_std
results['color_space_aug'] = self.color_space_aug
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'color_space_aug={self.color_space_aug}, '
f'alpha_std={self.alpha_std}, '
f'eig_val={self.eig_val}, '
f'eig_vec={self.eig_vec})')
return repr_str
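# Illustrative sketch, not part of the original file: the PCA ("lighting")
# noise is a single per-clip RGB offset built from the eigen decomposition
# (the `alpha` draws stand in for the N(0, alpha_std) samples used above).
#   >>> cj = ColorJitter()
#   >>> alpha = np.array([0.1, -0.05, 0.02], dtype=np.float32)
#   >>> np.dot(cj.eig_vec * alpha, cj.eig_val).shape
#   (3,)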
@PIPELINES.register_module()
class CenterCrop(RandomCrop):
"""Crop the center area from images.
Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
added or modified keys are "imgs", "keypoint", "crop_bbox", "lazy" and
"img_shape". Required keys in "lazy" is "crop_bbox", added or modified key
is "crop_bbox".
Args:
crop_size (int | tuple[int]): (w, h) of crop size.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self, crop_size, lazy=False):
self.crop_size = _pair(crop_size)
self.lazy = lazy
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the CenterCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
crop_w, crop_h = self.crop_size
left = (img_w - crop_w) // 2
top = (img_h - crop_h) // 2
right = left + crop_w
bottom = top + crop_h
new_h, new_w = bottom - top, right - left
crop_bbox = np.array([left, top, right, bottom])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = left / img_w, top / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = left * (lazy_right - lazy_left) / img_w
right = right * (lazy_right - lazy_left) / img_w
top = top * (lazy_bottom - lazy_top) / img_h
bottom = bottom * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, '
f'lazy={self.lazy})')
return repr_str
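# Illustrative sketch, not part of the original file: center-crop offsets
# are plain integer arithmetic, e.g. an assumed 224x224 crop from a 256x340
# frame.
#   >>> img_h, img_w, crop_w, crop_h = 256, 340, 224, 224
#   >>> (img_w - crop_w) // 2, (img_h - crop_h) // 2
#   (58, 16)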
@PIPELINES.register_module()
class ThreeCrop:
"""Crop images into three crops.
    Crop the images into three crops with equal intervals along the longer
    side; the crop size must equal the image size along the other side.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the ThreeCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('ThreeCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
assert crop_h == img_h or crop_w == img_w
if crop_h == img_h:
w_step = (img_w - crop_w) // 2
offsets = [
(0, 0), # left
(2 * w_step, 0), # right
(w_step, 0), # middle
]
elif crop_w == img_w:
h_step = (img_h - crop_h) // 2
offsets = [
(0, 0), # top
(0, 2 * h_step), # down
(0, h_step), # middle
]
cropped = []
crop_bboxes = []
for x_offset, y_offset in offsets:
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
cropped.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = cropped
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
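# Illustrative sketch, not part of the original file: for an assumed 256x448
# frame and crop_size=(256, 256), the three boxes tile the longer (width)
# axis.
#   >>> w_step = (448 - 256) // 2
#   >>> [(0, 0), (2 * w_step, 0), (w_step, 0)]  # left, right, middle
#   [(0, 0), (192, 0), (96, 0)]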
@PIPELINES.register_module()
class TenCrop:
"""Crop the images into 10 crops (corner + center + flip).
Crop the four corners and the center part of the image with the same
given crop_size, and flip it horizontally.
Required keys are "imgs", "img_shape", added or modified keys are "imgs",
"crop_bbox" and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
"""
def __init__(self, crop_size):
self.crop_size = _pair(crop_size)
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError(f'Crop_size must be int or tuple of int, '
f'but got {type(crop_size)}')
def __call__(self, results):
"""Performs the TenCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, False)
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('TenCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = results['imgs'][0].shape[:2]
crop_w, crop_h = self.crop_size
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
img_crops = list()
crop_bboxes = list()
        for x_offset, y_offset in offsets:
            crop = [
                img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
                for img in imgs
            ]
            flip_crop = [np.flip(c, axis=1).copy() for c in crop]
            bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
img_crops.extend(crop)
img_crops.extend(flip_crop)
crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})'
return repr_str
@PIPELINES.register_module()
class MultiGroupCrop:
"""Randomly crop the images into several groups.
Crop the random region with the same given crop_size and bounding box
into several groups.
Required keys are "imgs", added or modified keys are "imgs", "crop_bbox"
and "img_shape".
Args:
crop_size(int | tuple[int]): (w, h) of crop size.
groups(int): Number of groups.
"""
def __init__(self, crop_size, groups):
self.crop_size = _pair(crop_size)
self.groups = groups
if not mmcv.is_tuple_of(self.crop_size, int):
raise TypeError('Crop size must be int or tuple of int, '
f'but got {type(crop_size)}')
if not isinstance(groups, int):
raise TypeError(f'Groups must be int, but got {type(groups)}.')
if groups <= 0:
raise ValueError('Groups must be positive.')
def __call__(self, results):
"""Performs the MultiGroupCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if 'gt_bboxes' in results or 'proposals' in results:
warnings.warn('MultiGroupCrop cannot process bounding boxes')
imgs = results['imgs']
img_h, img_w = imgs[0].shape[:2]
crop_w, crop_h = self.crop_size
img_crops = []
crop_bboxes = []
for _ in range(self.groups):
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h]
crop = [
img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w]
for img in imgs
]
img_crops.extend(crop)
crop_bboxes.extend([bbox for _ in range(len(imgs))])
crop_bboxes = np.array(crop_bboxes)
results['imgs'] = img_crops
results['crop_bbox'] = crop_bboxes
results['img_shape'] = results['imgs'][0].shape[:2]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(crop_size={self.crop_size}, '
f'groups={self.groups})')
return repr_str
@PIPELINES.register_module()
class AudioAmplify:
"""Amplify the waveform.
Required keys are "audios", added or modified keys are "audios",
"amplify_ratio".
Args:
ratio (float): The ratio used to amplify the audio waveform.
"""
def __init__(self, ratio):
if isinstance(ratio, float):
self.ratio = ratio
else:
raise TypeError('Amplification ratio should be float.')
def __call__(self, results):
"""Perfrom the audio amplification.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'audios' in results
results['audios'] *= self.ratio
results['amplify_ratio'] = self.ratio
return results
def __repr__(self):
repr_str = f'{self.__class__.__name__}(ratio={self.ratio})'
return repr_str
@PIPELINES.register_module()
class MelSpectrogram:
"""MelSpectrogram. Transfer an audio wave into a melspectogram figure.
Required keys are "audios", "sample_rate", "num_clips", added or modified
keys are "audios".
Args:
        window_size (int): The window size in milliseconds. Default: 32.
        step_size (int): The step size in milliseconds. Default: 16.
n_mels (int): Number of mels. Default: 80.
        fixed_length (int): The sample length of the mel spectrogram may not
            be exactly as expected due to different fps; fix the length for
            batch collation by truncating or padding. Default: 128.
"""
def __init__(self,
window_size=32,
step_size=16,
n_mels=80,
fixed_length=128):
if all(
isinstance(x, int)
for x in [window_size, step_size, n_mels, fixed_length]):
self.window_size = window_size
self.step_size = step_size
self.n_mels = n_mels
self.fixed_length = fixed_length
else:
raise TypeError('All arguments should be int.')
def __call__(self, results):
"""Perform MelSpectrogram transformation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import librosa
except ImportError:
raise ImportError('Install librosa first.')
signals = results['audios']
sample_rate = results['sample_rate']
n_fft = int(round(sample_rate * self.window_size / 1000))
hop_length = int(round(sample_rate * self.step_size / 1000))
melspectrograms = list()
for clip_idx in range(results['num_clips']):
clip_signal = signals[clip_idx]
mel = librosa.feature.melspectrogram(
y=clip_signal,
sr=sample_rate,
n_fft=n_fft,
hop_length=hop_length,
n_mels=self.n_mels)
if mel.shape[0] >= self.fixed_length:
mel = mel[:self.fixed_length, :]
else:
mel = np.pad(
                    mel, ((0, self.fixed_length - mel.shape[0]), (0, 0)),
mode='edge')
melspectrograms.append(mel)
results['audios'] = np.array(melspectrograms)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}'
f'(window_size={self.window_size}), '
f'step_size={self.step_size}, '
f'n_mels={self.n_mels}, '
f'fixed_length={self.fixed_length})')
return repr_str
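# Illustrative sketch, not part of the original file: the FFT parameters are
# derived from the sample rate and the millisecond window/step sizes, e.g.
# at an assumed 16 kHz sample rate with the default window/step.
#   >>> sample_rate, window_size, step_size = 16000, 32, 16
#   >>> int(round(sample_rate * window_size / 1000))  # n_fft
#   512
#   >>> int(round(sample_rate * step_size / 1000))  # hop_length
#   256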
| 79,509
| 37.207593
| 104
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/pipelines/formating.py
|
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
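# Illustrative sketch, not part of the original file: `to_tensor` handles
# the common container types used in this pipeline.
#   >>> to_tensor(np.zeros((2, 3))).shape
#   torch.Size([2, 3])
#   >>> to_tensor(3)
#   tensor([3])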
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type in data
loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Rename:
"""Rename the key in results.
Args:
mapping (dict): The keys in results that need to be renamed. The key of
the dict is the original name, while the value is the new name. If
            the original name is not found in results, do nothing.
Default: dict().
"""
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, results):
for key, value in self.mapping.items():
if key in results:
assert isinstance(key, str) and isinstance(value, str)
assert value not in results, ('the new name already exists in '
'results')
results[value] = results[key]
results.pop(key)
return results
@PIPELINES.register_module()
class ToDataContainer:
"""Convert the data to DataContainer.
Args:
fields (Sequence[dict]): Required fields to be converted
with keys and attributes. E.g.
fields=(dict(key='gt_bbox', stack=False),).
Note that key can also be a list of keys, if so, every tensor in
the list will be converted to DataContainer.
"""
def __init__(self, fields):
self.fields = fields
def __call__(self, results):
"""Performs the ToDataContainer formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for field in self.fields:
_field = field.copy()
key = _field.pop('key')
if isinstance(key, list):
for item in key:
results[item] = DC(results[item], **_field)
else:
results[key] = DC(results[key], **_field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ImageToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose image channels to a given order.
Args:
keys (Sequence[str]): Required keys to be converted.
order (Sequence[int]): Image channel order.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
    This keeps the items in ``keys`` as they are, and collects items in
    ``meta_keys`` into a meta item called ``meta_name``. This is usually
the last stage of the data loader pipeline.
For example, when keys='imgs', meta_keys=('filename', 'label',
'original_shape'), meta_name='img_metas', the results will be a dict with
keys 'imgs' and 'img_metas', where 'img_metas' is a DataContainer of
another dict with keys 'filename', 'label', 'original_shape'.
Args:
keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
This key is always populated. Default: "img_metas".
meta_keys (Sequence[str]): Keys that are collected under meta_name.
The contents of the ``meta_name`` dictionary depends on
``meta_keys``.
By default this includes:
- "filename": path to the image file
- "label": label of the image file
- "original_shape": original shape of the image as a tuple
(h, w, c)
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "pad_shape": image shape after padding
- "flip_direction": a str in ("horiziontal", "vertival") to
indicate if the image is fliped horizontally or vertically.
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
nested (bool): If set as True, will apply data[x] = [data[x]] to all
items in data. The arg is added for compatibility. Default: False.
"""
def __init__(self,
keys,
meta_keys=('filename', 'label', 'original_shape', 'img_shape',
'pad_shape', 'flip_direction', 'img_norm_cfg'),
meta_name='img_metas',
nested=False):
self.keys = keys
self.meta_keys = meta_keys
self.meta_name = meta_name
self.nested = nested
def __call__(self, results):
"""Performs the Collect formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data = {}
for key in self.keys:
data[key] = results[key]
if len(self.meta_keys) != 0:
meta = {}
for key in self.meta_keys:
meta[key] = results[key]
data[self.meta_name] = DC(meta, cpu_only=True)
if self.nested:
for k in data:
data[k] = [data[k]]
return data
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, meta_keys={self.meta_keys}, '
f'nested={self.nested})')
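# Illustrative sketch, not part of the original file: a minimal Collect call
# with no meta keys (the shapes and keys are assumptions for the example).
#   >>> collect = Collect(keys=['imgs', 'label'], meta_keys=[])
#   >>> sorted(collect(dict(imgs=np.zeros((1, 3, 8, 8)), label=2)).keys())
#   ['imgs', 'label']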
@PIPELINES.register_module()
class FormatShape:
"""Format final imgs shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
        collapse (bool): To collapse input_format N... to ... (NCTHW to CTHW,
etc.) if N is 1. Should be set as True when training and testing
detectors. Default: False.
"""
def __init__(self, input_format, collapse=False):
self.input_format = input_format
self.collapse = collapse
if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not isinstance(results['imgs'], np.ndarray):
results['imgs'] = np.array(results['imgs'])
imgs = results['imgs']
# [M x H x W x C]
# M = 1 * N_crops * N_clips * L
if self.collapse:
assert results['num_clips'] == 1
if self.input_format == 'NCTHW':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x L x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x L x H x W
# M' = N_crops x N_clips
elif self.input_format == 'NCHW':
imgs = np.transpose(imgs, (0, 3, 1, 2))
# M x C x H x W
elif self.input_format == 'NCHW_Flow':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
# N_crops x N_clips x L x C x H x W
imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
imgs.shape[4:])
# M' x C' x H x W
# M' = N_crops x N_clips
# C' = L x C
elif self.input_format == 'NPTCHW':
num_proposals = results['num_proposals']
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
imgs.shape[1:])
# P x M x H x W x C
# M = N_clips x L
imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
# P x M x C x H x W
if self.collapse:
assert imgs.shape[0] == 1
imgs = imgs.squeeze(0)
results['imgs'] = imgs
results['input_shape'] = imgs.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
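# Illustrative sketch, not part of the original file: shape bookkeeping for
# the 'NCTHW' branch with num_clips=2 and clip_len=4 on 8 synthetic frames
# (all sizes are assumptions for the example only).
#   >>> fmt = FormatShape('NCTHW')
#   >>> out = fmt(dict(imgs=np.zeros((8, 16, 16, 3)), num_clips=2, clip_len=4))
#   >>> out['input_shape']  # (N_crops * N_clips, C, L, H, W)
#   (2, 3, 4, 16, 16)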
@PIPELINES.register_module()
class FormatAudioShape:
"""Format final audio shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTF']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audios = results['audios']
# clip x sample x freq -> clip x channel x sample x freq
clip, sample, freq = audios.shape
audios = audios.reshape(clip, 1, sample, freq)
results['audios'] = audios
results['input_shape'] = audios.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
| 12,741
| 33.160858
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/datasets/pipelines/__init__.py
|
from .augmentations import (AudioAmplify, CenterCrop, ColorJitter,
EntityBoxCrop, EntityBoxFlip, EntityBoxRescale,
Flip, Fuse, Imgaug, MelSpectrogram, MultiGroupCrop,
MultiScaleCrop, Normalize, RandomCrop, RandomErasing,
RandomRescale, RandomResizedCrop, RandomScale,
Resize, TenCrop, ThreeCrop)
from .compose import Compose
from .formating import (Collect, FormatAudioShape, FormatShape, ImageToTensor,
Rename, ToDataContainer, ToTensor, Transpose)
from .loading import (AudioDecode, AudioDecodeInit, AudioFeatureSelector,
BuildPseudoClip, DecordDecode, DecordInit,
DenseSampleFrames, FrameSelector,
GenerateLocalizationLabels, ImageDecode,
LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature,
LoadProposals, OpenCVDecode, OpenCVInit, PyAVDecode,
PyAVDecodeMotionVector, PyAVInit, RawFrameDecode,
SampleAVAFrames, SampleFrames, SampleProposalFrames,
UntrimmedSampleFrames)
from .pose_loading import (GeneratePoseTarget, LoadKineticsPose, PoseDecode,
UniformSampleFrames)
__all__ = [
'SampleFrames', 'PyAVDecode', 'DecordDecode', 'DenseSampleFrames',
'OpenCVDecode', 'FrameSelector', 'MultiGroupCrop', 'MultiScaleCrop', 'RandomErasing',
'RandomResizedCrop', 'RandomCrop', 'Resize', 'Flip', 'Fuse', 'Normalize',
'ThreeCrop', 'CenterCrop', 'TenCrop', 'ImageToTensor', 'Transpose',
'Collect', 'FormatShape', 'Compose', 'ToTensor', 'ToDataContainer',
'GenerateLocalizationLabels', 'LoadLocalizationFeature', 'LoadProposals',
    'DecordInit', 'OpenCVInit', 'PyAVInit', 'SampleProposalFrames',
    'UntrimmedSampleFrames', 'RawFrameDecode', 'ColorJitter', 'LoadHVULabel',
'SampleAVAFrames', 'AudioAmplify', 'MelSpectrogram', 'AudioDecode',
'FormatAudioShape', 'LoadAudioFeature', 'AudioFeatureSelector',
'AudioDecodeInit', 'EntityBoxFlip', 'EntityBoxCrop', 'EntityBoxRescale',
'RandomScale', 'ImageDecode', 'BuildPseudoClip', 'RandomRescale',
'PyAVDecodeMotionVector', 'Rename', 'Imgaug', 'UniformSampleFrames',
'PoseDecode', 'LoadKineticsPose', 'GeneratePoseTarget'
]
| 2,452
| 61.897436
| 89
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/gradcam_utils.py
|
import torch
import torch.nn.functional as F
class GradCAM:
"""GradCAM class helps create visualization results.
Visualization results are blended by heatmaps and input images.
This class is modified from
https://github.com/facebookresearch/SlowFast/blob/master/slowfast/visualization/gradcam_utils.py # noqa
For more information about GradCAM, please visit:
https://arxiv.org/pdf/1610.02391.pdf
"""
def __init__(self, model, target_layer_name, colormap='viridis'):
"""Create GradCAM class with recognizer, target layername & colormap.
Args:
model (nn.Module): the recognizer model to be used.
target_layer_name (str): name of convolutional layer to
be used to get gradients and feature maps from for creating
localization maps.
colormap (Optional[str]): matplotlib colormap used to create
heatmap. Default: 'viridis'. For more information, please visit
https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html
"""
from ..models.recognizers import Recognizer2D, Recognizer3D
if isinstance(model, Recognizer2D):
self.is_recognizer2d = True
elif isinstance(model, Recognizer3D):
self.is_recognizer2d = False
else:
raise ValueError(
'GradCAM utils only support Recognizer2D & Recognizer3D.')
self.model = model
self.model.eval()
self.target_gradients = None
self.target_activations = None
import matplotlib.pyplot as plt
self.colormap = plt.get_cmap(colormap)
self.data_mean = torch.tensor(model.cfg.img_norm_cfg['mean'])
self.data_std = torch.tensor(model.cfg.img_norm_cfg['std'])
self._register_hooks(target_layer_name)
def _register_hooks(self, layer_name):
"""Register forward and backward hook to a layer, given layer_name, to
obtain gradients and activations.
Args:
layer_name (str): name of the layer.
"""
def get_gradients(module, grad_input, grad_output):
self.target_gradients = grad_output[0].detach()
def get_activations(module, input, output):
self.target_activations = output.clone().detach()
layer_ls = layer_name.split('/')
prev_module = self.model
for layer in layer_ls:
prev_module = prev_module._modules[layer]
target_layer = prev_module
target_layer.register_forward_hook(get_activations)
target_layer.register_backward_hook(get_gradients)
def _calculate_localization_map(self, inputs, use_labels, delta=1e-20):
"""Calculate localization map for all inputs with Grad-CAM.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
delta (float): used in localization map normalization,
must be small enough. Please make sure
`localization_map_max - localization_map_min >> delta`
Returns:
tuple[torch.Tensor, torch.Tensor]: (localization_map, preds)
localization_map (torch.Tensor): the localization map for
input imgs.
preds (torch.Tensor): Model predictions for `inputs` with
shape (batch_size, num_classes).
"""
inputs['imgs'] = inputs['imgs'].clone()
# model forward & backward
preds = self.model(gradcam=True, **inputs)
if use_labels:
labels = inputs['label']
if labels.ndim == 1:
labels = labels.unsqueeze(-1)
score = torch.gather(preds, dim=1, index=labels)
else:
score = torch.max(preds, dim=-1)[0]
self.model.zero_grad()
score = torch.sum(score)
score.backward()
if self.is_recognizer2d:
# [batch_size, num_segments, 3, H, W]
b, t, _, h, w = inputs['imgs'].size()
else:
# [batch_size, num_crops*num_clips, 3, clip_len, H, W]
b1, b2, _, t, h, w = inputs['imgs'].size()
b = b1 * b2
gradients = self.target_gradients
activations = self.target_activations
if self.is_recognizer2d:
# [B*Tg, C', H', W']
b_tg, c, _, _ = gradients.size()
tg = b_tg // b
else:
# source shape: [B, C', Tg, H', W']
_, c, tg, _, _ = gradients.size()
# target shape: [B, Tg, C', H', W']
gradients = gradients.permute(0, 2, 1, 3, 4)
activations = activations.permute(0, 2, 1, 3, 4)
# calculate & resize to [B, 1, T, H, W]
weights = torch.mean(gradients.view(b, tg, c, -1), dim=3)
weights = weights.view(b, tg, c, 1, 1)
activations = activations.view([b, tg, c] +
list(activations.size()[-2:]))
localization_map = torch.sum(
weights * activations, dim=2, keepdim=True)
localization_map = F.relu(localization_map)
localization_map = localization_map.permute(0, 2, 1, 3, 4)
localization_map = F.interpolate(
localization_map,
size=(t, h, w),
mode='trilinear',
align_corners=False)
# Normalize the localization map.
localization_map_min, localization_map_max = (
torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0],
torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0])
localization_map_min = torch.reshape(
localization_map_min, shape=(b, 1, 1, 1, 1))
localization_map_max = torch.reshape(
localization_map_max, shape=(b, 1, 1, 1, 1))
localization_map = (localization_map - localization_map_min) / (
localization_map_max - localization_map_min + delta)
localization_map = localization_map.data
return localization_map.squeeze(dim=1), preds
def _alpha_blending(self, localization_map, input_imgs, alpha):
"""Blend heatmaps and model input images and get visulization results.
Args:
localization_map (torch.Tensor): localization map for all inputs,
generated with Grad-CAM
input_imgs (torch.Tensor): model inputs, normed images.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
torch.Tensor: blending results for localization map and input
images, with shape [B, T, H, W, 3] and pixel values in
RGB order within range [0, 1].
"""
# localization_map shape [B, T, H, W]
localization_map = localization_map.cpu()
# heatmap shape [B, T, H, W, 3] in RGB order
heatmap = self.colormap(localization_map.detach().numpy())
heatmap = heatmap[:, :, :, :, :3]
heatmap = torch.from_numpy(heatmap)
# Permute input imgs to [B, T, H, W, 3], like heatmap
if self.is_recognizer2d:
# Recognizer2D input (B, T, C, H, W)
curr_inp = input_imgs.permute(0, 1, 3, 4, 2)
else:
# Recognizer3D input (B', num_clips*num_crops, C, T, H, W)
# B = B' * num_clips * num_crops
curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:]))
curr_inp = curr_inp.permute(0, 2, 3, 4, 1)
# renormalize input imgs to [0, 1]
curr_inp = curr_inp.cpu()
curr_inp *= self.data_std
curr_inp += self.data_mean
curr_inp /= 255.
# alpha blending
blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp
return blended_imgs
def __call__(self, inputs, use_labels=False, alpha=0.5):
"""Visualize the localization maps on their corresponding inputs as
heatmap, using Grad-CAM.
Generate visualization results for **ALL CROPS**.
        For example, for an I3D model with `clip_len=32, num_clips=10` and
        `ThreeCrop` in the test pipeline, 960 (32*10*3) images are
        generated for every model input.
Args:
inputs (dict): model inputs, generated by test pipeline,
at least including two keys, ``imgs`` and ``label``.
use_labels (bool): Whether to use given labels to generate
localization map. Labels are in ``inputs['label']``.
alpha (float): transparency level of the heatmap,
in the range [0, 1].
Returns:
blended_imgs (torch.Tensor): Visualization results, blended by
localization maps and model inputs.
preds (torch.Tensor): Model predictions for inputs.
"""
# localization_map shape [B, T, H, W]
# preds shape [batch_size, num_classes]
localization_map, preds = self._calculate_localization_map(
inputs, use_labels=use_labels)
# blended_imgs shape [B, T, H, W, 3]
blended_imgs = self._alpha_blending(localization_map, inputs['imgs'],
alpha)
# blended_imgs shape [B, T, H, W, 3]
# preds shape [batch_size, num_classes]
# Recognizer2D: B = batch_size, T = num_segments
# Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len
return blended_imgs, preds
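# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal way to drive GradCAM end to end, assuming an already-built
# recognizer and one batch produced by its test pipeline. The config and
# checkpoint paths and the target layer name below are placeholders.
#
# from mmaction.apis import init_recognizer
# from mmaction.utils import GradCAM
#
# model = init_recognizer('<config>.py', '<checkpoint>.pth', device='cuda:0')
# gradcam = GradCAM(model, target_layer_name='backbone/layer4/1/relu')
# blended_imgs, preds = gradcam(inputs, use_labels=True, alpha=0.5)
# # blended_imgs: [B, T, H, W, 3] heatmap overlays in [0, 1]
# # preds: [batch_size, num_classes] model scores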
| 9,645
| 40.577586
| 107
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/misc.py
|
import ctypes
import random
import string
def get_random_string(length=15):
"""Get random string with letters and digits.
Args:
length (int): Length of random string. Default: 15.
"""
return ''.join(
random.choice(string.ascii_letters + string.digits)
for _ in range(length))
def get_thread_id():
"""Get current thread id."""
# use ctype to find thread id
thread_id = ctypes.CDLL('libc.so.6').syscall(186)
return thread_id
def get_shm_dir():
"""Get shm dir for temporary usage."""
return '/dev/shm'
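# --- Usage sketch (illustrative; not part of the original module) ---
# The helpers above are self-contained; note that get_thread_id hard-codes
# syscall number 186 (gettid), so it only works on x86-64 Linux.
if __name__ == '__main__':
    print(get_random_string())  # 15 chars drawn from [A-Za-z0-9]
    print(get_thread_id())      # kernel thread id of the current thread
    print(get_shm_dir())        # '/dev/shm'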
| 570
| 20.148148
| 59
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/logger.py
|
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Use ``get_logger`` method in mmcv to get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If ``log_file`` is specified, a FileHandler
will also be added. The name of the root logger is the top-level package
name, e.g., "mmaction".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
:obj:`logging.Logger`: The root logger.
"""
return get_logger(__name__.split('.')[0], log_file, log_level)
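# --- Usage sketch (illustrative; not part of the original module) ---
# Repeated calls return the same configured logger, so handlers are not
# duplicated across modules.
if __name__ == '__main__':
    logger = get_root_logger(log_file=None, log_level=logging.INFO)
    logger.info('message routed through the package root logger')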
| 917
| 35.72
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/precise_bn.py
|
# Adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/precise_bn.py # noqa: E501
# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501
import logging
import time
import mmcv
import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import Hook
from mmcv.utils import print_log
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
def is_parallel_module(module):
"""Check if a module is a parallel module.
The following 3 modules (and their subclasses) are regarded as parallel
modules: DataParallel, DistributedDataParallel,
MMDistributedDataParallel (the deprecated version).
Args:
module (nn.Module): The module to be checked.
Returns:
bool: True if the input module is a parallel module.
"""
parallels = (DataParallel, DistributedDataParallel,
MMDistributedDataParallel)
return bool(isinstance(module, parallels))
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
"""Recompute and update the batch norm stats to make them more precise.
During
training both BN stats and the weight are changing after every iteration,
so the running average can not precisely reflect the actual stats of the
current model.
In this function, the BN stats are recomputed with fixed weights, to make
the running average more precise. Specifically, it computes the true
average of per-batch mean/variance instead of the running average.
Args:
model (nn.Module): The model whose bn stats will be recomputed.
data_loader (iterator): The DataLoader iterator.
num_iters (int): number of iterations to compute the stats.
logger (:obj:`logging.Logger` | None): Logger for logging.
Default: None.
"""
model.train()
assert len(data_loader) >= num_iters, (
        f'length of dataloader {len(data_loader)} must be at least '
        f'the iteration number {num_iters}')
if is_parallel_module(model):
parallel_module = model
model = model.module
else:
parallel_module = model
# Finds all the bn layers with training=True.
bn_layers = [
m for m in model.modules() if m.training and isinstance(m, _BatchNorm)
]
if len(bn_layers) == 0:
print_log('No BN found in model', logger=logger, level=logging.WARNING)
return
print_log(f'{len(bn_layers)} BN found', logger=logger)
# Finds all the other norm layers with training=True.
for m in model.modules():
if m.training and isinstance(m, (_InstanceNorm, GroupNorm)):
print_log(
'IN/GN stats will be updated like training.',
logger=logger,
level=logging.WARNING)
# In order to make the running stats only reflect the current batch, the
# momentum is disabled.
# bn.running_mean = (1 - momentum) * bn.running_mean + momentum *
# batch_mean
# Setting the momentum to 1.0 to compute the stats without momentum.
momentum_actual = [bn.momentum for bn in bn_layers] # pyre-ignore
for bn in bn_layers:
bn.momentum = 1.0
# Note that running_var actually means "running average of variance"
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
finish_before_loader = False
prog_bar = mmcv.ProgressBar(len(data_loader))
for ind, data in enumerate(data_loader):
with torch.no_grad():
parallel_module(**data, return_loss=False)
prog_bar.update()
for i, bn in enumerate(bn_layers):
# Accumulates the bn stats.
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # running_var is actually the running average of variance
running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)
if (ind + 1) >= num_iters:
finish_before_loader = True
break
assert finish_before_loader, 'Dataloader stopped before ' \
f'iteration {num_iters}'
for i, bn in enumerate(bn_layers):
# Sets the precise bn stats.
bn.running_mean = running_mean[i]
bn.running_var = running_var[i]
bn.momentum = momentum_actual[i]
class PreciseBNHook(Hook):
"""Precise BN hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
num_iters (int): Number of iterations to update the bn stats.
Default: 200.
        interval (int): Interval (in epochs) at which precise BN is
            performed. Default: 1.
"""
def __init__(self, dataloader, num_iters=200, interval=1):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.num_iters = num_iters
def after_train_epoch(self, runner):
if self.every_n_epochs(runner, self.interval):
# sleep to avoid possible deadlock
time.sleep(2.)
print_log(
f'Running Precise BN for {self.num_iters} iterations',
logger=runner.logger)
update_bn_stats(
runner.model,
self.dataloader,
self.num_iters,
logger=runner.logger)
print_log('BN stats updated', logger=runner.logger)
# sleep to avoid possible deadlock
time.sleep(2.)
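# --- Usage sketch (illustrative; not part of the original module) ---
# PreciseBNHook is meant to be registered on an mmcv runner; the loader
# and runner below are assumed to exist already (see
# tests/test_runtime/test_precise_bn.py in this repo for a complete,
# runnable setup):
#
# precise_bn_hook = PreciseBNHook(train_loader, num_iters=200, interval=1)
# runner.register_hook(precise_bn_hook)
# runner.run([train_loader], [('train', 1)], max_epochs)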
| 5,826
| 36.352564
| 107
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/collect_env.py
|
from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmaction
def collect_env():
env_info = collect_basic_env()
env_info['MMAction2'] = (
mmaction.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| 381
| 21.470588
| 60
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/module_hooks.py
|
import torch
from mmcv.utils import Registry, build_from_cfg
MODULE_HOOKS = Registry('module_hooks')
def register_module_hooks(Module, module_hooks_list):
handles = []
for module_hook_cfg in module_hooks_list:
hooked_module_name = module_hook_cfg.pop('hooked_module', 'backbone')
if not hasattr(Module, hooked_module_name):
raise ValueError(
f'{Module.__class__} has no {hooked_module_name}!')
hooked_module = getattr(Module, hooked_module_name)
hook_pos = module_hook_cfg.pop('hook_pos', 'forward_pre')
if hook_pos == 'forward_pre':
handle = hooked_module.register_forward_pre_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
elif hook_pos == 'forward':
handle = hooked_module.register_forward_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
elif hook_pos == 'backward':
handle = hooked_module.register_backward_hook(
build_from_cfg(module_hook_cfg, MODULE_HOOKS).hook_func())
else:
raise ValueError(
f'hook_pos must be `forward_pre`, `forward` or `backward`, '
f'but get {hook_pos}')
handles.append(handle)
return handles
@MODULE_HOOKS.register_module()
class GPUNormalize:
"""Normalize images with the given mean and std value on GPUs.
Call the member function ``hook_func`` will return the forward pre-hook
function for module registration.
    GPU normalization, rather than CPU normalization, is recommended when
    the model runs on GPUs with strong compute capacity, such as a Tesla
    V100.
Args:
mean (Sequence[float]): Mean values of different channels.
std (Sequence[float]): Std values of different channels.
"""
def __init__(self, input_format, mean, std):
if input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(f'The input format {input_format} is invalid.')
self.input_format = input_format
_mean = torch.tensor(mean)
_std = torch.tensor(std)
if input_format == 'NCTHW':
self._mean = _mean[None, :, None, None, None]
self._std = _std[None, :, None, None, None]
elif input_format == 'NCHW':
self._mean = _mean[None, :, None, None]
self._std = _std[None, :, None, None]
elif input_format == 'NCHW_Flow':
self._mean = _mean[None, :, None, None]
self._std = _std[None, :, None, None]
elif input_format == 'NPTCHW':
self._mean = _mean[None, None, None, :, None, None]
self._std = _std[None, None, None, :, None, None]
else:
raise ValueError(f'The input format {input_format} is invalid.')
def hook_func(self):
def normalize_hook(Module, input):
x = input[0]
assert x.dtype == torch.uint8, (
f'The previous augmentation should use uint8 data type to '
f'speed up computation, but get {x.dtype}')
mean = self._mean.to(x.device)
std = self._std.to(x.device)
with torch.no_grad():
x = x.float().sub_(mean).div_(std)
return (x, *input[1:])
return normalize_hook
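# --- Usage sketch (illustrative; not part of the original module) ---
# Minimal, self-contained check of GPUNormalize registered as a
# forward-pre hook on a dummy recognizer-like module; despite the name,
# this also runs on CPU.
if __name__ == '__main__':
    import torch.nn as nn

    class DummyRecognizer(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = nn.Identity()

        def forward(self, x):
            return self.backbone(x)

    module_hooks = [
        dict(
            type='GPUNormalize',
            hooked_module='backbone',
            hook_pos='forward_pre',
            input_format='NCHW',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375])
    ]
    recognizer = DummyRecognizer()
    register_module_hooks(recognizer, module_hooks)
    imgs = torch.randint(0, 256, (1, 3, 8, 8), dtype=torch.uint8)
    out = recognizer(imgs)
    print(out.dtype, out.shape)  # torch.float32 torch.Size([1, 3, 8, 8])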
| 3,371
| 37.318182
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/__init__.py
|
from .collect_env import collect_env
from .decorators import import_module_error_class, import_module_error_func
from .gradcam_utils import GradCAM
from .logger import get_root_logger
from .misc import get_random_string, get_shm_dir, get_thread_id
from .module_hooks import register_module_hooks
from .precise_bn import PreciseBNHook
from .optimizer import DistOptimizerHook
__all__ = [
'get_root_logger', 'collect_env', 'get_random_string', 'get_thread_id',
'get_shm_dir', 'GradCAM', 'PreciseBNHook', 'import_module_error_class',
'import_module_error_func', 'register_module_hooks', 'DistOptimizerHook'
]
| 620
| 37.8125
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/decorators.py
|
from types import MethodType
def import_module_error_func(module_name):
"""When a function is imported incorrectly due to a missing module, raise
an import error when the function is called."""
def decorate(func):
def new_func(*args, **kwargs):
raise ImportError(
f'Please install {module_name} to use {func.__name__}.')
return new_func
return decorate
def import_module_error_class(module_name):
"""When a class is imported incorrectly due to a missing module, raise an
import error when the class is instantiated."""
def decorate(cls):
def import_error_init(*args, **kwargs):
raise ImportError(
f'Please install {module_name} to use {cls.__name__}.')
cls.__init__ = MethodType(import_error_init, cls)
return cls
return decorate
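# --- Usage sketch (illustrative; not part of the original module) ---
# Both decorators defer the ImportError until the symbol is actually used,
# which keeps importing the package cheap when an optional dependency is
# missing; 'some_optional_pkg' is a placeholder name.
if __name__ == '__main__':
    @import_module_error_func('some_optional_pkg')
    def needs_optional_pkg():
        pass

    try:
        needs_optional_pkg()
    except ImportError as e:
        print(e)  # Please install some_optional_pkg to use needs_optional_pkg.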
| 870
| 25.393939
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/mmaction/utils/optimizer.py
|
from mmcv.runner import OptimizerHook, HOOKS
try:
import apex
except ImportError:
print('apex is not installed')
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
"""Optimizer hook for distributed training."""
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.update_interval = update_interval
self.use_fp16 = use_fp16
def before_run(self, runner):
runner.optimizer.zero_grad()
def after_train_iter(self, runner):
runner.outputs['loss'] /= self.update_interval
if self.use_fp16:
with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
else:
runner.outputs['loss'].backward()
if self.every_n_iters(runner, self.update_interval):
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
runner.optimizer.zero_grad()
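# --- Usage sketch (illustrative; not part of the original module) ---
# In a training config this hook replaces the stock optimizer_config and
# enables gradient accumulation (and, with Apex installed, fp16); the
# values below are placeholders:
#
# optimizer_config = dict(
#     type='DistOptimizerHook',
#     update_interval=4,  # accumulate gradients over 4 iterations
#     grad_clip=dict(max_norm=40, norm_type=2),
#     use_fp16=False)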
| 1,158
| 33.088235
| 108
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_train.py
|
import copy
import tempfile
from collections import OrderedDict
import pytest
import torch
import torch.nn as nn
from mmcv import Config
from torch.utils.data import Dataset
from mmaction.apis import train_model
from mmaction.datasets import DATASETS
@DATASETS.register_module()
class ExampleDataset(Dataset):
def __init__(self, test_mode=False):
self.test_mode = test_mode
@staticmethod
def evaluate(results, logger=None):
eval_results = OrderedDict()
eval_results['acc'] = 1
return eval_results
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.test_cfg = None
self.conv1 = nn.Conv2d(3, 8, kernel_size=1)
self.norm1 = nn.BatchNorm1d(2)
def forward(self, imgs, return_loss=False):
self.norm1(torch.rand(3, 2).cuda())
losses = dict()
losses['test_loss'] = torch.tensor([0.5], requires_grad=True)
return losses
def train_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
losses = self.forward(imgs, True)
loss = torch.tensor([0.5], requires_grad=True)
outputs = dict(loss=loss, log_vars=losses, num_samples=3)
return outputs
def val_step(self, data_batch, optimizer, **kwargs):
imgs = data_batch['imgs']
self.forward(imgs, False)
outputs = dict(results=0.5)
return outputs
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_train_model():
model = ExampleModel()
dataset = ExampleDataset()
datasets = [ExampleDataset(), ExampleDataset()]
_cfg = dict(
seed=0,
gpus=1,
gpu_ids=[0],
resume_from=None,
load_from=None,
workflow=[('train', 1)],
total_epochs=5,
evaluation=dict(interval=1, save_best='acc'),
data=dict(
videos_per_gpu=1,
workers_per_gpu=0,
val=dict(type='ExampleDataset')),
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001),
optimizer_config=dict(grad_clip=dict(max_norm=40, norm_type=2)),
lr_config=dict(policy='step', step=[40, 80]),
omnisource=False,
precise_bn=False,
checkpoint_config=dict(interval=1),
log_level='INFO',
log_config=dict(interval=20, hooks=[dict(type='TextLoggerHook')]))
with tempfile.TemporaryDirectory() as tmpdir:
# normal train
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
# train with validation
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
config = Config(cfg)
train_model(model, dataset, config, validate=True)
with tempfile.TemporaryDirectory() as tmpdir:
# train with Fp16OptimizerHook
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['fp16'] = dict(loss_scale=512.)
config = Config(cfg)
model.fp16_enabled = None
train_model(model, dataset, config)
with tempfile.TemporaryDirectory() as tmpdir:
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['omnisource'] = True
config = Config(cfg)
train_model(model, datasets, config)
with tempfile.TemporaryDirectory() as tmpdir:
# train with precise_bn on
cfg = copy.deepcopy(_cfg)
cfg['work_dir'] = tmpdir
cfg['workflow'] = [('train', 1), ('val', 1)]
cfg['data'] = dict(
videos_per_gpu=1,
workers_per_gpu=0,
train=dict(type='ExampleDataset'),
val=dict(type='ExampleDataset'))
cfg['precise_bn'] = dict(num_iters=1, interval=1)
config = Config(cfg)
train_model(model, datasets, config)
| 4,059
| 29.298507
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_optimizer.py
|
import torch
import torch.nn as nn
from mmcv.runner import build_optimizer_constructor
class SubModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
self.gn = nn.GroupNorm(2, 2)
self.fc = nn.Linear(2, 2)
self.param1 = nn.Parameter(torch.ones(1))
def forward(self, x):
return x
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
self.sub = SubModel()
self.fc = nn.Linear(2, 1)
def forward(self, x):
return x
class PseudoDataParallel(nn.Module):
def __init__(self):
super().__init__()
self.module = ExampleModel()
def forward(self, x):
return x
base_lr = 0.01
base_wd = 0.0001
momentum = 0.9
def check_optimizer(optimizer,
model,
prefix='',
bias_lr_mult=1,
bias_decay_mult=1,
norm_decay_mult=1,
dwconv_decay_mult=1):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
assert len(param_groups) == len(model_parameters)
for i, param in enumerate(model_parameters):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert param_group['momentum'] == momentum
# param1
param1 = param_groups[0]
assert param1['lr'] == base_lr
assert param1['weight_decay'] == base_wd
# conv1.weight
conv1_weight = param_groups[1]
assert conv1_weight['lr'] == base_lr
assert conv1_weight['weight_decay'] == base_wd
# conv2.weight
conv2_weight = param_groups[2]
assert conv2_weight['lr'] == base_lr
assert conv2_weight['weight_decay'] == base_wd
# conv2.bias
conv2_bias = param_groups[3]
assert conv2_bias['lr'] == base_lr * bias_lr_mult
assert conv2_bias['weight_decay'] == base_wd * bias_decay_mult
# bn.weight
bn_weight = param_groups[4]
assert bn_weight['lr'] == base_lr
assert bn_weight['weight_decay'] == base_wd * norm_decay_mult
# bn.bias
bn_bias = param_groups[5]
assert bn_bias['lr'] == base_lr
assert bn_bias['weight_decay'] == base_wd * norm_decay_mult
# sub.param1
sub_param1 = param_groups[6]
assert sub_param1['lr'] == base_lr
assert sub_param1['weight_decay'] == base_wd
# sub.conv1.weight
sub_conv1_weight = param_groups[7]
assert sub_conv1_weight['lr'] == base_lr
assert sub_conv1_weight['weight_decay'] == base_wd * dwconv_decay_mult
# sub.conv1.bias
sub_conv1_bias = param_groups[8]
assert sub_conv1_bias['lr'] == base_lr * bias_lr_mult
assert sub_conv1_bias['weight_decay'] == base_wd * dwconv_decay_mult
# sub.gn.weight
sub_gn_weight = param_groups[9]
assert sub_gn_weight['lr'] == base_lr
assert sub_gn_weight['weight_decay'] == base_wd * norm_decay_mult
# sub.gn.bias
sub_gn_bias = param_groups[10]
assert sub_gn_bias['lr'] == base_lr
assert sub_gn_bias['weight_decay'] == base_wd * norm_decay_mult
# sub.fc1.weight
sub_fc_weight = param_groups[11]
assert sub_fc_weight['lr'] == base_lr
assert sub_fc_weight['weight_decay'] == base_wd
# sub.fc1.bias
sub_fc_bias = param_groups[12]
assert sub_fc_bias['lr'] == base_lr * bias_lr_mult
assert sub_fc_bias['weight_decay'] == base_wd * bias_decay_mult
# fc1.weight
fc_weight = param_groups[13]
assert fc_weight['lr'] == base_lr
assert fc_weight['weight_decay'] == base_wd
# fc1.bias
fc_bias = param_groups[14]
assert fc_bias['lr'] == base_lr * bias_lr_mult
assert fc_bias['weight_decay'] == base_wd * bias_decay_mult
def check_tsm_optimizer(optimizer, model, fc_lr5=True):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
model_parameters = list(model.parameters())
# first_conv_weight
first_conv_weight = param_groups[0]
assert torch.equal(first_conv_weight['params'][0], model_parameters[1])
assert first_conv_weight['lr'] == base_lr
assert first_conv_weight['weight_decay'] == base_wd
# first_conv_bias
first_conv_bias = param_groups[1]
assert first_conv_bias['params'] == []
assert first_conv_bias['lr'] == base_lr * 2
assert first_conv_bias['weight_decay'] == 0
# normal_weight
normal_weight = param_groups[2]
assert torch.equal(normal_weight['params'][0], model_parameters[2])
assert torch.equal(normal_weight['params'][1], model_parameters[7])
assert normal_weight['lr'] == base_lr
assert normal_weight['weight_decay'] == base_wd
# normal_bias
normal_bias = param_groups[3]
assert torch.equal(normal_bias['params'][0], model_parameters[3])
assert torch.equal(normal_bias['params'][1], model_parameters[8])
assert normal_bias['lr'] == base_lr * 2
assert normal_bias['weight_decay'] == 0
# bn
bn = param_groups[4]
assert torch.equal(bn['params'][0], model_parameters[4])
assert torch.equal(bn['params'][1], model_parameters[5])
assert torch.equal(bn['params'][2], model_parameters[9])
assert torch.equal(bn['params'][3], model_parameters[10])
assert bn['lr'] == base_lr
assert bn['weight_decay'] == 0
# normal linear weight
assert torch.equal(normal_weight['params'][2], model_parameters[11])
# normal linear bias
assert torch.equal(normal_bias['params'][2], model_parameters[12])
# fc_lr5
lr5_weight = param_groups[5]
lr10_bias = param_groups[6]
assert lr5_weight['lr'] == base_lr * 5
assert lr5_weight['weight_decay'] == base_wd
assert lr10_bias['lr'] == base_lr * 10
assert lr10_bias['weight_decay'] == 0
if fc_lr5:
# lr5_weight
assert torch.equal(lr5_weight['params'][0], model_parameters[13])
# lr10_bias
assert torch.equal(lr10_bias['params'][0], model_parameters[14])
else:
# lr5_weight
assert lr5_weight['params'] == []
# lr10_bias
assert lr10_bias['params'] == []
assert torch.equal(normal_weight['params'][3], model_parameters[13])
assert torch.equal(normal_bias['params'][3], model_parameters[14])
def test_tsm_optimizer_constructor():
model = ExampleModel()
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
# fc_lr5 is True
paramwise_cfg = dict(fc_lr5=True)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
# fc_lr5 is False
paramwise_cfg = dict(fc_lr5=False)
optim_constructor_cfg = dict(
type='TSMOptimizerConstructor',
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_tsm_optimizer(optimizer, model, **paramwise_cfg)
| 7,742
| 35.182243
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_config.py
|
import glob
import os
import os.path as osp
import mmcv
import torch.nn as nn
from mmaction.models import build_localizer, build_recognizer
def _get_config_path():
"""Find the predefined recognizer config path."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
print(f'Using {len(config_names)} config files')
config_fpaths = [
osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths
]
return config_fpaths
def test_config_build_recognizer():
"""Test that all mmaction models defined in the configs can be
initialized."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/recognition')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
# test all config file in `configs` directory
for config_fpath in config_fpaths:
config_mod = mmcv.Config.fromfile(config_fpath)
print(f'Building recognizer, config_fpath = {config_fpath!r}')
# Remove pretrained keys to allow for testing in an offline environment
if 'pretrained' in config_mod.model['backbone']:
config_mod.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config_mod.model)
assert isinstance(recognizer, nn.Module)
def _get_config_path_for_localizer():
"""Find the predefined localizer config path for localizer."""
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/localization')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
print(f'Using {len(config_names)} config files')
config_fpaths = [
osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths
]
return config_fpaths
def test_config_build_localizer():
"""Test that all mmaction models defined in the configs can be
initialized."""
config_fpaths = _get_config_path_for_localizer()
# test all config file in `configs/localization` directory
for config_fpath in config_fpaths:
config_mod = mmcv.Config.fromfile(config_fpath)
print(f'Building localizer, config_fpath = {config_fpath!r}')
if config_mod.get('model', None):
localizer = build_localizer(config_mod.model)
assert isinstance(localizer, nn.Module)
| 2,887
| 38.027027
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_eval_hook.py
|
import os.path as osp
import tempfile
import unittest.mock as mock
import warnings
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, IterBasedRunner
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
# TODO import eval hooks from mmcv and delete them from mmaction2
try:
from mmcv.runner import EvalHook, DistEvalHook
pytest.skip(
'EvalHook and DistEvalHook are supported in MMCV',
allow_module_level=True)
except ImportError:
warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from '
'mmaction2 will be deprecated. Please install mmcv through '
'master branch.')
from mmaction.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]
def __getitem__(self, idx):
results = dict(x=torch.tensor([1]))
return results
def __len__(self):
return 1
@mock.create_autospec
def evaluate(self, results, logger=None):
pass
class EvalDataset(ExampleDataset):
def evaluate(self, results, logger=None):
acc = self.eval_result[self.index]
output = OrderedDict(acc=acc, index=self.index, score=acc)
self.index += 1
return output
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
@staticmethod
def forward(x, **kwargs):
return x
@staticmethod
def train_step(data_batch, optimizer, **kwargs):
if not isinstance(data_batch, dict):
data_batch = dict(x=data_batch)
return data_batch
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def _build_epoch_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = EpochBasedRunner(
model=model, work_dir=tmp_dir, logger=get_logger('demo'))
return runner
def _build_iter_runner():
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = IterBasedRunner(
model=model, work_dir=tmp_dir, logger=get_logger('demo'))
return runner
def test_eval_hook():
with pytest.raises(AssertionError):
# `save_best` should be a str
test_dataset = Model()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best=True)
with pytest.raises(TypeError):
# dataloader must be a pytorch DataLoader
test_dataset = Model()
data_loader = [DataLoader(test_dataset)]
EvalHook(data_loader)
with pytest.raises(ValueError):
# save_best must be valid when rule_map is None
test_dataset = ExampleDataset()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best='unsupport')
with pytest.raises(KeyError):
# rule must be in keys of rule_map
test_dataset = Model()
data_loader = DataLoader(test_dataset)
EvalHook(data_loader, save_best='auto', rule='unsupport')
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset)
model = Model()
data_loader = DataLoader(test_dataset)
eval_hook = EvalHook(data_loader, save_best=None)
with tempfile.TemporaryDirectory() as tmpdir:
# total_epochs = 1
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
test_dataset.evaluate.assert_called_with(
test_dataset, [torch.tensor([1])], logger=runner.logger)
assert runner.meta is None or 'best_score' not in runner.meta[
'hook_msgs']
assert runner.meta is None or 'best_ckpt' not in runner.meta[
'hook_msgs']
# when `save_best` is set to 'auto', first metric will be used.
loader = DataLoader(EvalDataset())
model = Model()
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best acc and corresponding epoch
loader = DataLoader(EvalDataset())
model = Model()
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best score and corresponding epoch
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(
data_loader, interval=1, save_best='score', rule='greater')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
# total_epochs = 8, return the best score using less compare func
# and indicate corresponding epoch
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == -3
    # Test the EvalHook when a resume happened
data_loader = DataLoader(EvalDataset())
eval_hook = EvalHook(data_loader, save_best='acc')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 4
resume_from = osp.join(tmpdir, 'latest.pth')
loader = DataLoader(ExampleDataset())
eval_hook = EvalHook(data_loader, save_best='acc')
runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.resume(resume_from)
runner.run([loader], [('train', 1)], 8)
ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(ckpt_path)
assert osp.exists(ckpt_path)
assert runner.meta['hook_msgs']['best_score'] == 7
@patch('mmaction.apis.single_gpu_test', MagicMock)
@patch('mmaction.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch',
[(_build_epoch_runner, True),
(_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
# create dummy data
dataloader = DataLoader(torch.ones((5, 2)))
# 0.1. dataloader is not a DataLoader object
with pytest.raises(TypeError):
EvalHookParam(dataloader=MagicMock(), interval=-1)
# 0.2. negative interval
with pytest.raises(ValueError):
EvalHookParam(dataloader, interval=-1)
# 1. start=None, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, interval=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 2. start=1, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, interval=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, interval=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 1 # after epoch 2
# 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, interval=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3
# 5. start=0/negative, interval=1: perform evaluation after each epoch and
# before epoch 1.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=0, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
runner = _build_demo_runner()
with pytest.warns(UserWarning):
evalhook = EvalHookParam(
dataloader, start=-2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
# 6. resuming from epoch i, start = x (x<=i), interval =1: perform
# evaluation after each epoch and before the first epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=1, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
if by_epoch:
runner._epoch = 2
else:
runner._iter = 2
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # before & after epoch 3
# 7. resuming from epoch i, start = i+1/None, interval =1: perform
# evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=2, by_epoch=by_epoch, save_best=None)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
if by_epoch:
runner._epoch = 1
else:
runner._iter = 1
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3
| 12,595
| 35.616279
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_precise_bn.py
|
import copy
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmaction.utils import PreciseBNHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1.0], dtype=torch.float32))
return results
def __len__(self):
return 1
class BiggerDataset(ExampleDataset):
def __init__(self, fixed_values=range(0, 12)):
assert len(self) == len(fixed_values)
self.fixed_values = fixed_values
def __getitem__(self, idx):
results = dict(
imgs=torch.tensor([self.fixed_values[idx]], dtype=torch.float32))
return results
def __len__(self):
# a bigger dataset
return 12
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.bn = nn.BatchNorm1d(1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.bn(self.conv(imgs))
@staticmethod
def train_step(data_batch, optimizer, **kwargs):
outputs = {
'loss': 0.5,
'log_vars': {
'accuracy': 0.98
},
'num_samples': 1
}
return outputs
class SingleBNModel(ExampleModel):
def __init__(self):
super().__init__()
self.bn = nn.BatchNorm1d(1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.bn(imgs)
class GNExampleModel(ExampleModel):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.bn = nn.GroupNorm(1, 1)
self.test_cfg = None
class NoBNExampleModel(ExampleModel):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.test_cfg = None
def forward(self, imgs, return_loss=False):
return self.conv(imgs)
def test_precise_bn():
with pytest.raises(TypeError):
# `data_loader` must be a Pytorch DataLoader
test_dataset = ExampleModel()
data_loader = DataLoader(
test_dataset,
batch_size=2,
sampler=None,
num_workers=0,
shuffle=False)
PreciseBNHook('data_loader')
optimizer_cfg = dict(
type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=2)
model = ExampleModel()
optimizer = build_optimizer(model, optimizer_cfg)
data_loader = DataLoader(test_dataset, batch_size=2)
precise_bn_loader = copy.deepcopy(data_loader)
logger = get_logger('precise_bn')
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
with pytest.raises(AssertionError):
        # num_iters should be no larger than the total
        # number of iters
precise_bn_hook = PreciseBNHook(precise_bn_loader, num_iters=5)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test non-DDP model
test_bigger_dataset = BiggerDataset()
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test model w/ gn layer
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = GNExampleModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test model without bn layer
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = NoBNExampleModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
# test how precise it is
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=6) # run all
assert precise_bn_hook.num_iters == 6
assert precise_bn_hook.interval == 1
model = SingleBNModel()
runner = EpochBasedRunner(
model=model, batch_processor=None, optimizer=optimizer, logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
imgs_list = list()
for _, data in enumerate(loader):
imgs_list.append(np.array(data['imgs']))
mean = np.mean([np.mean(batch) for batch in imgs_list])
    # Bessel's correction is used in PyTorch, therefore ddof=1
var = np.mean([np.var(batch, ddof=1) for batch in imgs_list])
assert np.equal(mean, np.array(model.bn.running_mean))
assert np.equal(var, np.array(model.bn.running_var))
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_ddp_model_precise_bn():
# test DDP model
test_bigger_dataset = BiggerDataset()
loader = DataLoader(test_bigger_dataset, batch_size=2)
precise_bn_hook = PreciseBNHook(loader, num_iters=5)
assert precise_bn_hook.num_iters == 5
assert precise_bn_hook.interval == 1
model = ExampleModel()
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=True)
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    optimizer = build_optimizer(model, optimizer_cfg)
    logger = get_logger('precise_bn')
    runner = EpochBasedRunner(
        model=model,
        batch_processor=None,
        optimizer=optimizer,
        logger=logger)
runner.register_hook(precise_bn_hook)
runner.run([loader], [('train', 1)], 1)
| 6,456
| 30.497561
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_inference.py
|
import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmaction.apis import inference_recognizer, init_recognizer
video_config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
frame_config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
flow_frame_config_file = 'configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py' # noqa: E501
label_path = 'demo/label_map_k400.txt'
video_path = 'demo/demo.mp4'
frames_path = 'tests/data/imgs'
def test_init_recognizer():
with pytest.raises(TypeError):
# config must be a filename or Config object
init_recognizer(dict(config_file=None))
with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
init_recognizer(frame_config_file)
with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
init_recognizer(video_config_file, use_frames=True)
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
model = init_recognizer(video_config_file, None, device)
config = mmcv.Config.fromfile(video_config_file)
config.model.backbone.pretrained = None
isinstance(model, nn.Module)
if torch.cuda.is_available():
assert next(model.parameters()).is_cuda is True
else:
assert next(model.parameters()).is_cuda is False
assert model.cfg.model.backbone.pretrained is None
def test_video_inference_recognizer():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
model = init_recognizer(video_config_file, None, device)
with pytest.raises(RuntimeError):
# video path doesn't exist
inference_recognizer(model, 'missing.mp4', label_path)
with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with ``use_frames``
inference_recognizer(model, video_path, label_path, use_frames=True)
with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with ``use_frames``
inference_recognizer(model, 'demo/', label_path)
for ops in model.cfg.data.test.pipeline:
if ops['type'] in ('TenCrop', 'ThreeCrop'):
# Use CenterCrop to reduce memory in order to pass CI
ops['type'] = 'CenterCrop'
top5_label = inference_recognizer(model, video_path, label_path)
scores = [item[1] for item in top5_label]
assert len(top5_label) == 5
assert scores == sorted(scores, reverse=True)
_, feat = inference_recognizer(
model,
video_path,
label_path,
outputs=('backbone', 'cls_head'),
as_tensor=False)
assert isinstance(feat, dict)
assert 'backbone' in feat and 'cls_head' in feat
assert isinstance(feat['backbone'], np.ndarray)
assert isinstance(feat['cls_head'], np.ndarray)
assert feat['backbone'].shape == (25, 2048, 7, 7)
assert feat['cls_head'].shape == (1, 400)
_, feat = inference_recognizer(
model,
video_path,
label_path,
outputs=('backbone.layer3', 'backbone.layer3.1.conv1'))
assert 'backbone.layer3.1.conv1' in feat and 'backbone.layer3' in feat
assert isinstance(feat['backbone.layer3.1.conv1'], torch.Tensor)
assert isinstance(feat['backbone.layer3'], torch.Tensor)
assert feat['backbone.layer3'].size() == (25, 1024, 14, 14)
assert feat['backbone.layer3.1.conv1'].size() == (25, 256, 14, 14)
cfg_file = 'configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py' # noqa: E501
sf_model = init_recognizer(cfg_file, None, device)
for ops in sf_model.cfg.data.test.pipeline:
# Changes to reduce memory in order to pass CI
if ops['type'] in ('TenCrop', 'ThreeCrop'):
ops['type'] = 'CenterCrop'
if ops['type'] == 'SampleFrames':
ops['num_clips'] = 1
_, feat = inference_recognizer(
sf_model, video_path, label_path, outputs=('backbone', 'cls_head'))
assert isinstance(feat, dict) and isinstance(feat['backbone'], tuple)
assert 'backbone' in feat and 'cls_head' in feat
assert len(feat['backbone']) == 2
assert isinstance(feat['backbone'][0], torch.Tensor)
assert isinstance(feat['backbone'][1], torch.Tensor)
assert feat['backbone'][0].size() == (1, 2048, 4, 8, 8)
assert feat['backbone'][1].size() == (1, 256, 32, 8, 8)
assert feat['cls_head'].size() == (1, 400)
def test_frames_inference_recognizer():
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = 'cpu'
rgb_model = init_recognizer(
frame_config_file, None, device, use_frames=True)
flow_model = init_recognizer(
flow_frame_config_file, None, device, use_frames=True)
with pytest.raises(RuntimeError):
# video path doesn't exist
inference_recognizer(rgb_model, 'missing_path', label_path)
with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with ``use_frames``
inference_recognizer(
flow_model, frames_path, label_path, use_frames=False)
for ops in rgb_model.cfg.data.test.pipeline:
if ops['type'] in ('TenCrop', 'ThreeCrop'):
# Use CenterCrop to reduce memory in order to pass CI
ops['type'] = 'CenterCrop'
ops['crop_size'] = 224
for ops in flow_model.cfg.data.test.pipeline:
if ops['type'] in ('TenCrop', 'ThreeCrop'):
# Use CenterCrop to reduce memory in order to pass CI
ops['type'] = 'CenterCrop'
ops['crop_size'] = 224
top5_label = inference_recognizer(
rgb_model, frames_path, label_path, use_frames=True)
scores = [item[1] for item in top5_label]
assert len(top5_label) == 5
assert scores == sorted(scores, reverse=True)
_, feat = inference_recognizer(
flow_model,
frames_path,
label_path,
outputs=('backbone', 'cls_head'),
as_tensor=False,
use_frames=True)
assert isinstance(feat, dict)
assert 'backbone' in feat and 'cls_head' in feat
assert isinstance(feat['backbone'], np.ndarray)
assert isinstance(feat['cls_head'], np.ndarray)
assert feat['backbone'].shape == (25, 2048, 7, 7)
assert feat['cls_head'].shape == (1, 400)
_, feat = inference_recognizer(
rgb_model,
frames_path,
label_path,
use_frames=True,
outputs=('backbone.layer3', 'backbone.layer3.1.conv1'))
assert 'backbone.layer3.1.conv1' in feat and 'backbone.layer3' in feat
assert isinstance(feat['backbone.layer3.1.conv1'], torch.Tensor)
assert isinstance(feat['backbone.layer3'], torch.Tensor)
assert feat['backbone.layer3'].size() == (25, 1024, 14, 14)
assert feat['backbone.layer3.1.conv1'].size() == (25, 256, 14, 14)
| 6,972
| 37.313187
| 119
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_lr.py
|
import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call
import torch
import torch.nn as nn
from mmcv.runner import IterTimerHook, PaviLoggerHook, build_runner
from torch.utils.data import DataLoader
def test_tin_lr_updater_hook():
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
runner = _build_demo_runner()
hook_cfg = dict(type='TINLrUpdaterHook', min_lr=0.1)
runner.register_hook_from_cfg(hook_cfg)
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='exp',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='constant',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
hook_cfg = dict(
type='TINLrUpdaterHook',
by_epoch=False,
min_lr=0.1,
warmup='linear',
warmup_iters=2,
warmup_ratio=0.9)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
# add pavi hook
hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
runner.register_hook(hook)
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
assert hasattr(hook, 'writer')
calls = [
call('train', {
'learning_rate': 0.028544155877284292,
'momentum': 0.95
}, 1),
call('train', {
'learning_rate': 0.04469266270539641,
'momentum': 0.95
}, 6),
call('train', {
'learning_rate': 0.09695518130045147,
'momentum': 0.95
}, 10)
]
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
def _build_demo_runner(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)
log_config = dict(
interval=1, hooks=[
dict(type='TextLoggerHook'),
])
tmp_dir = tempfile.mkdtemp()
runner = build_runner(
dict(type=runner_type),
default_args=dict(
model=model,
work_dir=tmp_dir,
optimizer=optimizer,
logger=logging.getLogger(),
max_epochs=max_epochs,
max_iters=max_iters))
runner.register_checkpoint_hook(dict(interval=1))
runner.register_logger_hooks(log_config)
return runner
| 3,291
| 26.898305
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_runtime/test_apis_test.py
|
import sys
import warnings
from unittest.mock import MagicMock, Mock, patch
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
# TODO import test functions from mmcv and delete them from mmaction2
try:
from mmcv.engine import (collect_results_cpu, multi_gpu_test,
single_gpu_test)
pytest.skip(
'Test functions are supported in MMCV', allow_module_level=True)
except (ImportError, ModuleNotFoundError):
warnings.warn(
'DeprecationWarning: single_gpu_test, multi_gpu_test, '
'collect_results_cpu, collect_results_gpu from mmaction2 will be '
'deprecated. Please install mmcv through master branch.')
from mmaction.apis.test import (collect_results_cpu, multi_gpu_test,
single_gpu_test)
class OldStyleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
self.cnt = 0
def forward(self, *args, **kwargs):
result = [self.cnt]
self.cnt += 1
return result
class Model(OldStyleModel):
def train_step(self):
pass
def val_step(self):
pass
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return len(self.eval_result)
def test_single_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()
results = single_gpu_test(model, loader)
assert results == list(range(8))
def mock_tensor_without_cuda(*args, **kwargs):
if 'device' not in kwargs:
return torch.Tensor(*args)
return torch.IntTensor(*args, device='cpu')
@patch('mmaction.apis.test.collect_results_gpu',
Mock(return_value=list(range(8))))
@patch('mmaction.apis.test.collect_results_cpu',
Mock(return_value=list(range(8))))
def test_multi_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()
results = multi_gpu_test(model, loader)
assert results == list(range(8))
results = multi_gpu_test(model, loader, gpu_collect=False)
assert results == list(range(8))
@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1)))
@patch('torch.distributed.broadcast', MagicMock)
@patch('torch.distributed.barrier', Mock)
@pytest.mark.skipif(
sys.version_info[:2] == (3, 8), reason='Not for python 3.8')
def test_collect_results_cpu():
def content_for_unittest():
results_part = list(range(8))
size = 8
results = collect_results_cpu(results_part, size)
assert results == list(range(8))
results = collect_results_cpu(results_part, size, 'unittest')
assert results == list(range(8))
if not torch.cuda.is_available():
with patch(
'torch.full',
Mock(
return_value=torch.full(
(512, ), 32, dtype=torch.uint8, device='cpu'))):
with patch('torch.tensor', mock_tensor_without_cuda):
content_for_unittest()
else:
content_for_unittest()
| 3,346
| 27.12605
| 74
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/base.py
|
import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv.utils import _BatchNorm
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
def generate_recognizer_demo_inputs(
input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
            Default: (1, 3, 3, 224, 224).
        model_type (str): Model type for data generation, from
            {'2D', '3D', 'audio'}. Default: '2D'.
"""
if len(input_shape) == 5:
(N, L, _, _, _) = input_shape
elif len(input_shape) == 6:
(N, M, _, L, _, _) = input_shape
imgs = np.random.random(input_shape)
if model_type == '2D':
gt_labels = torch.LongTensor([2] * N)
elif model_type == '3D':
gt_labels = torch.LongTensor([2] * M)
elif model_type == 'audio':
gt_labels = torch.LongTensor([2] * L)
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {'imgs': torch.FloatTensor(imgs), 'gt_labels': gt_labels}
return inputs
def generate_detector_demo_inputs(
input_shape=(1, 3, 4, 224, 224), num_classes=81, train=True,
device='cpu'):
num_samples = input_shape[0]
if not train:
assert num_samples == 1
def random_box(n):
box = torch.rand(n, 4) * 0.5
box[:, 2:] += 0.5
box[:, 0::2] *= input_shape[3]
box[:, 1::2] *= input_shape[4]
if device == 'cuda':
box = box.cuda()
return box
def random_label(n):
label = torch.randn(n, num_classes)
label = (label > 0.8).type(torch.float32)
label[:, 0] = 0
if device == 'cuda':
label = label.cuda()
return label
img = torch.FloatTensor(np.random.random(input_shape))
if device == 'cuda':
img = img.cuda()
    proposals = [random_box(2) for _ in range(num_samples)]
    gt_bboxes = [random_box(2) for _ in range(num_samples)]
    gt_labels = [random_label(2) for _ in range(num_samples)]
    img_metas = [dict(img_shape=input_shape[-2:]) for _ in range(num_samples)]
if train:
return dict(
img=img,
proposals=proposals,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
img_metas=img_metas)
return dict(img=[img], proposals=[proposals], img_metas=[img_metas])
def generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run gradcam.
Args:
input_shape (tuple[int]): input batch dimensions.
Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
            Default: '2D'.
    Returns:
dict: model inputs, including two keys, ``imgs`` and ``label``.
"""
imgs = np.random.random(input_shape)
if model_type in ['2D', '3D']:
gt_labels = torch.LongTensor([2] * input_shape[0])
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {
'imgs': torch.FloatTensor(imgs),
'label': gt_labels,
}
return inputs
def get_cfg(config_type, fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config_types = ('recognition', 'recognition_audio', 'localization',
'detection')
assert config_type in config_types
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/' + config_type)
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config
def get_recognizer_cfg(fname):
return get_cfg('recognition', fname)
def get_audio_recognizer_cfg(fname):
return get_cfg('recognition_audio', fname)
def get_localizer_cfg(fname):
return get_cfg('localization', fname)
def get_detector_cfg(fname):
return get_cfg('detection', fname)
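# Usage sketch (config name taken from the gradcam tests in this repo):
# mmcv.Config.fromfile re-reads the file on every call, so the returned
# config can be mutated freely.
#   cfg = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
#   cfg.model['backbone']['pretrained'] = None  # affects this copy only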
| 4,723
| 27.981595
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_roi_extractor.py
|
import torch
from mmaction.models import SingleRoIExtractor3D
def test_single_roi_extractor3d():
roi_extractor = SingleRoIExtractor3D(
roi_layer_type='RoIAlign',
featmap_stride=16,
output_size=8,
sampling_ratio=0,
pool_mode='avg',
aligned=True,
with_temporal_pool=True)
feat = torch.randn([4, 64, 8, 16, 16])
rois = torch.tensor([[0., 1., 1., 6., 6.], [1., 2., 2., 7., 7.],
[3., 2., 2., 9., 9.], [2., 2., 0., 10., 9.]])
roi_feat, feat = roi_extractor(feat, rois)
assert roi_feat.shape == (4, 64, 1, 8, 8)
assert feat.shape == (4, 64, 1, 16, 16)
feat = (torch.randn([4, 64, 8, 16, 16]), torch.randn([4, 32, 16, 16, 16]))
roi_feat, feat = roi_extractor(feat, rois)
assert roi_feat.shape == (4, 96, 1, 8, 8)
assert feat.shape == (4, 96, 1, 16, 16)
feat = torch.randn([4, 64, 8, 16, 16])
roi_extractor = SingleRoIExtractor3D(
roi_layer_type='RoIAlign',
featmap_stride=16,
output_size=8,
sampling_ratio=0,
pool_mode='avg',
aligned=True,
with_temporal_pool=False)
roi_feat, feat = roi_extractor(feat, rois)
assert roi_feat.shape == (4, 64, 8, 8, 8)
assert feat.shape == (4, 64, 8, 16, 16)
feat = (torch.randn([4, 64, 8, 16, 16]), torch.randn([4, 32, 16, 16, 16]))
roi_feat, feat = roi_extractor(feat, rois)
assert roi_feat.shape == (4, 96, 16, 8, 8)
assert feat.shape == (4, 96, 16, 16, 16)
feat = torch.randn([4, 64, 8, 16, 16])
roi_extractor = SingleRoIExtractor3D(
roi_layer_type='RoIAlign',
featmap_stride=16,
output_size=8,
sampling_ratio=0,
pool_mode='avg',
aligned=True,
with_temporal_pool=True,
with_global=True)
roi_feat, feat = roi_extractor(feat, rois)
assert roi_feat.shape == (4, 128, 1, 8, 8)
assert feat.shape == (4, 64, 1, 16, 16)
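# Channel/temporal bookkeeping behind the assertions above (derived from the
# shapes asserted in this test, not from extra documentation):
# - tuple inputs are fused along channels: 64 + 32 = 96;
# - with_temporal_pool=True collapses the temporal axis to 1, otherwise the
#   (aligned) temporal length is kept (8 and 16 in the cases above);
# - with_global=True appends global context, doubling 64 -> 128 channels.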
| 1,945
| 32.551724
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common.py
|
import os.path as osp
import pytest
import torch
from mmaction.models.common import LFB, TAM, Conv2plus1d, ConvAudio
def test_conv2plus1d():
with pytest.raises(AssertionError):
# Length of kernel size, stride and padding must be the same
Conv2plus1d(3, 8, (2, 2))
conv_2plus1d = Conv2plus1d(3, 8, 2)
conv_2plus1d.init_weights()
assert torch.equal(conv_2plus1d.bn_s.weight,
torch.ones_like(conv_2plus1d.bn_s.weight))
assert torch.equal(conv_2plus1d.bn_s.bias,
torch.zeros_like(conv_2plus1d.bn_s.bias))
x = torch.rand(1, 3, 8, 256, 256)
output = conv_2plus1d(x)
assert output.shape == torch.Size([1, 8, 7, 255, 255])
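# Output-size arithmetic behind the assertion above: kernel 2 with stride 1
# and no padding shrinks each spatio-temporal dim by one, so
# (8, 256, 256) -> (7, 255, 255) while channels go 3 -> 8.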
def test_conv_audio():
conv_audio = ConvAudio(3, 8, 3)
conv_audio.init_weights()
x = torch.rand(1, 3, 8, 8)
output = conv_audio(x)
assert output.shape == torch.Size([1, 16, 8, 8])
conv_audio_sum = ConvAudio(3, 8, 3, op='sum')
output = conv_audio_sum(x)
assert output.shape == torch.Size([1, 8, 8, 8])
def test_TAM():
"""test TAM."""
with pytest.raises(AssertionError):
# alpha must be a positive integer
TAM(16, 8, alpha=0, beta=4)
with pytest.raises(AssertionError):
# beta must be a positive integer
TAM(16, 8, alpha=2, beta=0)
with pytest.raises(AssertionError):
        # the number of channels of x should equal self.in_channels of TAM
tam = TAM(16, 8)
x = torch.rand(64, 8, 112, 112)
tam(x)
tam = TAM(16, 8)
x = torch.rand(32, 16, 112, 112)
output = tam(x)
assert output.shape == torch.Size([32, 16, 112, 112])
def test_LFB():
"""test LFB."""
with pytest.raises(ValueError):
LFB(lfb_prefix_path='./_non_exist_path')
lfb_prefix_path = osp.normpath(
osp.join(osp.dirname(__file__), '../data/lfb'))
with pytest.raises(AssertionError):
LFB(lfb_prefix_path=lfb_prefix_path, dataset_modes=100)
with pytest.raises(ValueError):
LFB(lfb_prefix_path=lfb_prefix_path, device='ceph')
# load on cpu
lfb_cpu = LFB(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu')
lt_feat_cpu = lfb_cpu['video_1,930']
assert lt_feat_cpu.shape == (5 * 60, 16)
assert len(lfb_cpu) == 1
# load on lmdb
lfb_lmdb = LFB(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=3,
window_size=30,
lfb_channels=16,
dataset_modes=('unittest'),
device='lmdb',
lmdb_map_size=1e6)
lt_feat_lmdb = lfb_lmdb['video_1,930']
assert lt_feat_lmdb.shape == (3 * 30, 16)
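# Window arithmetic behind the assertions above: one LFB lookup returns
# max_num_sampled_feat * window_size feature rows of lfb_channels each,
# i.e. 5 * 60 = 300 x 16 on cpu and 3 * 30 = 90 x 16 via lmdb.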
| 2,754
| 26.55
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_backbones.py
|
import copy
import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import (C3D, X3D, MobileNetV2TSM, ResNet2Plus1d,
ResNet3dCSN, ResNet3dSlowFast, ResNet3dSlowOnly,
ResNetAudio, ResNetTIN, ResNetTSM, TANet)
from mmaction.models.backbones.resnet_tsm import NL3DWrapper
from .base import check_norm_state, generate_backbone_demo_inputs
def test_x3d_backbone():
"""Test x3d backbone."""
with pytest.raises(AssertionError):
# In X3D: 1 <= num_stages <= 4
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=0)
with pytest.raises(AssertionError):
# In X3D: 1 <= num_stages <= 4
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=5)
with pytest.raises(AssertionError):
# len(spatial_strides) == num_stages
X3D(gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
spatial_strides=(1, 2),
num_stages=4)
with pytest.raises(AssertionError):
# se_style in ['half', 'all']
X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style=None)
with pytest.raises(AssertionError):
# se_ratio should be None or > 0
X3D(gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
se_style='half',
se_ratio=0)
# x3d_s, no pretrained, norm_eval True
x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=True)
x3d_s.init_weights()
x3d_s.train()
assert check_norm_state(x3d_s.modules(), False)
# x3d_l, no pretrained, norm_eval True
x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=True)
x3d_l.init_weights()
x3d_l.train()
assert check_norm_state(x3d_l.modules(), False)
# x3d_s, no pretrained, norm_eval False
x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False)
x3d_s.init_weights()
x3d_s.train()
assert check_norm_state(x3d_s.modules(), True)
# x3d_l, no pretrained, norm_eval False
x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=False)
x3d_l.init_weights()
x3d_l.train()
assert check_norm_state(x3d_l.modules(), True)
# x3d_s, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
x3d_s_frozen = X3D(
gamma_w=1.0,
gamma_b=2.25,
gamma_d=2.2,
norm_eval=False,
frozen_stages=frozen_stages)
x3d_s_frozen.init_weights()
x3d_s_frozen.train()
assert x3d_s_frozen.conv1_t.bn.training is False
for param in x3d_s_frozen.conv1_s.parameters():
assert param.requires_grad is False
for param in x3d_s_frozen.conv1_t.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(x3d_s_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual, zero_init_residual is True by default
for m in x3d_s_frozen.modules():
if hasattr(m, 'conv3'):
assert torch.equal(m.conv3.bn.weight,
torch.zeros_like(m.conv3.bn.weight))
assert torch.equal(m.conv3.bn.bias,
torch.zeros_like(m.conv3.bn.bias))
# x3d_s inference
input_shape = (1, 3, 13, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
x3d_s_frozen = x3d_s_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = x3d_s_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 432, 13, 2, 2])
else:
feat = x3d_s_frozen(imgs)
assert feat.shape == torch.Size([1, 432, 13, 2, 2])
# x3d_m inference
input_shape = (1, 3, 16, 96, 96)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
x3d_s_frozen = x3d_s_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = x3d_s_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 432, 16, 3, 3])
else:
feat = x3d_s_frozen(imgs)
assert feat.shape == torch.Size([1, 432, 16, 3, 3])
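# Spatial-stride sketch behind the x3d assertions above: the stem and the
# four stages each downsample space by 2 (32x in total), so 64x64 -> 2x2 and
# 96x96 -> 3x3, while the temporal length (13 or 16) is preserved.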
def test_resnet2plus1d_backbone():
# Test r2+1d backbone
with pytest.raises(AssertionError):
# r2+1d does not support inflation
ResNet2Plus1d(50, None, pretrained2d=True)
with pytest.raises(AssertionError):
# r2+1d requires conv(2+1)d module
ResNet2Plus1d(
50, None, pretrained2d=False, conv_cfg=dict(type='Conv3d'))
frozen_stages = 1
r2plus1d_34_frozen = ResNet2Plus1d(
34,
None,
conv_cfg=dict(type='Conv2plus1d'),
pretrained2d=False,
frozen_stages=frozen_stages,
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(1, 1, 1, 1),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 2, 2, 2))
r2plus1d_34_frozen.init_weights()
r2plus1d_34_frozen.train()
assert r2plus1d_34_frozen.conv1.conv.bn_s.training is False
assert r2plus1d_34_frozen.conv1.bn.training is False
for param in r2plus1d_34_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(r2plus1d_34_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
input_shape = (1, 3, 8, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
r2plus1d_34_frozen = r2plus1d_34_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = r2plus1d_34_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
else:
feat = r2plus1d_34_frozen(imgs)
assert feat.shape == torch.Size([1, 512, 1, 2, 2])
r2plus1d_50_frozen = ResNet2Plus1d(
50,
None,
conv_cfg=dict(type='Conv2plus1d'),
pretrained2d=False,
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(1, 1, 1, 1),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 2, 2, 2),
frozen_stages=frozen_stages)
r2plus1d_50_frozen.init_weights()
r2plus1d_50_frozen.train()
assert r2plus1d_50_frozen.conv1.conv.bn_s.training is False
assert r2plus1d_50_frozen.conv1.bn.training is False
for param in r2plus1d_50_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(r2plus1d_50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
input_shape = (1, 3, 8, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
r2plus1d_50_frozen = r2plus1d_50_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = r2plus1d_50_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = r2plus1d_50_frozen(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
def test_resnet_tsm_backbone():
"""Test resnet_tsm backbone."""
with pytest.raises(NotImplementedError):
# shift_place must be block or blockres
resnet_tsm_50_block = ResNetTSM(50, shift_place='Block')
resnet_tsm_50_block.init_weights()
from mmaction.models.backbones.resnet import Bottleneck
from mmaction.models.backbones.resnet_tsm import TemporalShift
input_shape = (8, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# resnet_tsm with depth 50
resnet_tsm_50 = ResNetTSM(50)
resnet_tsm_50.init_weights()
for layer_name in resnet_tsm_50.res_layers:
layer = getattr(resnet_tsm_50, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block.conv1.conv, TemporalShift)
assert block.conv1.conv.num_segments == resnet_tsm_50.num_segments
assert block.conv1.conv.shift_div == resnet_tsm_50.shift_div
assert isinstance(block.conv1.conv.net, nn.Conv2d)
# resnet_tsm with depth 50, no pretrained, shift_place is block
resnet_tsm_50_block = ResNetTSM(50, shift_place='block')
resnet_tsm_50_block.init_weights()
for layer_name in resnet_tsm_50_block.res_layers:
layer = getattr(resnet_tsm_50_block, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block, TemporalShift)
            assert block.num_segments == resnet_tsm_50_block.num_segments
assert block.shift_div == resnet_tsm_50_block.shift_div
assert isinstance(block.net, Bottleneck)
# resnet_tsm with depth 50, no pretrained, use temporal_pool
resnet_tsm_50_temporal_pool = ResNetTSM(50, temporal_pool=True)
resnet_tsm_50_temporal_pool.init_weights()
for layer_name in resnet_tsm_50_temporal_pool.res_layers:
layer = getattr(resnet_tsm_50_temporal_pool, layer_name)
blocks = list(layer.children())
if layer_name == 'layer2':
assert len(blocks) == 2
assert isinstance(blocks[1], nn.MaxPool3d)
blocks = copy.deepcopy(blocks[0])
for block in blocks:
assert isinstance(block.conv1.conv, TemporalShift)
if layer_name == 'layer1':
assert block.conv1.conv.num_segments == \
resnet_tsm_50_temporal_pool.num_segments
else:
assert block.conv1.conv.num_segments == \
resnet_tsm_50_temporal_pool.num_segments // 2
assert block.conv1.conv.shift_div == resnet_tsm_50_temporal_pool.shift_div # noqa: E501
assert isinstance(block.conv1.conv.net, nn.Conv2d)
# resnet_tsm with non-local module
non_local_cfg = dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='embedded_gaussian')
non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
resnet_tsm_nonlocal = ResNetTSM(
50, non_local=non_local, non_local_cfg=non_local_cfg)
resnet_tsm_nonlocal.init_weights()
for layer_name in ['layer2', 'layer3']:
layer = getattr(resnet_tsm_nonlocal, layer_name)
for i, _ in enumerate(layer):
if i % 2 == 0:
assert isinstance(layer[i], NL3DWrapper)
resnet_tsm_50_full = ResNetTSM(
50,
non_local=non_local,
non_local_cfg=non_local_cfg,
temporal_pool=True)
resnet_tsm_50_full.init_weights()
    # TSM forward
feat = resnet_tsm_50(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
# TSM with non-local forward
feat = resnet_tsm_nonlocal(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
# TSM with temporal pool forward
feat = resnet_tsm_50_temporal_pool(imgs)
assert feat.shape == torch.Size([4, 2048, 2, 2])
# TSM with temporal pool + non-local forward
input_shape = (16, 3, 32, 32)
imgs = generate_backbone_demo_inputs(input_shape)
feat = resnet_tsm_50_full(imgs)
assert feat.shape == torch.Size([8, 2048, 1, 1])
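# Batch bookkeeping behind the assertions above: 2D TSM inputs are laid out
# as (N * num_segments, C, H, W); temporal_pool halves the segment count
# after layer2, so 8 frames -> batch 4 (and 16 -> 8 with non-local + pool).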
def test_mobilenetv2_tsm_backbone():
"""Test mobilenetv2_tsm backbone."""
from mmaction.models.backbones.resnet_tsm import TemporalShift
from mmaction.models.backbones.mobilenet_v2 import InvertedResidual
from mmcv.cnn import ConvModule
input_shape = (8, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
    # mobilenetv2_tsm with widen_factor = 1.0
mobilenetv2_tsm = MobileNetV2TSM()
mobilenetv2_tsm.init_weights()
for cur_module in mobilenetv2_tsm.modules():
if isinstance(cur_module, InvertedResidual) and \
len(cur_module.conv) == 3 and \
cur_module.use_res_connect:
assert isinstance(cur_module.conv[0], TemporalShift)
assert cur_module.conv[0].num_segments == \
mobilenetv2_tsm.num_segments
assert cur_module.conv[0].shift_div == mobilenetv2_tsm.shift_div
assert isinstance(cur_module.conv[0].net, ConvModule)
    # TSM-MobileNetV2 with widen_factor = 1.0 forward
feat = mobilenetv2_tsm(imgs)
assert feat.shape == torch.Size([8, 1280, 2, 2])
    # mobilenetv2 with widen_factor = 0.5 forward
mobilenetv2_tsm_05 = MobileNetV2TSM(widen_factor=0.5)
mobilenetv2_tsm_05.init_weights()
feat = mobilenetv2_tsm_05(imgs)
assert feat.shape == torch.Size([8, 1280, 2, 2])
    # mobilenetv2 with widen_factor = 1.5 forward
mobilenetv2_tsm_15 = MobileNetV2TSM(widen_factor=1.5)
mobilenetv2_tsm_15.init_weights()
feat = mobilenetv2_tsm_15(imgs)
assert feat.shape == torch.Size([8, 1920, 2, 2])
def test_slowfast_backbone():
"""Test SlowFast backbone."""
with pytest.raises(TypeError):
# cfg should be a dict
ResNet3dSlowFast(None, slow_pathway=list(['foo', 'bar']))
with pytest.raises(TypeError):
# pretrained should be a str
sf_50 = ResNet3dSlowFast(dict(foo='bar'))
sf_50.init_weights()
with pytest.raises(KeyError):
# pathway type should be implemented
ResNet3dSlowFast(None, slow_pathway=dict(type='resnext'))
# test slowfast with slow inflated
sf_50_inflate = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained='torchvision://resnet50',
pretrained2d=True,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)))
sf_50_inflate.init_weights()
sf_50_inflate.train()
# test slowfast with no lateral connection
sf_50_wo_lateral = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1)))
sf_50_wo_lateral.init_weights()
sf_50_wo_lateral.train()
# slowfast w/o lateral connection inference test
input_shape = (1, 3, 8, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
sf_50_wo_lateral = sf_50_wo_lateral.cuda()
imgs_gpu = imgs.cuda()
feat = sf_50_wo_lateral(imgs_gpu)
else:
feat = sf_50_wo_lateral(imgs)
assert isinstance(feat, tuple)
assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
# test slowfast with frozen stages config
frozen_slow = 3
sf_50 = ResNet3dSlowFast(
None,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
pretrained2d=True,
lateral=True,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
frozen_stages=frozen_slow))
sf_50.init_weights()
sf_50.train()
for stage in range(1, sf_50.slow_path.num_stages):
lateral_name = sf_50.slow_path.lateral_connections[stage - 1]
conv_lateral = getattr(sf_50.slow_path, lateral_name)
for mod in conv_lateral.modules():
if isinstance(mod, _BatchNorm):
if stage <= frozen_slow:
assert mod.training is False
else:
assert mod.training is True
for param in conv_lateral.parameters():
if stage <= frozen_slow:
assert param.requires_grad is False
else:
assert param.requires_grad is True
# test slowfast with normal config
sf_50 = ResNet3dSlowFast(None)
sf_50.init_weights()
sf_50.train()
# slowfast inference test
input_shape = (1, 3, 8, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
sf_50 = sf_50.cuda()
imgs_gpu = imgs.cuda()
feat = sf_50(imgs_gpu)
else:
feat = sf_50(imgs)
assert isinstance(feat, tuple)
assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
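# Two-pathway shapes behind the assertions above: the slow path outputs 2048
# channels at temporal length 1, the fast path 256 channels at the full
# temporal length 8; both reach 2x2 spatially for 64x64 inputs.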
def test_slowonly_backbone():
"""Test SlowOnly backbone."""
with pytest.raises(AssertionError):
# SlowOnly should contain no lateral connection
ResNet3dSlowOnly(50, None, lateral=True)
# test SlowOnly for PoseC3D
so_50 = ResNet3dSlowOnly(
depth=50,
pretrained=None,
in_channels=17,
base_channels=32,
num_stages=3,
out_indices=(2, ),
stage_blocks=(4, 6, 3),
conv1_stride_s=1,
pool1_stride_s=1,
inflate=(0, 1, 1),
spatial_strides=(2, 2, 2),
temporal_strides=(1, 1, 2),
dilations=(1, 1, 1))
so_50.init_weights()
so_50.train()
# test SlowOnly with normal config
so_50 = ResNet3dSlowOnly(50, None)
so_50.init_weights()
so_50.train()
# SlowOnly inference test
input_shape = (1, 3, 8, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
so_50 = so_50.cuda()
imgs_gpu = imgs.cuda()
feat = so_50(imgs_gpu)
else:
feat = so_50(imgs)
assert feat.shape == torch.Size([1, 2048, 8, 2, 2])
def test_resnet_csn_backbone():
"""Test resnet_csn backbone."""
with pytest.raises(ValueError):
# Bottleneck mode must be "ip" or "ir"
ResNet3dCSN(152, None, bottleneck_mode='id')
input_shape = (2, 3, 6, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
resnet3d_csn_frozen = ResNet3dCSN(
152, None, bn_frozen=True, norm_eval=True)
resnet3d_csn_frozen.train()
for m in resnet3d_csn_frozen.modules():
if isinstance(m, _BatchNorm):
for param in m.parameters():
assert param.requires_grad is False
# Interaction-preserved channel-separated bottleneck block
resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
resnet3d_csn_ip.init_weights()
resnet3d_csn_ip.train()
for i, layer_name in enumerate(resnet3d_csn_ip.res_layers):
layers = getattr(resnet3d_csn_ip, layer_name)
num_blocks = resnet3d_csn_ip.stage_blocks[i]
assert len(layers) == num_blocks
for layer in layers:
assert isinstance(layer.conv2, nn.Sequential)
assert len(layer.conv2) == 2
assert layer.conv2[1].groups == layer.planes
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_csn_ip = resnet3d_csn_ip.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_csn_ip(imgs_gpu)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
else:
feat = resnet3d_csn_ip(imgs)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
# Interaction-reduced channel-separated bottleneck block
resnet3d_csn_ir = ResNet3dCSN(152, None, bottleneck_mode='ir')
resnet3d_csn_ir.init_weights()
resnet3d_csn_ir.train()
for i, layer_name in enumerate(resnet3d_csn_ir.res_layers):
layers = getattr(resnet3d_csn_ir, layer_name)
num_blocks = resnet3d_csn_ir.stage_blocks[i]
assert len(layers) == num_blocks
for layer in layers:
assert isinstance(layer.conv2, nn.Sequential)
assert len(layer.conv2) == 1
assert layer.conv2[0].groups == layer.planes
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_csn_ir = resnet3d_csn_ir.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_csn_ir(imgs_gpu)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
else:
feat = resnet3d_csn_ir(imgs)
assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
    # Set training status to False
resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
resnet3d_csn_ip.init_weights()
resnet3d_csn_ip.train(False)
for module in resnet3d_csn_ip.children():
assert module.training is False
def test_tanet_backbone():
"""Test tanet backbone."""
with pytest.raises(NotImplementedError):
        # TA-Blocks are only implemented for Bottleneck blocks for now
tanet_18 = TANet(18, 8)
tanet_18.init_weights()
from mmaction.models.backbones.resnet import Bottleneck
from mmaction.models.backbones.tanet import TABlock
# tanet with depth 50
tanet_50 = TANet(50, 8)
tanet_50.init_weights()
for layer_name in tanet_50.res_layers:
layer = getattr(tanet_50, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block, TABlock)
assert isinstance(block.block, Bottleneck)
assert block.tam.num_segments == block.num_segments
assert block.tam.in_channels == block.block.conv1.out_channels
input_shape = (8, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
feat = tanet_50(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
input_shape = (16, 3, 32, 32)
imgs = generate_backbone_demo_inputs(input_shape)
feat = tanet_50(imgs)
assert feat.shape == torch.Size([16, 2048, 1, 1])
def test_c3d_backbone():
"""Test c3d backbone."""
input_shape = (1, 3, 16, 112, 112)
imgs = generate_backbone_demo_inputs(input_shape)
# c3d inference test
c3d = C3D()
c3d.init_weights()
c3d.train()
feat = c3d(imgs)
assert feat.shape == torch.Size([1, 4096])
# c3d with bn inference test
c3d_bn = C3D(norm_cfg=dict(type='BN3d'))
c3d_bn.init_weights()
c3d_bn.train()
feat = c3d_bn(imgs)
assert feat.shape == torch.Size([1, 4096])
def test_resnet_audio_backbone():
"""Test ResNetAudio backbone."""
input_shape = (1, 1, 16, 16)
spec = generate_backbone_demo_inputs(input_shape)
# inference
audioonly = ResNetAudio(50, None)
audioonly.init_weights()
audioonly.train()
feat = audioonly(spec)
assert feat.shape == torch.Size([1, 1024, 2, 2])
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_resnet_tin_backbone():
"""Test resnet_tin backbone."""
with pytest.raises(AssertionError):
# num_segments should be positive
resnet_tin = ResNetTIN(50, num_segments=-1)
resnet_tin.init_weights()
from mmaction.models.backbones.resnet_tin import (CombineNet,
TemporalInterlace)
# resnet_tin with normal config
resnet_tin = ResNetTIN(50)
resnet_tin.init_weights()
for layer_name in resnet_tin.res_layers:
layer = getattr(resnet_tin, layer_name)
blocks = list(layer.children())
for block in blocks:
assert isinstance(block.conv1.conv, CombineNet)
assert isinstance(block.conv1.conv.net1, TemporalInterlace)
assert (
block.conv1.conv.net1.num_segments == resnet_tin.num_segments)
assert block.conv1.conv.net1.shift_div == resnet_tin.shift_div
# resnet_tin with partial batchnorm
resnet_tin_pbn = ResNetTIN(50, partial_bn=True)
resnet_tin_pbn.train()
count_bn = 0
for m in resnet_tin_pbn.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
assert m.training is False
assert m.weight.requires_grad is False
assert m.bias.requires_grad is False
else:
assert m.training is True
assert m.weight.requires_grad is True
assert m.bias.requires_grad is True
input_shape = (8, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape).cuda()
resnet_tin = resnet_tin.cuda()
# resnet_tin with normal cfg inference
feat = resnet_tin(imgs)
assert feat.shape == torch.Size([8, 2048, 2, 2])
| 25,467
| 34.971751
| 100
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_neck.py
|
import copy
import pytest
import torch
from mmaction.models import TPN
from .base import generate_backbone_demo_inputs
def test_tpn():
"""Test TPN backbone."""
tpn_cfg = dict(
in_channels=(1024, 2048),
out_channels=1024,
spatial_modulation_cfg=dict(
in_channels=(1024, 2048), out_channels=2048),
temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
upsample_cfg=dict(scale_factor=(1, 1, 1)),
downsample_cfg=dict(downsample_scale=(1, 1, 1)),
level_fusion_cfg=dict(
in_channels=(1024, 1024),
mid_channels=(1024, 1024),
out_channels=2048,
downsample_scales=((1, 1, 1), (1, 1, 1))),
aux_head_cfg=dict(out_channels=400, loss_weight=0.5))
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['in_channels'] = list(tpn_cfg_['in_channels'])
TPN(**tpn_cfg_)
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['out_channels'] = float(tpn_cfg_['out_channels'])
TPN(**tpn_cfg_)
with pytest.raises(AssertionError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['downsample_cfg']['downsample_position'] = 'unsupport'
TPN(**tpn_cfg_)
for k in tpn_cfg:
if not k.endswith('_cfg'):
continue
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_[k] = list()
with pytest.raises(AssertionError):
TPN(**tpn_cfg_)
with pytest.raises(ValueError):
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cfg_['flow_type'] = 'unsupport'
TPN(**tpn_cfg_)
target_shape = (32, 1)
target = generate_backbone_demo_inputs(target_shape).long().squeeze()
x0_shape = (32, 1024, 1, 4, 4)
x1_shape = (32, 2048, 1, 2, 2)
x0 = generate_backbone_demo_inputs(x0_shape)
x1 = generate_backbone_demo_inputs(x1_shape)
x = [x0, x1]
# ResNetTPN with 'cascade' flow_type
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_cascade = TPN(**tpn_cfg_)
feat, loss_aux = tpn_cascade(x, target)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 1
# ResNetTPN with 'parallel' flow_type
tpn_cfg_ = copy.deepcopy(tpn_cfg)
tpn_parallel = TPN(flow_type='parallel', **tpn_cfg_)
feat, loss_aux = tpn_parallel(x, target)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 1
# ResNetTPN with 'cascade' flow_type and target is None
feat, loss_aux = tpn_cascade(x, None)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 0
# ResNetTPN with 'parallel' flow_type and target is None
feat, loss_aux = tpn_parallel(x, None)
assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
assert len(loss_aux) == 0
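# Aux-loss behaviour summarized from the assertions above: the auxiliary head
# only contributes a loss entry when a target is supplied, hence
# len(loss_aux) == 1 with targets and 0 with target=None.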
| 2,850
| 31.770115
| 73
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/__init__.py
|
from .base import (check_norm_state, generate_backbone_demo_inputs,
generate_detector_demo_inputs, generate_gradcam_inputs,
generate_recognizer_demo_inputs, get_audio_recognizer_cfg,
get_cfg, get_detector_cfg, get_localizer_cfg,
get_recognizer_cfg)
__all__ = [
'check_norm_state', 'generate_backbone_demo_inputs',
'generate_recognizer_demo_inputs', 'generate_gradcam_inputs', 'get_cfg',
'get_recognizer_cfg', 'get_audio_recognizer_cfg', 'get_localizer_cfg',
'get_detector_cfg', 'generate_detector_demo_inputs'
]
| 605
| 45.615385
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_gradcam.py
|
import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils.gradcam_utils import GradCAM
from .base import generate_gradcam_inputs, get_recognizer_cfg
def _get_target_shapes(input_shape, num_classes=400, model_type='2D'):
if model_type not in ['2D', '3D']:
raise ValueError(f'Data type {model_type} is not available')
preds_target_shape = (input_shape[0], num_classes)
if model_type == '3D':
# input shape (batch_size, num_crops*num_clips, C, clip_len, H, W)
# target shape (batch_size*num_crops*num_clips, clip_len, H, W, C)
blended_imgs_target_shape = (input_shape[0] * input_shape[1],
input_shape[3], input_shape[4],
input_shape[5], input_shape[2])
else:
# input shape (batch_size, num_segments, C, H, W)
# target shape (batch_size, num_segments, H, W, C)
blended_imgs_target_shape = (input_shape[0], input_shape[1],
input_shape[3], input_shape[4],
input_shape[2])
return blended_imgs_target_shape, preds_target_shape
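# Worked example (illustrative numbers): input_shape = (1, 2, 3, 8, 32, 32)
# with model_type='3D' yields blended imgs of shape (1 * 2, 8, 32, 32, 3)
# and preds of shape (1, 400); the channel axis moves last for blending.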
def _do_test_2D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400,
device='cpu'):
demo_inputs = generate_gradcam_inputs(input_shape)
demo_inputs['imgs'] = demo_inputs['imgs'].to(device)
demo_inputs['label'] = demo_inputs['label'].to(device)
recognizer = recognizer.to(device)
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='2D')
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def _do_test_3D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400):
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='3D')
demo_inputs = generate_gradcam_inputs(input_shape, '3D')
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
demo_inputs['imgs'] = demo_inputs['imgs'].cuda()
demo_inputs['label'] = demo_inputs['label'].cuda()
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
else:
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def test_tsn():
config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 25, 3, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_i3d():
config = get_recognizer_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = [1, 1, 3, 32, 32, 32]
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_r2plus1d():
config = get_recognizer_cfg(
'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
config.model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_slowfast():
config = get_recognizer_cfg(
'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/slow_path/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tsm():
config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
# base config
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
# test twice sample + 3 crops, 2*3*8=48
config.model.test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 48, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_csn():
config = get_recognizer_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tpn():
target_layer_name = 'backbone/layer4/1/relu'
config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape, 174)
config = get_recognizer_cfg(
'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_c3d():
config = get_recognizer_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 1, 3, 16, 112, 112)
target_layer_name = 'backbone/conv5a/activate'
_do_test_3D_models(recognizer, target_layer_name, input_shape, 101)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_tin():
config = get_recognizer_cfg(
'tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 8, 3, 64, 64)
_do_test_2D_models(
recognizer, target_layer_name, input_shape, device='cuda:0')
def test_x3d():
config = get_recognizer_cfg('x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 1, 3, 13, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
| 8,364
| 35.369565
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_head.py
|
import os.path as osp
import tempfile
from unittest.mock import Mock, patch
import numpy as np
import pytest
import torch
import torch.nn as nn
import mmaction
from mmaction.models import (ACRNHead, AudioTSNHead, BBoxHeadAVA, FBOHead,
I3DHead, LFBInferHead, SlowFastHead, TPNHead,
TRNHead, TSMHead, TSNHead, X3DHead)
from .base import generate_backbone_demo_inputs
def test_i3d_head():
"""Test loss method, layer construction, attributes and forward function in
i3d head."""
i3d_head = I3DHead(num_classes=4, in_channels=2048)
i3d_head.init_weights()
assert i3d_head.num_classes == 4
assert i3d_head.dropout_ratio == 0.5
assert i3d_head.in_channels == 2048
assert i3d_head.init_std == 0.01
assert isinstance(i3d_head.dropout, nn.Dropout)
assert i3d_head.dropout.p == i3d_head.dropout_ratio
assert isinstance(i3d_head.fc_cls, nn.Linear)
assert i3d_head.fc_cls.in_features == i3d_head.in_channels
assert i3d_head.fc_cls.out_features == i3d_head.num_classes
assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d)
assert i3d_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 4, 7, 7)
feat = torch.rand(input_shape)
# i3d head inference
cls_scores = i3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4])
def test_bbox_head_ava():
"""Test loss method, layer construction, attributes and forward function in
bbox head."""
with pytest.raises(TypeError):
# topk must be None, int or tuple[int]
BBoxHeadAVA(topk=0.1)
with pytest.raises(AssertionError):
# topk should be smaller than num_classes
BBoxHeadAVA(num_classes=5, topk=(3, 5))
bbox_head = BBoxHeadAVA(in_channels=10, num_classes=4, topk=1)
input = torch.randn([3, 10, 2, 2, 2])
ret, _ = bbox_head(input)
assert ret.shape == (3, 4)
bbox_head = BBoxHeadAVA()
bbox_head.init_weights()
bbox_head = BBoxHeadAVA(temporal_pool_type='max', spatial_pool_type='avg')
bbox_head.init_weights()
cls_score = torch.tensor(
[[0.568, -0.162, 0.273, -0.390, 0.447, 0.102, -0.409],
[2.388, 0.609, 0.369, 1.630, -0.808, -0.212, 0.296],
[0.252, -0.533, -0.644, -0.591, 0.148, 0.963, -0.525],
[0.134, -0.311, -0.764, -0.752, 0.656, -1.517, 0.185]])
labels = torch.tensor([[0., 0., 1., 0., 0., 1., 0.],
[0., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 1.],
[0., 0., 1., 1., 0., 0., 1.]])
label_weights = torch.tensor([1., 1., 1., 1.])
losses = bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=labels,
label_weights=label_weights)
assert torch.isclose(losses['loss_action_cls'], torch.tensor(0.7162495))
assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.6666666))
assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.4791665))
assert torch.isclose(losses['recall@top3'], torch.tensor(0.75))
assert torch.isclose(losses['prec@top3'], torch.tensor(0.5))
assert torch.isclose(losses['recall@top5'], torch.tensor(1.0))
assert torch.isclose(losses['prec@top5'], torch.tensor(0.45))
rois = torch.tensor([[0.0, 0.1, 0.2, 0.3, 0.4], [0.0, 0.5, 0.6, 0.7, 0.8]])
rois[1::2] *= 380
rois[2::2] *= 220
crop_quadruple = np.array([0.1, 0.2, 0.8, 0.7])
cls_score = torch.tensor([0.995, 0.728])
img_shape = (320, 480)
flip = True
bboxes, scores = bbox_head.get_det_bboxes(
rois=rois,
cls_score=cls_score,
img_shape=img_shape,
flip=flip,
crop_quadruple=crop_quadruple)
assert torch.all(
torch.isclose(
bboxes,
torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500],
[0.45499998, 0.69875002, 0.58166665, 0.86499995]])))
assert torch.all(
torch.isclose(scores, torch.tensor([0.73007441, 0.67436624])))
def test_x3d_head():
"""Test loss method, layer construction, attributes and forward function in
x3d head."""
x3d_head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False)
x3d_head.init_weights()
assert x3d_head.num_classes == 4
assert x3d_head.dropout_ratio == 0.5
assert x3d_head.in_channels == 432
assert x3d_head.init_std == 0.01
assert isinstance(x3d_head.dropout, nn.Dropout)
assert x3d_head.dropout.p == x3d_head.dropout_ratio
assert isinstance(x3d_head.fc1, nn.Linear)
assert x3d_head.fc1.in_features == x3d_head.in_channels
assert x3d_head.fc1.out_features == x3d_head.mid_channels
assert x3d_head.fc1.bias is None
assert isinstance(x3d_head.fc2, nn.Linear)
assert x3d_head.fc2.in_features == x3d_head.mid_channels
assert x3d_head.fc2.out_features == x3d_head.num_classes
assert isinstance(x3d_head.pool, nn.AdaptiveAvgPool3d)
assert x3d_head.pool.output_size == (1, 1, 1)
input_shape = (3, 432, 4, 7, 7)
feat = torch.rand(input_shape)
    # x3d head inference
cls_scores = x3d_head(feat)
assert cls_scores.shape == torch.Size([3, 4])
def test_slowfast_head():
"""Test loss method, layer construction, attributes and forward function in
slowfast head."""
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
sf_head.init_weights()
assert sf_head.num_classes == 4
assert sf_head.dropout_ratio == 0.8
assert sf_head.in_channels == 2304
assert sf_head.init_std == 0.01
assert isinstance(sf_head.dropout, nn.Dropout)
assert sf_head.dropout.p == sf_head.dropout_ratio
assert isinstance(sf_head.fc_cls, nn.Linear)
assert sf_head.fc_cls.in_features == sf_head.in_channels
assert sf_head.fc_cls.out_features == sf_head.num_classes
assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d)
assert sf_head.avg_pool.output_size == (1, 1, 1)
input_shape = (3, 2048, 32, 7, 7)
feat_slow = torch.rand(input_shape)
input_shape = (3, 256, 4, 7, 7)
feat_fast = torch.rand(input_shape)
sf_head = SlowFastHead(num_classes=4, in_channels=2304)
cls_scores = sf_head((feat_slow, feat_fast))
assert cls_scores.shape == torch.Size([3, 4])
def test_tsn_head():
"""Test loss method, layer construction, attributes and forward function in
tsn head."""
tsn_head = TSNHead(num_classes=4, in_channels=2048)
tsn_head.init_weights()
assert tsn_head.num_classes == 4
assert tsn_head.dropout_ratio == 0.4
assert tsn_head.in_channels == 2048
assert tsn_head.init_std == 0.01
assert tsn_head.consensus.dim == 1
assert tsn_head.spatial_type == 'avg'
assert isinstance(tsn_head.dropout, nn.Dropout)
assert tsn_head.dropout.p == tsn_head.dropout_ratio
assert isinstance(tsn_head.fc_cls, nn.Linear)
assert tsn_head.fc_cls.in_features == tsn_head.in_channels
assert tsn_head.fc_cls.out_features == tsn_head.num_classes
assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
# tsn head inference
num_segs = input_shape[0]
cls_scores = tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
# Test multi-class recognition
multi_tsn_head = TSNHead(
num_classes=4,
in_channels=2048,
loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
multi_class=True,
label_smooth_eps=0.01)
multi_tsn_head.init_weights()
assert multi_tsn_head.num_classes == 4
assert multi_tsn_head.dropout_ratio == 0.4
assert multi_tsn_head.in_channels == 2048
assert multi_tsn_head.init_std == 0.01
assert multi_tsn_head.consensus.dim == 1
assert isinstance(multi_tsn_head.dropout, nn.Dropout)
assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio
assert isinstance(multi_tsn_head.fc_cls, nn.Linear)
assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels
assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes
assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert multi_tsn_head.avg_pool.output_size == (1, 1)
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
    # multi-class tsn head inference
    num_segs = input_shape[0]
    cls_scores = multi_tsn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
def test_tsn_head_audio():
"""Test loss method, layer construction, attributes and forward function in
tsn head."""
tsn_head_audio = AudioTSNHead(num_classes=4, in_channels=5)
tsn_head_audio.init_weights()
assert tsn_head_audio.num_classes == 4
assert tsn_head_audio.dropout_ratio == 0.4
assert tsn_head_audio.in_channels == 5
assert tsn_head_audio.init_std == 0.01
assert tsn_head_audio.spatial_type == 'avg'
assert isinstance(tsn_head_audio.dropout, nn.Dropout)
assert tsn_head_audio.dropout.p == tsn_head_audio.dropout_ratio
assert isinstance(tsn_head_audio.fc_cls, nn.Linear)
assert tsn_head_audio.fc_cls.in_features == tsn_head_audio.in_channels
assert tsn_head_audio.fc_cls.out_features == tsn_head_audio.num_classes
assert isinstance(tsn_head_audio.avg_pool, nn.AdaptiveAvgPool2d)
assert tsn_head_audio.avg_pool.output_size == (1, 1)
input_shape = (8, 5, 7, 7)
feat = torch.rand(input_shape)
# tsn head inference
cls_scores = tsn_head_audio(feat)
assert cls_scores.shape == torch.Size([8, 4])
def test_tsm_head():
"""Test loss method, layer construction, attributes and forward function in
tsm head."""
tsm_head = TSMHead(num_classes=4, in_channels=2048)
tsm_head.init_weights()
assert tsm_head.num_classes == 4
assert tsm_head.dropout_ratio == 0.8
assert tsm_head.in_channels == 2048
assert tsm_head.init_std == 0.001
assert tsm_head.consensus.dim == 1
assert tsm_head.spatial_type == 'avg'
assert isinstance(tsm_head.dropout, nn.Dropout)
assert tsm_head.dropout.p == tsm_head.dropout_ratio
assert isinstance(tsm_head.fc_cls, nn.Linear)
assert tsm_head.fc_cls.in_features == tsm_head.in_channels
assert tsm_head.fc_cls.out_features == tsm_head.num_classes
assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d)
assert tsm_head.avg_pool.output_size == 1
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
    # tsm head inference
num_segs = input_shape[0]
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
    # tsm head inference with temporal pool
tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)
tsm_head.init_weights()
cls_scores = tsm_head(feat, num_segs)
assert cls_scores.shape == torch.Size([2, 4])
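# Consensus arithmetic behind the assertions above: scores of num_segs
# segments are averaged into one clip score (8 -> 1); with temporal_pool=True
# the 8 segments form two groups of 4, yielding two clip scores (8 -> 2).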
def test_trn_head():
"""Test loss method, layer construction, attributes and forward function in
trn head."""
from mmaction.models.heads.trn_head import (RelationModule,
RelationModuleMultiScale)
trn_head = TRNHead(num_classes=4, in_channels=2048, relation_type='TRN')
trn_head.init_weights()
assert trn_head.num_classes == 4
assert trn_head.dropout_ratio == 0.8
assert trn_head.in_channels == 2048
assert trn_head.init_std == 0.001
assert trn_head.spatial_type == 'avg'
relation_module = trn_head.consensus
assert isinstance(relation_module, RelationModule)
assert relation_module.hidden_dim == 256
assert isinstance(relation_module.classifier[3], nn.Linear)
assert relation_module.classifier[3].out_features == trn_head.num_classes
assert trn_head.dropout.p == trn_head.dropout_ratio
assert isinstance(trn_head.dropout, nn.Dropout)
assert isinstance(trn_head.fc_cls, nn.Linear)
assert trn_head.fc_cls.in_features == trn_head.in_channels
assert trn_head.fc_cls.out_features == trn_head.hidden_dim
assert isinstance(trn_head.avg_pool, nn.AdaptiveAvgPool2d)
assert trn_head.avg_pool.output_size == 1
input_shape = (8, 2048, 7, 7)
feat = torch.rand(input_shape)
    # trn head inference
num_segs = input_shape[0]
cls_scores = trn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
    # trn head inference with multi-scale relation
trn_head = TRNHead(
num_classes=4,
in_channels=2048,
num_segments=8,
relation_type='TRNMultiScale')
trn_head.init_weights()
assert isinstance(trn_head.consensus, RelationModuleMultiScale)
assert trn_head.consensus.scales == range(8, 1, -1)
cls_scores = trn_head(feat, num_segs)
assert cls_scores.shape == torch.Size([1, 4])
with pytest.raises(ValueError):
trn_head = TRNHead(
num_classes=4,
in_channels=2048,
num_segments=8,
relation_type='RelationModlue')
@patch.object(mmaction.models.LFBInferHead, '__del__', Mock)
def test_lfb_infer_head():
"""Test layer construction, attributes and forward function in lfb infer
head."""
with tempfile.TemporaryDirectory() as tmpdir:
lfb_infer_head = LFBInferHead(
lfb_prefix_path=tmpdir, use_half_precision=True)
lfb_infer_head.init_weights()
st_feat_shape = (3, 16, 1, 8, 8)
st_feat = generate_backbone_demo_inputs(st_feat_shape)
rois = torch.cat(
(torch.tensor([0, 1, 0]).float().view(3, 1), torch.randn(3, 4)), dim=1)
img_metas = [dict(img_key='video_1,777'), dict(img_key='video_2, 888')]
result = lfb_infer_head(st_feat, rois, img_metas)
assert st_feat.equal(result)
assert len(lfb_infer_head.all_features) == 3
assert lfb_infer_head.all_features[0].shape == (16, 1, 1, 1)
def test_fbo_head():
"""Test layer construction, attributes and forward function in fbo head."""
lfb_prefix_path = osp.normpath(
osp.join(osp.dirname(__file__), '../data/lfb'))
st_feat_shape = (1, 16, 1, 8, 8)
st_feat = generate_backbone_demo_inputs(st_feat_shape)
rois = torch.randn(1, 5)
rois[0][0] = 0
img_metas = [dict(img_key='video_1, 930')]
# non local fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(
type='non_local',
st_feat_channels=16,
lt_feat_channels=16,
latent_channels=8,
num_st_feat=1,
num_lt_feat=5 * 60,
))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 24, 1, 1, 1)
# avg fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(type='avg'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 32, 1, 1, 1)
# max fbo
fbo_head = FBOHead(
lfb_cfg=dict(
lfb_prefix_path=lfb_prefix_path,
max_num_sampled_feat=5,
window_size=60,
lfb_channels=16,
dataset_modes=('unittest'),
device='cpu'),
fbo_cfg=dict(type='max'))
fbo_head.init_weights()
out = fbo_head(st_feat, rois, img_metas)
assert out.shape == (1, 32, 1, 1, 1)
def test_tpn_head():
"""Test loss method, layer construction, attributes and forward function in
tpn head."""
tpn_head = TPNHead(num_classes=4, in_channels=2048)
tpn_head.init_weights()
assert hasattr(tpn_head, 'avg_pool2d')
assert hasattr(tpn_head, 'avg_pool3d')
assert isinstance(tpn_head.avg_pool3d, nn.AdaptiveAvgPool3d)
assert tpn_head.avg_pool3d.output_size == (1, 1, 1)
assert tpn_head.avg_pool2d is None
input_shape = (4, 2048, 7, 7)
feat = torch.rand(input_shape)
# tpn head inference with num_segs
num_segs = 2
cls_scores = tpn_head(feat, num_segs)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4])
# tpn head inference with no num_segs
input_shape = (2, 2048, 3, 7, 7)
feat = torch.rand(input_shape)
cls_scores = tpn_head(feat)
assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d)
assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7)
assert cls_scores.shape == torch.Size([2, 4])
def test_acrn_head():
roi_feat = torch.randn(4, 16, 1, 7, 7)
feat = torch.randn(2, 16, 1, 16, 16)
rois = torch.Tensor([[0, 2.2268, 0.5926, 10.6142, 8.0029],
[0, 2.2577, 0.1519, 11.6451, 8.9282],
[1, 1.9874, 1.0000, 11.1585, 8.2840],
[1, 3.3338, 3.7166, 8.4174, 11.2785]])
acrn_head = ACRNHead(32, 16)
acrn_head.init_weights()
new_feat = acrn_head(roi_feat, feat, rois)
assert new_feat.shape == (4, 16, 1, 16, 16)
acrn_head = ACRNHead(32, 16, stride=2)
new_feat = acrn_head(roi_feat, feat, rois)
assert new_feat.shape == (4, 16, 1, 8, 8)
acrn_head = ACRNHead(32, 16, stride=2, num_convs=2)
new_feat = acrn_head(roi_feat, feat, rois)
assert new_feat.shape == (4, 16, 1, 8, 8)
| 17,583
| 34.097804
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/test_bmn.py
|
import numpy as np
import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_bmn():
model_cfg = get_localizer_cfg(
'bmn/bmn_400x100_2x8_9e_activitynet_feature.py')
if torch.cuda.is_available():
localizer_bmn = build_localizer(model_cfg.model).cuda()
raw_feature = torch.rand(8, 400, 100).cuda()
gt_bbox = np.array([[[0.1, 0.3], [0.375, 0.625]]] * 8)
losses = localizer_bmn(raw_feature, gt_bbox)
assert isinstance(losses, dict)
        # Test forward in test mode
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
]
with torch.no_grad():
one_raw_feature = torch.rand(1, 400, 100).cuda()
localizer_bmn(
one_raw_feature,
gt_bbox=None,
video_meta=video_meta,
return_loss=False)
else:
localizer_bmn = build_localizer(model_cfg.model)
raw_feature = torch.rand(8, 400, 100)
gt_bbox = torch.Tensor([[[0.1, 0.3], [0.375, 0.625]]] * 8)
losses = localizer_bmn(raw_feature, gt_bbox)
assert isinstance(losses, dict)
        # Test forward in test mode
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
]
with torch.no_grad():
one_raw_feature = torch.rand(1, 400, 100)
localizer_bmn(
one_raw_feature,
gt_bbox=None,
video_meta=video_meta,
return_loss=False)
| 1,758
| 30.410714
| 66
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/test_ssn.py
|
import copy
import mmcv
import pytest
import torch
from mmaction.models import build_localizer
def test_ssn_train():
train_cfg = mmcv.ConfigDict(
dict(
ssn=dict(
assigner=dict(
positive_iou_threshold=0.7,
background_iou_threshold=0.01,
incomplete_iou_threshold=0.3,
background_coverage_threshold=0.02,
incomplete_overlap_threshold=0.01),
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1),
debug=False)))
base_model_cfg = dict(
type='SSN',
backbone=dict(
type='ResNet', pretrained=None, depth=18, norm_eval=True),
spatial_type='avg',
dropout_ratio=0.8,
loss_cls=dict(type='SSNLoss'),
cls_head=dict(
type='SSNHead',
dropout_ratio=0.,
in_channels=512,
num_classes=20,
consensus=dict(
type='STPPTrain',
stpp_stage=(1, 1, 1),
num_segments_list=(2, 5, 2)),
use_regression=True),
train_cfg=train_cfg)
dropout_cfg = copy.deepcopy(base_model_cfg)
dropout_cfg['dropout_ratio'] = 0
dropout_cfg['cls_head']['dropout_ratio'] = 0.5
non_regression_cfg = copy.deepcopy(base_model_cfg)
non_regression_cfg['cls_head']['use_regression'] = False
imgs = torch.rand(1, 8, 9, 3, 224, 224)
proposal_scale_factor = torch.Tensor([[[1.0345, 1.0345], [1.0028, 0.0028],
[1.0013, 1.0013], [1.0008, 1.0008],
[0.3357, 1.0006], [1.0006, 1.0006],
[0.0818, 1.0005], [1.0030,
1.0030]]])
proposal_type = torch.Tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
proposal_labels = torch.LongTensor([[8, 8, 8, 8, 8, 8, 8, 0]])
reg_targets = torch.Tensor([[[0.2929, 0.2694], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000],
[0.0000, 0.0000], [0.0000, 0.0000]]])
localizer_ssn = build_localizer(base_model_cfg)
localizer_ssn_dropout = build_localizer(dropout_cfg)
localizer_ssn_non_regression = build_localizer(non_regression_cfg)
if torch.cuda.is_available():
localizer_ssn = localizer_ssn.cuda()
localizer_ssn_dropout = localizer_ssn_dropout.cuda()
localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
imgs = imgs.cuda()
proposal_scale_factor = proposal_scale_factor.cuda()
proposal_type = proposal_type.cuda()
proposal_labels = proposal_labels.cuda()
reg_targets = reg_targets.cuda()
# Train normal case
losses = localizer_ssn(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
# Train SSN without dropout in model, with dropout in head
losses = localizer_ssn_dropout(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
# Train SSN model without regression
losses = localizer_ssn_non_regression(
imgs,
proposal_scale_factor=proposal_scale_factor,
proposal_type=proposal_type,
proposal_labels=proposal_labels,
reg_targets=reg_targets)
assert isinstance(losses, dict)
def test_ssn_test():
test_cfg = mmcv.ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=2000,
nms=0.2,
softmax_before_filter=True,
cls_score_dict=None,
cls_top_k=2))))
base_model_cfg = dict(
type='SSN',
backbone=dict(
type='ResNet', pretrained=None, depth=18, norm_eval=True),
spatial_type='avg',
dropout_ratio=0.8,
cls_head=dict(
type='SSNHead',
dropout_ratio=0.,
in_channels=512,
num_classes=20,
consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)),
use_regression=True),
test_cfg=test_cfg)
maxpool_model_cfg = copy.deepcopy(base_model_cfg)
maxpool_model_cfg['spatial_type'] = 'max'
non_regression_cfg = copy.deepcopy(base_model_cfg)
non_regression_cfg['cls_head']['use_regression'] = False
non_regression_cfg['cls_head']['consensus']['use_regression'] = False
tuple_stage_cfg = copy.deepcopy(base_model_cfg)
tuple_stage_cfg['cls_head']['consensus']['stpp_stage'] = (1, (1, 2), 1)
str_stage_cfg = copy.deepcopy(base_model_cfg)
str_stage_cfg['cls_head']['consensus']['stpp_stage'] = ('error', )
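    # Test-time inputs: imgs are (batch, sampled frames, C, H, W), while the
    # proposals arrive as relative [start, end] pairs plus their frame ticks.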
imgs = torch.rand(1, 8, 3, 224, 224)
relative_proposal_list = torch.Tensor([[[0.2500, 0.6250], [0.3750,
0.7500]]])
scale_factor_list = torch.Tensor([[[1.0000, 1.0000], [1.0000, 0.2661]]])
proposal_tick_list = torch.LongTensor([[[1, 2, 5, 7], [20, 30, 60, 80]]])
reg_norm_consts = torch.Tensor([[[-0.0603, 0.0325], [0.0752, 0.1596]]])
localizer_ssn = build_localizer(base_model_cfg)
localizer_ssn_maxpool = build_localizer(maxpool_model_cfg)
localizer_ssn_non_regression = build_localizer(non_regression_cfg)
localizer_ssn_tuple_stage_cfg = build_localizer(tuple_stage_cfg)
with pytest.raises(ValueError):
build_localizer(str_stage_cfg)
if torch.cuda.is_available():
localizer_ssn = localizer_ssn.cuda()
localizer_ssn_maxpool = localizer_ssn_maxpool.cuda()
localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
localizer_ssn_tuple_stage_cfg = localizer_ssn_tuple_stage_cfg.cuda()
imgs = imgs.cuda()
relative_proposal_list = relative_proposal_list.cuda()
scale_factor_list = scale_factor_list.cuda()
proposal_tick_list = proposal_tick_list.cuda()
reg_norm_consts = reg_norm_consts.cuda()
with torch.no_grad():
# Test normal case
localizer_ssn(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model with max spatial pooling
localizer_ssn_maxpool(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model without regression
localizer_ssn_non_regression(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
# Test SSN model with tuple stage cfg.
localizer_ssn_tuple_stage_cfg(
imgs,
relative_proposal_list=relative_proposal_list,
scale_factor_list=scale_factor_list,
proposal_tick_list=proposal_tick_list,
reg_norm_consts=reg_norm_consts,
return_loss=False)
| 7,989
| 38.359606
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/test_pem.py
|
import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_pem():
model_cfg = get_localizer_cfg(
'bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py')
localizer_pem = build_localizer(model_cfg.model)
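    # bsp_feature layout: (batch, num_proposals, feat_dim) = (8, 100, 32);
    # reference_temporal_iou is the per-proposal IoU regression target.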
bsp_feature = torch.rand(8, 100, 32)
reference_temporal_iou = torch.rand(8, 100)
losses = localizer_pem(bsp_feature, reference_temporal_iou)
assert isinstance(losses, dict)
# Test forward test
tmin = torch.rand(100)
tmax = torch.rand(100)
tmin_score = torch.rand(100)
tmax_score = torch.rand(100)
video_meta = [
dict(
video_name='v_test',
duration_second=100,
duration_frame=1000,
annotations=[{
'segment': [0.3, 0.6],
'label': 'Rock climbing'
}],
feature_frame=900)
]
with torch.no_grad():
for one_bsp_feature in bsp_feature:
one_bsp_feature = one_bsp_feature.reshape(1, 100, 32)
localizer_pem(
one_bsp_feature,
tmin=tmin,
tmax=tmax,
tmin_score=tmin_score,
tmax_score=tmax_score,
video_meta=video_meta,
return_loss=False)
| 1,294
| 27.777778
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/test_localizers.py
|
import numpy as np
from mmaction.models.localizers.utils import post_processing
def test_post_processing():
# test with multiple results
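    # Each row below is one candidate proposal; columns 0-1 are normalized
    # [tmin, tmax] and the trailing column is its confidence score, so the
    # asserted segments are the boundaries rescaled by the 100s duration.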
result = np.array([[0., 1., 1., 1., 0.5, 0.5], [0., 0.4, 1., 1., 0.4, 0.4],
[0., 0.95, 1., 1., 0.6, 0.6]])
video_info = dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 2, 16)
assert isinstance(proposal_list[0], dict)
assert proposal_list[0]['score'] == 0.6
assert proposal_list[0]['segment'] == [0., 95.0]
assert isinstance(proposal_list[1], dict)
assert proposal_list[1]['score'] == 0.4
assert proposal_list[1]['segment'] == [0., 40.0]
# test with only result
result = np.array([[0., 1., 1., 1., 0.5, 0.5]])
video_info = dict(
video_name='v_test',
duration_second=100,
duration_frame=960,
feature_frame=960)
proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 1, 16)
assert isinstance(proposal_list[0], dict)
assert proposal_list[0]['score'] == 0.5
assert proposal_list[0]['segment'] == [0., 100.0]
| 1,221
| 34.941176
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_localizers/test_tem.py
|
import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_tem():
model_cfg = get_localizer_cfg(
'bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py')
localizer_tem = build_localizer(model_cfg.model)
raw_feature = torch.rand(8, 400, 100)
gt_bbox = torch.Tensor([[[1.0, 3.0], [3.0, 5.0]]] * 8)
losses = localizer_tem(raw_feature, gt_bbox)
assert isinstance(losses, dict)
# Test forward test
video_meta = [{'video_name': 'v_test'}]
with torch.no_grad():
for one_raw_feature in raw_feature:
one_raw_feature = one_raw_feature.reshape(1, 400, 100)
localizer_tem(
one_raw_feature, video_meta=video_meta, return_loss=False)
| 759
| 30.666667
| 74
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_recognizers/test_recognizer2d.py
|
import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_tsn():
config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
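    # 2D-recognizer input layout: (batch, num_segments, C, H, W).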
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# test forward dummy
recognizer.forward_dummy(imgs, softmax=False)
res = recognizer.forward_dummy(imgs, softmax=True)[0]
assert torch.min(res) >= 0
assert torch.max(res) <= 1
mmcls_backbone = dict(
type='mmcls.ResNeXt',
depth=101,
num_stages=4,
out_indices=(3, ),
groups=32,
width_per_group=4,
style='pytorch')
config.model['backbone'] = mmcls_backbone
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test mixup forward
config = get_recognizer_cfg(
'tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (2, 8, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# test torchvision backbones
tv_backbone = dict(type='torchvision.densenet161', pretrained=True)
config.model['backbone'] = tv_backbone
config.model['cls_head']['in_channels'] = 2208
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test timm backbones
timm_backbone = dict(type='timm.efficientnet_b0', pretrained=False)
config.model['backbone'] = timm_backbone
config.model['cls_head']['in_channels'] = 1280
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_tsm():
config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 8, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test twice sample + 3 crops
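    # 48 views = 8 segments x 2 temporal samples x 3 spatial crops.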
input_shape = (2, 48, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
config.model.test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_trn():
config = get_recognizer_cfg('trn/trn_r50_1x1x8_50e_sthv1_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 8, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test twice sample + 3 crops
input_shape = (2, 48, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
config.model.test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tpn():
config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 8, 3, 224, 224)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
assert 'loss_aux' in losses and 'loss_cls' in losses
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# Test forward dummy
with torch.no_grad():
_recognizer = build_recognizer(config.model)
img_list = [img[None, :] for img in imgs]
if hasattr(_recognizer, 'forward_dummy'):
_recognizer.forward = _recognizer.forward_dummy
for one_img in img_list:
_recognizer(one_img)
def test_tanet():
config = get_recognizer_cfg(
'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 8, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# test twice sample + 3 crops
input_shape = (2, 48, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
config.model.test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
| 8,541
| 29.29078
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_recognizers/test_recognizer3d.py
|
import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_i3d():
config = get_recognizer_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
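    # 3D-recognizer input layout: (batch, num_clips, C, T, H, W).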
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# Test forward dummy
recognizer.forward_dummy(imgs, softmax=False)
res = recognizer.forward_dummy(imgs, softmax=True)[0]
assert torch.min(res) >= 0
assert torch.max(res) <= 1
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# Test forward dummy
recognizer.forward_dummy(imgs, softmax=False)
res = recognizer.forward_dummy(imgs, softmax=True)[0]
assert torch.min(res) >= 0
assert torch.max(res) <= 1
def test_r2plus1d():
config = get_recognizer_cfg(
'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
config.model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_slowfast():
config = get_recognizer_cfg(
'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 16, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# Test the feature max_testing_views
config.model.test_cfg['max_testing_views'] = 1
recognizer = build_recognizer(config.model)
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_csn():
config = get_recognizer_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tpn():
config = get_recognizer_cfg(
'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 8, 3, 1, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
# Test dummy forward
with torch.no_grad():
_recognizer = build_recognizer(config.model)
img_list = [img[None, :] for img in imgs]
if hasattr(_recognizer, 'forward_dummy'):
_recognizer.forward = _recognizer.forward_dummy
for one_img in img_list:
_recognizer(one_img)
def test_c3d():
config = get_recognizer_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 16, 112, 112)
demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
| 9,327
| 31.84507
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_recognizers/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_recognizers/test_audio_recognizer.py
|
import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_audio_recognizer_cfg
def test_audio_recognizer():
config = get_audio_recognizer_cfg(
'resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
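    # Audio-feature input layout: (batch, num_clips, C, T, F), i.e. one
    # 128x80 spectrogram per clip.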
input_shape = (1, 3, 1, 128, 80)
demo_inputs = generate_recognizer_demo_inputs(
input_shape, model_type='audio')
audios = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(audios, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
audio_list = [audio[None, :] for audio in audios]
for one_spectro in audio_list:
recognizer(one_spectro, None, return_loss=False)
| 866
| 28.896552
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/test_resnet.py
|
import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import ResNet
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrain must be a str
resnet50 = ResNet(50, pretrained=0)
resnet50.init_weights()
with pytest.raises(AssertionError):
# style must be in ['pytorch', 'caffe']
ResNet(18, style='tensorflow')
with pytest.raises(AssertionError):
# assert not with_cp
ResNet(18, with_cp=True)
# resnet with depth 18, norm_eval False, initial weights
resnet18 = ResNet(18)
resnet18.init_weights()
# resnet with depth 50, norm_eval True
resnet50 = ResNet(50, norm_eval=True)
resnet50.init_weights()
resnet50.train()
assert check_norm_state(resnet50.modules(), False)
# resnet with depth 50, norm_eval True, pretrained
resnet50_pretrain = ResNet(
pretrained='torchvision://resnet50', depth=50, norm_eval=True)
resnet50_pretrain.init_weights()
resnet50_pretrain.train()
assert check_norm_state(resnet50_pretrain.modules(), False)
# resnet with depth 50, norm_eval True, frozen_stages 1
frozen_stages = 1
resnet50_frozen = ResNet(50, frozen_stages=frozen_stages)
resnet50_frozen.init_weights()
resnet50_frozen.train()
assert resnet50_frozen.conv1.bn.training is False
for layer in resnet50_frozen.conv1.modules():
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# resnet with depth 50, partial batchnorm
resnet_pbn = ResNet(50, partial_bn=True)
resnet_pbn.train()
count_bn = 0
for m in resnet_pbn.modules():
if isinstance(m, nn.BatchNorm2d):
count_bn += 1
if count_bn >= 2:
assert m.weight.requires_grad is False
assert m.bias.requires_grad is False
assert m.training is False
else:
assert m.weight.requires_grad is True
assert m.bias.requires_grad is True
assert m.training is True
input_shape = (1, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# resnet with depth 18 inference
resnet18 = ResNet(18, norm_eval=False)
resnet18.init_weights()
resnet18.train()
feat = resnet18(imgs)
assert feat.shape == torch.Size([1, 512, 2, 2])
# resnet with depth 50 inference
resnet50 = ResNet(50, norm_eval=False)
resnet50.init_weights()
resnet50.train()
feat = resnet50(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
# resnet with depth 50 in caffe style inference
resnet50_caffe = ResNet(50, style='caffe', norm_eval=False)
resnet50_caffe.init_weights()
resnet50_caffe.train()
feat = resnet50_caffe(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
resnet50_flow = ResNet(
depth=50, pretrained='torchvision://resnet50', in_channels=10)
input_shape = (1, 10, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
feat = resnet50_flow(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
resnet50 = ResNet(
depth=50, pretrained='torchvision://resnet50', in_channels=3)
input_shape = (1, 3, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
feat = resnet50(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
| 4,319
| 32.75
| 70
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/test_mobilenet_v2.py
|
import pytest
import torch
from mmcv.utils import _BatchNorm
from mmaction.models import MobileNetV2
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_mobilenetv2_backbone():
"""Test MobileNetV2.
Modified from mmclassification.
"""
from torch.nn.modules import GroupNorm
from mmaction.models.backbones.mobilenet_v2 import InvertedResidual
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (InvertedResidual, )):
return True
return False
with pytest.raises(TypeError):
# pretrained must be a string path
model = MobileNetV2(pretrained=0)
model.init_weights()
with pytest.raises(ValueError):
        # frozen_stages must be in range(1, 8)
MobileNetV2(frozen_stages=8)
with pytest.raises(ValueError):
        # out_indices must be in range(-1, 8)
MobileNetV2(out_indices=[8])
input_shape = (1, 3, 224, 224)
imgs = generate_backbone_demo_inputs(input_shape)
# Test MobileNetV2 with first stage frozen
frozen_stages = 1
model = MobileNetV2(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.conv1.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test MobileNetV2 with norm_eval=True
model = MobileNetV2(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test MobileNetV2 forward with widen_factor=1.0, pretrained
model = MobileNetV2(
widen_factor=1.0,
out_indices=range(0, 8),
pretrained='mmcls://mobilenet_v2')
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
feat = model(imgs)
assert len(feat) == 8
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
assert feat[7].shape == torch.Size((1, 1280, 7, 7))
# Test MobileNetV2 forward with widen_factor=0.5
model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 8, 112, 112))
assert feat[1].shape == torch.Size((1, 16, 56, 56))
assert feat[2].shape == torch.Size((1, 16, 28, 28))
assert feat[3].shape == torch.Size((1, 32, 14, 14))
assert feat[4].shape == torch.Size((1, 48, 14, 14))
assert feat[5].shape == torch.Size((1, 80, 7, 7))
assert feat[6].shape == torch.Size((1, 160, 7, 7))
# Test MobileNetV2 forward with widen_factor=2.0
model = MobileNetV2(widen_factor=2.0)
model.init_weights()
model.train()
feat = model(imgs)
assert feat.shape == torch.Size((1, 2560, 7, 7))
# Test MobileNetV2 forward with out_indices=None
model = MobileNetV2(widen_factor=1.0)
model.init_weights()
model.train()
feat = model(imgs)
assert feat.shape == torch.Size((1, 1280, 7, 7))
# Test MobileNetV2 forward with dict(type='ReLU')
model = MobileNetV2(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with BatchNorm (the default norm) forward
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with GroupNorm forward
model = MobileNetV2(
widen_factor=1.0,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with layers 1, 3, 5 out forward
model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 32, 28, 28))
assert feat[2].shape == torch.Size((1, 96, 14, 14))
# Test MobileNetV2 with checkpoint forward
model = MobileNetV2(
widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
| 7,014
| 33.219512
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/test_resnet3d.py
|
import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import ResNet3d, ResNet3dLayer
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_resnet3d_backbone():
"""Test resnet3d backbone."""
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(34, None, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(34, None, num_stages=5)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(50, None, num_stages=0)
with pytest.raises(AssertionError):
# In ResNet3d: 1 <= num_stages <= 4
ResNet3d(50, None, num_stages=5)
with pytest.raises(AssertionError):
# len(spatial_strides) == len(temporal_strides)
# == len(dilations) == num_stages
ResNet3d(
50,
None,
spatial_strides=(1, ),
temporal_strides=(1, 1),
dilations=(1, 1, 1),
num_stages=4)
with pytest.raises(AssertionError):
# len(spatial_strides) == len(temporal_strides)
# == len(dilations) == num_stages
ResNet3d(
34,
None,
spatial_strides=(1, ),
temporal_strides=(1, 1),
dilations=(1, 1, 1),
num_stages=4)
with pytest.raises(TypeError):
# pretrain must be str or None.
resnet3d_34 = ResNet3d(34, ['resnet', 'bninception'])
resnet3d_34.init_weights()
with pytest.raises(TypeError):
# pretrain must be str or None.
resnet3d_50 = ResNet3d(50, ['resnet', 'bninception'])
resnet3d_50.init_weights()
# resnet3d with depth 34, no pretrained, norm_eval True
resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True)
resnet3d_34.init_weights()
resnet3d_34.train()
assert check_norm_state(resnet3d_34.modules(), False)
# resnet3d with depth 50, no pretrained, norm_eval True
resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True)
resnet3d_50.init_weights()
resnet3d_50.train()
assert check_norm_state(resnet3d_50.modules(), False)
# resnet3d with depth 50, pretrained2d, norm_eval True
resnet3d_50_pretrain = ResNet3d(
50, 'torchvision://resnet50', norm_eval=True)
resnet3d_50_pretrain.init_weights()
resnet3d_50_pretrain.train()
assert check_norm_state(resnet3d_50_pretrain.modules(), False)
from mmcv.runner import _load_checkpoint
chkp_2d = _load_checkpoint('torchvision://resnet50')
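    # The checks below verify the 2D->3D inflation rule: each 2D kernel is
    # replicated along the temporal axis and divided by the temporal kernel
    # size, so the inflated net matches the 2D one on temporally constant
    # input.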
for name, module in resnet3d_50_pretrain.named_modules():
if len(name.split('.')) == 4:
# layer.block.module.submodule
prefix = name.split('.')[:2]
module_type = name.split('.')[2]
submodule_type = name.split('.')[3]
if module_type == 'downsample':
name2d = name.replace('conv', '0').replace('bn', '1')
else:
layer_id = name.split('.')[2][-1]
name2d = prefix[0] + '.' + prefix[1] + '.' + \
submodule_type + layer_id
if isinstance(module, nn.Conv3d):
conv2d_weight = chkp_2d[name2d + '.weight']
conv3d_weight = getattr(module, 'weight').data
assert torch.equal(
conv3d_weight,
conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) /
conv3d_weight.shape[2])
if getattr(module, 'bias') is not None:
conv2d_bias = chkp_2d[name2d + '.bias']
conv3d_bias = getattr(module, 'bias').data
assert torch.equal(conv2d_bias, conv3d_bias)
elif isinstance(module, nn.BatchNorm3d):
for pname in ['weight', 'bias', 'running_mean', 'running_var']:
param_2d = chkp_2d[name2d + '.' + pname]
param_3d = getattr(module, pname).data
assert torch.equal(param_2d, param_3d)
conv3d = resnet3d_50_pretrain.conv1.conv
assert torch.equal(
conv3d.weight,
chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) /
conv3d.weight.shape[2])
conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv
assert torch.equal(
conv3d.weight, chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as(
conv3d.weight) / conv3d.weight.shape[2])
# resnet3d with depth 34, no pretrained, norm_eval False
resnet3d_34_no_bn_eval = ResNet3d(
34, None, pretrained2d=False, norm_eval=False)
resnet3d_34_no_bn_eval.init_weights()
resnet3d_34_no_bn_eval.train()
assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True)
# resnet3d with depth 50, no pretrained, norm_eval False
resnet3d_50_no_bn_eval = ResNet3d(
50, None, pretrained2d=False, norm_eval=False)
resnet3d_50_no_bn_eval.init_weights()
resnet3d_50_no_bn_eval.train()
assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True)
# resnet3d with depth 34, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
resnet3d_34_frozen = ResNet3d(
34, None, pretrained2d=False, frozen_stages=frozen_stages)
resnet3d_34_frozen.init_weights()
resnet3d_34_frozen.train()
assert resnet3d_34_frozen.conv1.bn.training is False
for param in resnet3d_34_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet3d_34_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual
for m in resnet3d_34_frozen.modules():
if hasattr(m, 'conv2'):
assert torch.equal(m.conv2.bn.weight,
torch.zeros_like(m.conv2.bn.weight))
assert torch.equal(m.conv2.bn.bias,
torch.zeros_like(m.conv2.bn.bias))
# resnet3d with depth 50, no pretrained, frozen_stages, norm_eval False
frozen_stages = 1
resnet3d_50_frozen = ResNet3d(
50, None, pretrained2d=False, frozen_stages=frozen_stages)
resnet3d_50_frozen.init_weights()
resnet3d_50_frozen.train()
assert resnet3d_50_frozen.conv1.bn.training is False
for param in resnet3d_50_frozen.conv1.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(resnet3d_50_frozen, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# test zero_init_residual
for m in resnet3d_50_frozen.modules():
if hasattr(m, 'conv3'):
assert torch.equal(m.conv3.bn.weight,
torch.zeros_like(m.conv3.bn.weight))
assert torch.equal(m.conv3.bn.bias,
torch.zeros_like(m.conv3.bn.bias))
# resnet3d frozen with depth 34 inference
input_shape = (1, 3, 6, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_34_frozen = resnet3d_34_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_34_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 3, 2, 2])
else:
feat = resnet3d_34_frozen(imgs)
assert feat.shape == torch.Size([1, 512, 3, 2, 2])
# resnet3d with depth 50 inference
input_shape = (1, 3, 6, 64, 64)
imgs = generate_backbone_demo_inputs(input_shape)
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_50_frozen = resnet3d_50_frozen.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_50_frozen(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
else:
feat = resnet3d_50_frozen(imgs)
assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
# resnet3d with depth 50 in caffe style inference
resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe')
resnet3d_50_caffe.init_weights()
resnet3d_50_caffe.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_50_caffe = resnet3d_50_caffe.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_50_caffe(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
else:
feat = resnet3d_50_caffe(imgs)
assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
# resnet3d with depth 34 in caffe style inference
resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe')
resnet3d_34_caffe.init_weights()
resnet3d_34_caffe.train()
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
resnet3d_34_caffe = resnet3d_34_caffe.cuda()
imgs_gpu = imgs.cuda()
feat = resnet3d_34_caffe(imgs_gpu)
assert feat.shape == torch.Size([1, 512, 3, 2, 2])
else:
feat = resnet3d_34_caffe(imgs)
assert feat.shape == torch.Size([1, 512, 3, 2, 2])
    # resnet3d with depth 50 and 3x3x3 inflate_style inference
    resnet3d_50_3x3x3 = ResNet3d(
        50, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_50_3x3x3.init_weights()
    resnet3d_50_3x3x3.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_50_3x3x3 = resnet3d_50_3x3x3.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_3x3x3(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
    else:
        feat = resnet3d_50_3x3x3(imgs)
        assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
    # resnet3d with depth 34 and 3x3x3 inflate_style inference
    resnet3d_34_3x3x3 = ResNet3d(
        34, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_34_3x3x3.init_weights()
    resnet3d_34_3x3x3.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_34_3x3x3 = resnet3d_34_3x3x3.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_3x3x3(imgs_gpu)
            assert feat.shape == torch.Size([1, 512, 3, 2, 2])
    else:
        feat = resnet3d_34_3x3x3(imgs)
        assert feat.shape == torch.Size([1, 512, 3, 2, 2])
# resnet3d with non-local module
non_local_cfg = dict(
sub_sample=True,
use_scale=False,
norm_cfg=dict(type='BN3d', requires_grad=True),
mode='embedded_gaussian')
non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
resnet3d_nonlocal = ResNet3d(
50,
None,
pretrained2d=False,
non_local=non_local,
non_local_cfg=non_local_cfg)
resnet3d_nonlocal.init_weights()
for layer_name in ['layer2', 'layer3']:
layer = getattr(resnet3d_nonlocal, layer_name)
for i, _ in enumerate(layer):
if i % 2 == 0:
assert hasattr(layer[i], 'non_local_block')
feat = resnet3d_nonlocal(imgs)
assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
def test_resnet3d_layer():
with pytest.raises(AssertionError):
ResNet3dLayer(22, None)
with pytest.raises(AssertionError):
ResNet3dLayer(50, None, stage=4)
res_layer = ResNet3dLayer(50, None, stage=3, norm_eval=True)
res_layer.init_weights()
res_layer.train()
input_shape = (1, 1024, 1, 4, 4)
imgs = generate_backbone_demo_inputs(input_shape)
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
res_layer = res_layer.cuda()
imgs_gpu = imgs.cuda()
feat = res_layer(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = res_layer(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
res_layer = ResNet3dLayer(
50, 'torchvision://resnet50', stage=3, all_frozen=True)
res_layer.init_weights()
res_layer.train()
imgs = generate_backbone_demo_inputs(input_shape)
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
res_layer = res_layer.cuda()
imgs_gpu = imgs.cuda()
feat = res_layer(imgs_gpu)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
else:
feat = res_layer(imgs)
assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
| 13,070
| 38.01791
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/test_base_head.py
|
import torch
import torch.nn.functional as F
from mmcv.utils import assert_dict_has_keys
from mmaction.models import BaseHead
class ExampleHead(BaseHead):
# use an ExampleHead to test BaseHead
def init_weights(self):
pass
def forward(self, x):
pass
def test_base_head():
head = ExampleHead(3, 400, dict(type='CrossEntropyLoss'))
cls_scores = torch.rand((3, 4))
# When truth is non-empty then cls loss should be nonzero for random inputs
gt_labels = torch.LongTensor([2] * 3).squeeze()
losses = head.loss(cls_scores, gt_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
head = ExampleHead(3, 400, dict(type='CrossEntropyLoss', loss_weight=2.0))
cls_scores = torch.rand((3, 4))
# When truth is non-empty then cls loss should be nonzero for random inputs
gt_labels = torch.LongTensor([2] * 3).squeeze()
losses = head.loss(cls_scores, gt_labels)
assert_dict_has_keys(losses, ['loss_cls'])
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
# Test Soft label with batch size > 1
cls_scores = torch.rand((3, 3))
gt_labels = torch.LongTensor([[2] * 3])
gt_one_hot_labels = F.one_hot(gt_labels, num_classes=3).squeeze()
losses = head.loss(cls_scores, gt_one_hot_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
# Test Soft label with batch size = 1
cls_scores = torch.rand((1, 3))
gt_labels = torch.LongTensor([2])
gt_one_hot_labels = F.one_hot(gt_labels, num_classes=3).squeeze()
losses = head.loss(cls_scores, gt_one_hot_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
# test multi-class & label smoothing
head = ExampleHead(
3,
400,
dict(type='BCELossWithLogits'),
multi_class=True,
label_smooth_eps=0.1)
# batch size > 1
cls_scores = torch.rand((2, 3))
gt_labels = torch.LongTensor([[1, 0, 1], [0, 1, 0]]).squeeze()
losses = head.loss(cls_scores, gt_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
# batch size = 1
cls_scores = torch.rand((1, 3))
gt_labels = torch.LongTensor([[1, 0, 1]]).squeeze()
losses = head.loss(cls_scores, gt_labels)
assert 'loss_cls' in losses.keys()
assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
| 2,538
| 33.780822
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_common_modules/test_base_recognizers.py
|
import pytest
import torch
import torch.nn.functional as F
from mmaction.models import BaseRecognizer
class ExampleRecognizer(BaseRecognizer):
def __init__(self, train_cfg, test_cfg):
super(BaseRecognizer, self).__init__()
# reconstruct `__init__()` method in BaseRecognizer to avoid building
# backbone and head which are useless to ExampleRecognizer,
# since ExampleRecognizer is only used for model-unrelated methods
# (like `average_clip`) testing.
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_train(self, imgs, labels):
pass
def forward_test(self, imgs):
pass
def forward_gradcam(self, imgs):
pass
def test_base_recognizer():
cls_score = torch.rand(5, 400)
with pytest.raises(KeyError):
# "average_clips" must defined in test_cfg keys
wrong_test_cfg = dict(clip='score')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# unsupported average clips type
wrong_test_cfg = dict(average_clips='softmax')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# Label should not be None
recognizer = ExampleRecognizer(None, None)
recognizer(torch.tensor(0))
# average_clips=None
test_cfg = dict(average_clips=None)
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score, num_segs=5)
assert torch.equal(score, cls_score)
# average_clips='score'
test_cfg = dict(average_clips='score')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score, num_segs=5)
assert torch.equal(score, cls_score.mean(dim=0, keepdim=True))
# average_clips='prob'
test_cfg = dict(average_clips='prob')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score, num_segs=5)
assert torch.equal(score,
F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True))
| 2,178
| 32.015152
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_detectors/test_detectors.py
|
import pytest
import torch
from ..base import generate_detector_demo_inputs, get_detector_cfg
try:
from mmaction.models import build_detector
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
@pytest.mark.skipif(not mmdet_imported, reason='requires mmdet')
def test_ava_detector():
config = get_detector_cfg('ava/slowonly_kinetics_pretrained_r50_'
'4x16x1_20e_ava_rgb.py')
detector = build_detector(config.model)
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
train_demo_inputs = generate_detector_demo_inputs(
train=True, device='cuda')
test_demo_inputs = generate_detector_demo_inputs(
train=False, device='cuda')
detector = detector.cuda()
losses = detector(**train_demo_inputs)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
_ = detector(**test_demo_inputs, return_loss=False)
else:
train_demo_inputs = generate_detector_demo_inputs(train=True)
test_demo_inputs = generate_detector_demo_inputs(train=False)
losses = detector(**train_demo_inputs)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
_ = detector(**test_demo_inputs, return_loss=False)
| 1,425
| 32.952381
| 69
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_models/test_detectors/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_formating.py
|
import numpy as np
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (Collect, FormatAudioShape,
FormatShape, ImageToTensor, Rename,
ToDataContainer, ToTensor, Transpose)
def test_rename():
org_name = 'a'
new_name = 'b'
mapping = {org_name: new_name}
rename = Rename(mapping)
results = dict(a=2)
results = rename(results)
assert results['b'] == 2
assert 'a' not in results
def test_to_tensor():
to_tensor = ToTensor(['str'])
with pytest.raises(TypeError):
# str cannot be converted to tensor
results = dict(str='0')
to_tensor(results)
    # convert tensor, numpy, sequence, int, float to tensor
target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
to_tensor = ToTensor(target_keys)
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1)
results = to_tensor(original_results)
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
# Add an additional key which is not in keys.
original_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1,
str='test')
results = to_tensor(original_results)
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, original_results[key])
assert repr(to_tensor) == to_tensor.__class__.__name__ + \
f'(keys={target_keys})'
def test_to_data_container():
# check user-defined fields
fields = (dict(key='key1', stack=True), dict(key='key2'))
to_data_container = ToDataContainer(fields=fields)
target_keys = ['key1', 'key2']
original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b'])
results = to_data_container(original_results.copy())
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
# Add an additional key which is not in keys.
original_results = dict(
key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3')
results = to_data_container(original_results.copy())
assert assert_dict_has_keys(results, target_keys)
for key in target_keys:
assert isinstance(results[key], DC)
assert np.all(results[key].data == original_results[key])
assert results['key1'].stack
assert not results['key2'].stack
assert repr(to_data_container) == (
to_data_container.__class__.__name__ + f'(fields={fields})')
def test_image_to_tensor():
original_results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(original_results)
assert results['imgs'].shape == torch.Size([3, 256, 256])
assert isinstance(results['imgs'], torch.Tensor)
assert torch.equal(results['imgs'].data, original_results['imgs'])
assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \
f'(keys={keys})'
def test_transpose():
results = dict(imgs=np.random.randn(256, 256, 3))
keys = ['imgs']
order = [2, 0, 1]
transpose = Transpose(keys, order)
results = transpose(results)
assert results['imgs'].shape == (3, 256, 256)
assert repr(transpose) == transpose.__class__.__name__ + \
f'(keys={keys}, order={order})'
def test_collect():
inputs = dict(
imgs=np.random.randn(256, 256, 3),
label=[1],
filename='test.txt',
original_shape=(256, 256, 3),
img_shape=(256, 256, 3),
pad_shape=(256, 256, 3),
flip_direction='vertical',
img_norm_cfg=dict(to_bgr=False))
keys = ['imgs', 'label']
collect = Collect(keys)
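    # Collect keeps `keys` at the top level and packs the remaining meta
    # fields into the 'img_metas' DataContainer.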
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_metas'])
imgs = inputs.pop('imgs')
assert set(results['img_metas'].data) == set(inputs)
for key in results['img_metas'].data:
assert results['img_metas'].data[key] == inputs[key]
assert repr(collect) == collect.__class__.__name__ + \
(f'(keys={keys}, meta_keys={collect.meta_keys}, '
f'nested={collect.nested})')
inputs['imgs'] = imgs
collect = Collect(keys, nested=True)
results = collect(inputs)
assert sorted(list(results.keys())) == sorted(
['imgs', 'label', 'img_metas'])
for k in results:
assert isinstance(results[k], list)
def test_format_shape():
with pytest.raises(ValueError):
# invalid input format
FormatShape('NHWC')
# 'NCHW' input format
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCHW')
assert format_shape(results)['input_shape'] == (3, 3, 224, 224)
# `NCTHW` input format with num_clips=1, clip_len=3
results = dict(
imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
format_shape = FormatShape('NCTHW')
assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224)
# `NCTHW` input format with num_clips=2, clip_len=3
results = dict(
imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3)
assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224)
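    # 18 frames = 3 videos x 2 clips x 3 frames; NCTHW merges videos and
    # clips into the batch dimension, hence (6, 3, 3, 224, 224).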
target_keys = ['imgs', 'input_shape']
assert assert_dict_has_keys(results, target_keys)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTHW')"
# 'NPTCHW' input format
results = dict(
imgs=np.random.randn(72, 224, 224, 3),
num_clips=9,
clip_len=1,
num_proposals=8)
format_shape = FormatShape('NPTCHW')
assert format_shape(results)['input_shape'] == (8, 9, 3, 224, 224)
def test_format_audio_shape():
with pytest.raises(ValueError):
# invalid input format
FormatAudioShape('XXXX')
# 'NCTF' input format
results = dict(audios=np.random.randn(3, 128, 8))
format_shape = FormatAudioShape('NCTF')
assert format_shape(results)['input_shape'] == (3, 1, 128, 8)
assert repr(format_shape) == format_shape.__class__.__name__ + \
"(input_format='NCTF')"
| 6,791
| 33.830769
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_sampler.py
|
from torch.utils.data import DataLoader, Dataset
from mmaction.datasets.samplers import (ClassSpecificDistributedSampler,
DistributedSampler)
class MyDataset(Dataset):
    def __init__(self, class_prob=None):
        super().__init__()
        # avoid the mutable-default-argument pitfall; fall back to uniform
        # class weights over the 10 classes
        if class_prob is None:
            class_prob = {i: 1 for i in range(10)}
        self.class_prob = class_prob
self.video_infos = [
dict(data=idx, label=idx % 10) for idx in range(100)
]
def __len__(self):
return len(self.video_infos)
def __getitem__(self, idx):
return self.video_infos[idx]
def test_distributed_sampler():
dataset = MyDataset()
sampler = DistributedSampler(dataset, num_replicas=1, rank=0)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for _, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 25
assert sum([len(x['data']) for x in batches]) == 100
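    # With num_replicas=4 each rank receives ceil(100 / 4) = 25 samples,
    # i.e. 7 batches of size 4 (the last batch holds a single sample).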
sampler = DistributedSampler(dataset, num_replicas=4, rank=2)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for i, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 7
assert sum([len(x['data']) for x in batches]) == 25
sampler = DistributedSampler(dataset, num_replicas=6, rank=3)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for i, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 5
assert sum([len(x['data']) for x in batches]) == 17
def test_class_specific_distributed_sampler():
class_prob = dict(zip(list(range(10)), [1] * 5 + [3] * 5))
dataset = MyDataset(class_prob=class_prob)
sampler = ClassSpecificDistributedSampler(
dataset, num_replicas=1, rank=0, dynamic_length=True)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for _, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 50
assert sum([len(x['data']) for x in batches]) == 200
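    # With dynamic_length=True the sampler oversamples by class_prob:
    # 5 classes x 10 samples x prob 1 + 5 classes x 10 samples x prob 3
    # = 200 indices, hence the 50 batches asserted above.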
sampler = ClassSpecificDistributedSampler(
dataset, num_replicas=1, rank=0, dynamic_length=False)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for i, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 25
assert sum([len(x['data']) for x in batches]) == 100
sampler = ClassSpecificDistributedSampler(
dataset, num_replicas=6, rank=2, dynamic_length=True)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for i, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 9
assert sum([len(x['data']) for x in batches]) == 34
sampler = ClassSpecificDistributedSampler(
dataset, num_replicas=6, rank=2, dynamic_length=False)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
batches = []
for i, data in enumerate(data_loader):
batches.append(data)
assert len(batches) == 5
assert sum([len(x['data']) for x in batches]) == 17
| 3,148 | 31.802083 | 72 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_blending.py |
import torch
from mmaction.datasets import CutmixBlending, MixupBlending
def test_mixup():
alpha = 0.2
num_classes = 10
label = torch.randint(0, num_classes, (4, ))
mixup = MixupBlending(num_classes, alpha)
# NCHW imgs
imgs = torch.randn(4, 4, 3, 32, 32)
mixed_imgs, mixed_label = mixup(imgs, label)
assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
assert mixed_label.shape == torch.Size((4, num_classes))
# NCTHW imgs
imgs = torch.randn(4, 4, 2, 3, 32, 32)
mixed_imgs, mixed_label = mixup(imgs, label)
assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
assert mixed_label.shape == torch.Size((4, num_classes))
def test_cutmix():
alpha = 0.2
num_classes = 10
label = torch.randint(0, num_classes, (4, ))
mixup = CutmixBlending(num_classes, alpha)
# NCHW imgs
imgs = torch.randn(4, 4, 3, 32, 32)
mixed_imgs, mixed_label = mixup(imgs, label)
assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
assert mixed_label.shape == torch.Size((4, num_classes))
# NCTHW imgs
imgs = torch.randn(4, 4, 2, 3, 32, 32)
mixed_imgs, mixed_label = mixup(imgs, label)
assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
assert mixed_label.shape == torch.Size((4, num_classes))
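# A minimal usage sketch (not part of the original tests; `model` and the
# train-step wiring are assumed): a blending converts integer labels into
# soft one-hot targets before the loss is computed, e.g.
#     blending = MixupBlending(num_classes=10, alpha=0.2)
#     imgs, soft_label = blending(imgs, label)  # soft_label: (N, num_classes)
#     loss = model(imgs, soft_label)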
| 1,306 | 30.119048 | 63 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_compose.py |
import numpy as np
import pytest
from mmcv.utils import assert_keys_equal
from mmaction.datasets.pipelines import Compose, ImageToTensor
def test_compose():
with pytest.raises(TypeError):
# transform must be callable or a dict
Compose('LoadImage')
target_keys = ['img', 'img_metas']
# test Compose given a data pipeline
img = np.random.randn(256, 256, 3)
results = dict(img=img, abandoned_key=None, img_name='test_image.png')
test_pipeline = [
dict(type='Collect', keys=['img'], meta_keys=['img_name']),
dict(type='ImageToTensor', keys=['img'])
]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert assert_keys_equal(compose_results.keys(), target_keys)
assert assert_keys_equal(compose_results['img_metas'].data.keys(),
['img_name'])
# test Compose when forward data is None
results = None
image_to_tensor = ImageToTensor(keys=[])
test_pipeline = [image_to_tensor]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert compose_results is None
assert repr(compose) == compose.__class__.__name__ + \
f'(\n {image_to_tensor}\n)'
| 1,226 | 31.289474 | 74 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_video_dataset.py |
import os.path as osp
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets import VideoDataset
from .base import BaseTestDataset
class TestVideoDataset(BaseTestDataset):
def test_video_dataset(self):
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
start_index=3)
assert len(video_dataset) == 2
assert video_dataset.start_index == 3
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix)
video_infos = video_dataset.video_infos
video_filename = osp.join(self.data_prefix, 'test.mp4')
assert video_infos == [dict(filename=video_filename, label=0)] * 2
assert video_dataset.start_index == 0
def test_video_dataset_multi_label(self):
video_dataset = VideoDataset(
self.video_ann_file_multi_label,
self.video_pipeline,
data_prefix=self.data_prefix,
multi_class=True,
num_classes=100)
video_infos = video_dataset.video_infos
video_filename = osp.join(self.data_prefix, 'test.mp4')
label0 = [0, 3]
label1 = [0, 2, 4]
labels = [label0, label1]
for info, label in zip(video_infos, labels):
assert info['filename'] == video_filename
assert set(info['label']) == set(label)
assert video_dataset.start_index == 0
def test_video_pipeline(self):
target_keys = ['filename', 'label', 'start_index', 'modality']
# VideoDataset not in test mode
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = video_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# VideoDataset in test mode
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = video_dataset[0]
assert assert_dict_has_keys(result, target_keys)
def test_video_evaluate(self):
video_dataset = VideoDataset(
self.video_ann_file,
self.video_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
video_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
video_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
video_dataset.evaluate(
[0] * len(video_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
video_dataset.evaluate([0] * len(video_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = video_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
| 3,490 | 33.91 | 75 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/base.py |
import os.path as osp
from mmcv import ConfigDict
class BaseTestDataset:
@classmethod
def setup_class(cls):
# prefix path
cls.data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../../data'))
cls.ann_file_prefix = osp.join(cls.data_prefix, 'annotations')
# annotations path
cls.action_ann_file = osp.join(cls.ann_file_prefix,
'action_test_anno.json')
cls.audio_feature_ann_file = osp.join(cls.ann_file_prefix,
'audio_feature_test_list.txt')
cls.audio_ann_file = osp.join(cls.ann_file_prefix,
'audio_test_list.txt')
cls.frame_ann_file_multi_label = osp.join(
cls.ann_file_prefix, 'rawframe_test_list_multi_label.txt')
cls.frame_ann_file_with_offset = osp.join(
cls.ann_file_prefix, 'rawframe_test_list_with_offset.txt')
cls.frame_ann_file = osp.join(cls.ann_file_prefix,
'rawframe_test_list.txt')
cls.hvu_frame_ann_file = osp.join(cls.ann_file_prefix,
'hvu_frame_test_anno.json')
cls.hvu_video_ann_file = osp.join(cls.ann_file_prefix,
'hvu_video_test_anno.json')
cls.hvu_video_eval_ann_file = osp.join(
cls.ann_file_prefix, 'hvu_video_eval_test_anno.json')
cls.proposal_ann_file = osp.join(cls.ann_file_prefix,
'proposal_test_list.txt')
cls.proposal_norm_ann_file = osp.join(cls.ann_file_prefix,
'proposal_normalized_list.txt')
cls.rawvideo_test_anno_json = osp.join(cls.ann_file_prefix,
'rawvideo_test_anno.json')
cls.rawvideo_test_anno_txt = osp.join(cls.ann_file_prefix,
'rawvideo_test_anno.txt')
cls.video_ann_file = osp.join(cls.ann_file_prefix,
'video_test_list.txt')
cls.video_ann_file_multi_label = osp.join(
cls.ann_file_prefix, 'video_test_list_multi_label.txt')
cls.pose_ann_file = osp.join(cls.ann_file_prefix, 'sample.pkl')
# pipeline configuration
cls.action_pipeline = []
cls.audio_feature_pipeline = [
dict(type='LoadAudioFeature'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='AudioFeatureSelector')
]
cls.audio_pipeline = [
dict(type='AudioDecodeInit'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='AudioDecode')
]
cls.frame_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.proposal_pipeline = [
dict(
type='SampleProposalFrames',
clip_len=1,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=0.5),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.proposal_test_pipeline = [
dict(
type='SampleProposalFrames',
clip_len=1,
body_segments=5,
aug_segments=(2, 2),
aug_ratio=0.5,
mode='test'),
dict(type='RawFrameDecode', io_backend='disk')
]
cls.proposal_train_cfg = ConfigDict(
dict(
ssn=dict(
assigner=dict(
positive_iou_threshold=0.7,
background_iou_threshold=0.01,
incomplete_iou_threshold=0.5,
background_coverage_threshold=0.02,
incomplete_overlap_threshold=0.01),
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(
comp_loss_weight=0.1, reg_loss_weight=0.1),
debug=False)))
cls.proposal_test_cfg = ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=2000,
nms=0.2,
softmax_before_filter=True,
cls_top_k=2))))
cls.proposal_test_cfg_topall = ConfigDict(
dict(
ssn=dict(
sampler=dict(test_interval=6, batch_size=16),
evaluater=dict(
top_k=-1,
nms=0.2,
softmax_before_filter=True,
cls_top_k=2))))
cls.rawvideo_pipeline = []
cls.video_pipeline = [
dict(type='OpenCVInit'),
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1),
dict(type='OpenCVDecode')
]
cls.hvu_categories = [
'action', 'attribute', 'concept', 'event', 'object', 'scene'
]
cls.hvu_category_nums = [739, 117, 291, 69, 1679, 248]
cls.hvu_categories_for_eval = ['action', 'scene', 'object']
cls.hvu_category_nums_for_eval = [3, 3, 3]
cls.filename_tmpl = 'img_{:05d}.jpg'
| 5,987 | 38.92 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_audio_visual_dataset.py |
import os.path as osp
from mmaction.datasets import AudioVisualDataset
from .base import BaseTestDataset
class TestAudioVisualDataset(BaseTestDataset):
def test_audio_visual_dataset(self):
test_dataset = AudioVisualDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
video_prefix=self.data_prefix,
data_prefix=self.data_prefix)
video_infos = test_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'imgs')
audio_path = osp.join(self.data_prefix, 'imgs.npy')
filename = osp.join(self.data_prefix, 'imgs.mp4')
assert video_infos == [
dict(
frame_dir=frame_dir,
audio_path=audio_path,
filename=filename,
total_frames=5,
label=127)
] * 2
assert test_dataset.start_index == 1
| 920 | 30.758621 | 59 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_rawframe_dataset.py |
import os.path as osp
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets import RawframeDataset
from .base import BaseTestDataset
class TestRawframeDataset(BaseTestDataset):
def test_rawframe_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'imgs')
assert rawframe_infos == [
dict(frame_dir=frame_dir, total_frames=5, label=127)
] * 2
assert rawframe_dataset.start_index == 1
def test_rawframe_dataset_with_offset(self):
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'imgs')
assert rawframe_infos == [
dict(frame_dir=frame_dir, offset=2, total_frames=5, label=127)
] * 2
assert rawframe_dataset.start_index == 1
def test_rawframe_dataset_multi_label(self):
rawframe_dataset = RawframeDataset(
self.frame_ann_file_multi_label,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=100)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'imgs')
label0 = [1]
label1 = [3, 5]
labels = [label0, label1]
for info, label in zip(rawframe_infos, labels):
assert info['frame_dir'] == frame_dir
assert info['total_frames'] == 5
assert set(info['label']) == set(label)
assert rawframe_dataset.start_index == 1
def test_dataset_realpath(self):
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,
'.')
assert dataset.data_prefix == osp.realpath('.')
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,
's3://good')
assert dataset.data_prefix == 's3://good'
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline)
assert dataset.data_prefix is None
assert dataset.video_infos[0]['frame_dir'] == 'imgs'
def test_rawframe_pipeline(self):
target_keys = [
'frame_dir', 'total_frames', 'label', 'filename_tmpl',
'start_index', 'modality'
]
# RawframeDataset not in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
test_mode=False)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# RawframeDataset in multi-class tasks
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=400,
test_mode=False)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# RawframeDataset with offset
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True,
num_classes=400,
test_mode=False)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys + ['offset'])
# RawframeDataset in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
test_mode=True)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# RawframeDataset in multi-class tasks in test mode
rawframe_dataset = RawframeDataset(
self.frame_ann_file,
self.frame_pipeline,
self.data_prefix,
multi_class=True,
num_classes=400,
test_mode=True)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# RawframeDataset with offset
rawframe_dataset = RawframeDataset(
self.frame_ann_file_with_offset,
self.frame_pipeline,
self.data_prefix,
with_offset=True,
num_classes=400,
test_mode=True)
result = rawframe_dataset[0]
assert assert_dict_has_keys(result, target_keys + ['offset'])
def test_rawframe_evaluate(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
rawframe_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
rawframe_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
rawframe_dataset.evaluate(
[0] * len(rawframe_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
rawframe_dataset.evaluate(
[0] * len(rawframe_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = rawframe_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
| 6,071 | 35.8 | 75 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_audio_feature_dataset.py |
import os.path as osp
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets import AudioFeatureDataset
from .base import BaseTestDataset
class TestAudioFeatureDataset(BaseTestDataset):
def test_audio_feature_dataset(self):
audio_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix)
audio_infos = audio_dataset.video_infos
feature_path = osp.join(self.data_prefix, 'test.npy')
assert audio_infos == [
dict(audio_path=feature_path, total_frames=100, label=127)
] * 2
def test_audio_feature_pipeline(self):
target_keys = [
'audio_path', 'label', 'start_index', 'modality', 'audios',
'total_frames'
]
# Audio feature dataset not in test mode
audio_feature_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = audio_feature_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# Audio dataset in test mode
audio_feature_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = audio_feature_dataset[0]
assert assert_dict_has_keys(result, target_keys)
def test_audio_feature_evaluate(self):
audio_dataset = AudioFeatureDataset(
self.audio_feature_ann_file,
self.audio_feature_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
audio_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
audio_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
audio_dataset.evaluate(
[0] * len(audio_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = audio_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
| 2,753 | 34.307692 | 75 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_pose_dataset.py |
import numpy as np
import pytest
from mmaction.datasets import PoseDataset
from .base import BaseTestDataset
class TestPoseDataset(BaseTestDataset):
def test_pose_dataset(self):
ann_file = self.pose_ann_file
data_prefix = 'root'
dataset = PoseDataset(
ann_file=ann_file,
pipeline=[],
box_thr='0.5',
data_prefix=data_prefix)
assert len(dataset) == 100
item = dataset[0]
assert item['filename'].startswith(data_prefix)
dataset = PoseDataset(
ann_file=ann_file,
pipeline=[],
valid_ratio=0.2,
box_thr='0.9',
data_prefix=data_prefix)
assert len(dataset) == 84
for item in dataset:
assert item['filename'].startswith(data_prefix)
assert np.all(item['box_score'][item['anno_inds']] >= 0.9)
assert item['valid@0.9'] / item['total_frames'] >= 0.2
dataset = PoseDataset(
ann_file=ann_file,
pipeline=[],
valid_ratio=0.3,
box_thr='0.7',
data_prefix=data_prefix)
assert len(dataset) == 87
for item in dataset:
assert item['filename'].startswith(data_prefix)
assert np.all(item['box_score'][item['anno_inds']] >= 0.7)
assert item['valid@0.7'] / item['total_frames'] >= 0.3
class_prob = {i: 1 for i in range(400)}
dataset = PoseDataset(
ann_file=ann_file,
pipeline=[],
valid_ratio=0.3,
box_thr='0.7',
data_prefix=data_prefix,
class_prob=class_prob)
with pytest.raises(AssertionError):
dataset = PoseDataset(
ann_file=ann_file,
pipeline=[],
valid_ratio=0.2,
box_thr='0.55',
data_prefix=data_prefix)
| 1,919 | 29.967742 | 70 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_ssn_dataset.py |
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets import SSNDataset
from .base import BaseTestDataset
class TestSSNDataset(BaseTestDataset):
def test_proposal_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals',
'filename_tmpl', 'modality', 'out_proposals', 'reg_targets',
'proposal_scale_factor', 'proposal_labels', 'proposal_type',
'start_index'
]
# SSN Dataset not in test mode
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
result = proposal_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# SSN Dataset with random sampling proposals
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
video_centric=False)
result = proposal_dataset[0]
assert assert_dict_has_keys(result, target_keys)
target_keys = [
'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals',
'filename_tmpl', 'modality', 'relative_proposal_list',
'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts',
'start_index'
]
# SSN Dataset in test mode
proposal_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_test_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
test_mode=True)
result = proposal_dataset[0]
assert assert_dict_has_keys(result, target_keys)
def test_ssn_dataset(self):
# test ssn dataset
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'imgs'
assert ssn_infos[0]['total_frames'] == 5
# test ssn dataset with verbose
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
verbose=True)
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'imgs'
assert ssn_infos[0]['total_frames'] == 5
        # test ssn dataset with normalized proposal file
with pytest.raises(Exception):
ssn_dataset = SSNDataset(
self.proposal_norm_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
# test ssn dataset with reg_normalize_constants
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
reg_normalize_constants=[[[-0.0603, 0.0325], [0.0752, 0.1596]]])
ssn_infos = ssn_dataset.video_infos
assert ssn_infos[0]['video_id'] == 'imgs'
assert ssn_infos[0]['total_frames'] == 5
# test error case
with pytest.raises(TypeError):
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix,
aug_ratio=('error', 'error'))
ssn_infos = ssn_dataset.video_infos
def test_ssn_evaluate(self):
ssn_dataset = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg,
data_prefix=self.data_prefix)
ssn_dataset_topall = SSNDataset(
self.proposal_ann_file,
self.proposal_pipeline,
self.proposal_train_cfg,
self.proposal_test_cfg_topall,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
ssn_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
ssn_dataset.evaluate([0] * 5)
with pytest.raises(KeyError):
# unsupported metric
ssn_dataset.evaluate([0] * len(ssn_dataset), metrics='iou')
# evaluate mAP metric
results_relative_proposal_list = np.random.randn(16, 2)
results_activity_scores = np.random.randn(16, 21)
results_completeness_scores = np.random.randn(16, 20)
results_bbox_preds = np.random.randn(16, 20, 2)
results = [
dict(
relative_proposal_list=results_relative_proposal_list,
activity_scores=results_activity_scores,
completeness_scores=results_completeness_scores,
bbox_preds=results_bbox_preds)
]
eval_result = ssn_dataset.evaluate(results, metrics=['mAP'])
assert set(eval_result) == set([
            'mAP@0.10', 'mAP@0.20', 'mAP@0.30', 'mAP@0.40', 'mAP@0.50',
            'mAP@0.60', 'mAP@0.70', 'mAP@0.80', 'mAP@0.90'
])
# evaluate mAP metric without filtering topk
results_relative_proposal_list = np.random.randn(16, 2)
results_activity_scores = np.random.randn(16, 21)
results_completeness_scores = np.random.randn(16, 20)
results_bbox_preds = np.random.randn(16, 20, 2)
results = [
dict(
relative_proposal_list=results_relative_proposal_list,
activity_scores=results_activity_scores,
completeness_scores=results_completeness_scores,
bbox_preds=results_bbox_preds)
]
eval_result = ssn_dataset_topall.evaluate(results, metrics=['mAP'])
assert set(eval_result) == set([
            'mAP@0.10', 'mAP@0.20', 'mAP@0.30', 'mAP@0.40', 'mAP@0.50',
            'mAP@0.60', 'mAP@0.70', 'mAP@0.80', 'mAP@0.90'
])
| 6,680 | 36.960227 | 76 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_rawvideo_dataset.py |
import os.path as osp
from mmaction.datasets import RawVideoDataset
from .base import BaseTestDataset
class TestRawVideoDataset(BaseTestDataset):
def test_rawvideo_dataset(self):
# Try to load txt file
rawvideo_dataset = RawVideoDataset(
ann_file=self.rawvideo_test_anno_txt,
pipeline=self.rawvideo_pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='positive',
data_prefix=self.data_prefix)
result = rawvideo_dataset[0]
clipname = osp.join(self.data_prefix, 'rawvideo_dataset', 'part_0.mp4')
assert result['filename'] == clipname
# Try to load json file
rawvideo_dataset = RawVideoDataset(
ann_file=self.rawvideo_test_anno_json,
pipeline=self.rawvideo_pipeline,
clipname_tmpl='part_{}.mp4',
sampling_strategy='random',
data_prefix=self.data_prefix,
test_mode=True)
result = rawvideo_dataset[0]
| 1,006 | 32.566667 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_hvu_dataset.py |
import os.path as osp
import numpy as np
from numpy.testing import assert_array_almost_equal
from mmaction.datasets import HVUDataset
from .base import BaseTestDataset
class TestHVUDataset(BaseTestDataset):
def test_hvu_dataset(self):
hvu_frame_dataset = HVUDataset(
ann_file=self.hvu_frame_ann_file,
pipeline=self.frame_pipeline,
tag_categories=self.hvu_categories,
tag_category_nums=self.hvu_category_nums,
filename_tmpl=self.filename_tmpl,
data_prefix=self.data_prefix,
start_index=1)
hvu_frame_infos = hvu_frame_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'imgs')
assert hvu_frame_infos == [
dict(
frame_dir=frame_dir,
total_frames=5,
label=dict(
concept=[250, 131, 42, 51, 57, 155, 122],
object=[1570, 508],
event=[16],
action=[180],
scene=[206]),
categories=self.hvu_categories,
category_nums=self.hvu_category_nums,
filename_tmpl=self.filename_tmpl,
start_index=1,
modality='RGB')
] * 2
hvu_video_dataset = HVUDataset(
ann_file=self.hvu_video_ann_file,
pipeline=self.video_pipeline,
tag_categories=self.hvu_categories,
tag_category_nums=self.hvu_category_nums,
data_prefix=self.data_prefix)
hvu_video_infos = hvu_video_dataset.video_infos
filename = osp.join(self.data_prefix, 'tmp.mp4')
assert hvu_video_infos == [
dict(
filename=filename,
label=dict(
concept=[250, 131, 42, 51, 57, 155, 122],
object=[1570, 508],
event=[16],
action=[180],
scene=[206]),
categories=self.hvu_categories,
category_nums=self.hvu_category_nums)
] * 2
hvu_video_eval_dataset = HVUDataset(
ann_file=self.hvu_video_eval_ann_file,
pipeline=self.video_pipeline,
tag_categories=self.hvu_categories_for_eval,
tag_category_nums=self.hvu_category_nums_for_eval,
data_prefix=self.data_prefix)
results = [
np.array([
-1.59812844, 0.24459082, 1.38486497, 0.28801252, 1.09813449,
-0.28696971, 0.0637848, 0.22877678, -1.82406999
]),
np.array([
0.87904563, 1.64264224, 0.46382051, 0.72865088, -2.13712525,
1.28571358, 1.01320328, 0.59292737, -0.05502892
])
]
mAP = hvu_video_eval_dataset.evaluate(results)
assert_array_almost_equal(mAP['action_mAP'], 1.0)
assert_array_almost_equal(mAP['scene_mAP'], 0.5)
assert_array_almost_equal(mAP['object_mAP'], 0.75)
| 3,023 | 35.878049 | 76 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_audio_dataset.py |
import os.path as osp
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets import AudioDataset
from .base import BaseTestDataset
class TestAudioDataset(BaseTestDataset):
def test_audio_dataset(self):
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix)
audio_infos = audio_dataset.video_infos
wav_path = osp.join(self.data_prefix, 'test.wav')
assert audio_infos == [
dict(audio_path=wav_path, total_frames=100, label=127)
] * 2
def test_audio_pipeline(self):
target_keys = [
'audio_path', 'label', 'start_index', 'modality', 'audios_shape',
'length', 'sample_rate', 'total_frames'
]
# Audio dataset not in test mode
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix,
test_mode=False)
result = audio_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# Audio dataset in test mode
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix,
test_mode=True)
result = audio_dataset[0]
assert assert_dict_has_keys(result, target_keys)
def test_audio_evaluate(self):
audio_dataset = AudioDataset(
self.audio_ann_file,
self.audio_pipeline,
data_prefix=self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
audio_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
audio_dataset.evaluate([0] * 5)
with pytest.raises(TypeError):
# topk must be int or tuple of int
audio_dataset.evaluate(
[0] * len(audio_dataset),
metric_options=dict(top_k_accuracy=dict(topk=1.)))
with pytest.raises(KeyError):
# unsupported metric
audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou')
# evaluate top_k_accuracy and mean_class_accuracy metric
results = [np.array([0.1, 0.5, 0.4])] * 2
eval_result = audio_dataset.evaluate(
results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert set(eval_result) == set(
['top1_acc', 'top5_acc', 'mean_class_accuracy'])
| 2,606 | 32.423077 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/__init__.py |
from .base import BaseTestDataset
__all__ = ['BaseTestDataset']
| 65 | 15.5 | 33 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_activitynet_dataset.py |
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_equal
from mmaction.datasets import ActivityNetDataset
from .base import BaseTestDataset
class TestActivitynetDataset(BaseTestDataset):
def test_activitynet_dataset(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
activitynet_infos = activitynet_dataset.video_infos
assert activitynet_infos == [
dict(
video_name='v_test1',
duration_second=1,
duration_frame=30,
annotations=[dict(segment=[0.3, 0.6], label='Rock climbing')],
feature_frame=30,
fps=30.0,
rfps=30),
dict(
video_name='v_test2',
duration_second=2,
duration_frame=48,
annotations=[dict(segment=[1.0, 2.0], label='Drinking beer')],
feature_frame=48,
fps=24.0,
rfps=24.0)
]
def test_activitynet_proposals2json(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
result_dict = activitynet_dataset.proposals2json(results)
assert result_dict == dict(
test1=[{
'segment': [0.1, 0.9],
'score': 0.1
}],
test2=[{
'segment': [10.1, 20.9],
'score': 0.9
}])
result_dict = activitynet_dataset.proposals2json(results, True)
assert result_dict == dict(
test1=[{
'segment': [0.1, 0.9],
'score': 0.1
}],
test2=[{
'segment': [10.1, 20.9],
'score': 0.9
}])
def test_activitynet_evaluate(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
with pytest.raises(TypeError):
# results must be a list
activitynet_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
# The length of results must be equal to the dataset len
activitynet_dataset.evaluate([0] * 5)
with pytest.raises(KeyError):
# unsupported metric
activitynet_dataset.evaluate(
[0] * len(activitynet_dataset), metrics='iou')
# evaluate AR@AN metric
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
eval_result = activitynet_dataset.evaluate(results, metrics=['AR@AN'])
assert set(eval_result) == set(
['auc', 'AR@1', 'AR@5', 'AR@10', 'AR@100'])
def test_activitynet_dump_results(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file,
self.action_pipeline,
self.data_prefix)
# test dumping json file
results = [
dict(
video_name='v_test1',
proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),
dict(
video_name='v_test2',
proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])
]
dump_results = {
'version': 'VERSION 1.3',
'results': {
'test1': [{
'segment': [0.1, 0.9],
'score': 0.1
}],
'test2': [{
'segment': [10.1, 20.9],
'score': 0.9
}]
},
'external_data': {}
}
tmp_filename = osp.join(tempfile.gettempdir(), 'result.json')
activitynet_dataset.dump_results(results, tmp_filename, 'json')
assert osp.isfile(tmp_filename)
with open(tmp_filename, 'r+') as f:
load_obj = mmcv.load(f, file_format='json')
assert load_obj == dump_results
os.remove(tmp_filename)
# test dumping csv file
results = [('test_video', np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9,
10]]))]
with tempfile.TemporaryDirectory() as tmpdir:
activitynet_dataset.dump_results(results, tmpdir, 'csv')
load_obj = np.loadtxt(
osp.join(tmpdir, 'test_video.csv'),
dtype=np.float32,
delimiter=',',
skiprows=1)
assert_array_equal(
load_obj,
np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],
dtype=np.float32))
def test_action_pipeline(self):
target_keys = ['video_name', 'data_prefix']
# ActivityNet Dataset not in test mode
action_dataset = ActivityNetDataset(
self.action_ann_file,
self.action_pipeline,
self.data_prefix,
test_mode=False)
result = action_dataset[0]
assert assert_dict_has_keys(result, target_keys)
# ActivityNet Dataset in test mode
action_dataset = ActivityNetDataset(
self.action_ann_file,
self.action_pipeline,
self.data_prefix,
test_mode=True)
result = action_dataset[0]
assert assert_dict_has_keys(result, target_keys)
| 6,311 | 34.863636 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_repeat_dataset.py |
import numpy as np
from mmaction.datasets import RawframeDataset, RepeatDataset
from .base import BaseTestDataset
class TestRepeatDataset(BaseTestDataset):
def test_repeat_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file,
self.frame_pipeline,
self.data_prefix)
repeat_dataset = RepeatDataset(rawframe_dataset, 5)
assert len(repeat_dataset) == 10
result_a = repeat_dataset[0]
result_b = repeat_dataset[2]
assert set(result_a) == set(result_b)
for key in result_a:
if isinstance(result_a[key], np.ndarray):
assert np.equal(result_a[key], result_b[key]).all()
elif isinstance(result_a[key], list):
assert all(
np.array_equal(a, b)
for (a, b) in zip(result_a[key], result_b[key]))
else:
assert result_a[key] == result_b[key]
| 1,016 | 36.666667 | 68 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_datasets/test_ava_dataset.py |
import os.path as osp
import mmcv
import numpy as np
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.datasets import AVADataset
class TestAVADataset:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../../data', 'ava_dataset'))
cls.label_file = osp.join(cls.data_prefix, 'action_list.txt')
cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv')
cls.exclude_file = osp.join(cls.data_prefix,
'ava_excluded_timestamps_sample.csv')
cls.proposal_file = osp.join(cls.data_prefix,
'ava_proposals_sample.pkl')
cls.pipeline = [
            dict(type='SampleAVAFrames', clip_len=32, frame_interval=2)
]
cls.proposal = mmcv.load(cls.proposal_file)
def test_ava_dataset(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann'
]
ann_keys = ['gt_labels', 'gt_bboxes', 'entity_ids']
pkl_keys = ['0f39OWEqJ24,0902', '0f39OWEqJ24,0903', '_-Z6wFjXtGQ,0902']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert assert_dict_has_keys(ava_dataset.proposals, pkl_keys)
assert assert_dict_has_keys(ava_infos[0], target_keys)
assert assert_dict_has_keys(ava_infos[0]['ann'], ann_keys)
assert len(ava_infos) == 1
assert ava_infos[0]['frame_dir'] == osp.join(self.data_prefix,
'0f39OWEqJ24')
assert ava_infos[0]['video_id'] == '0f39OWEqJ24'
assert ava_infos[0]['timestamp'] == 902
assert ava_infos[0]['img_key'] == '0f39OWEqJ24,0902'
assert ava_infos[0]['shot_info'] == (0, 27000)
assert ava_infos[0]['fps'] == 30
assert len(ava_infos[0]['ann']) == 3
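        # Build the expected (1, 81) multi-hot vector for the three
        # ground-truth action ids.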
target_labels = np.array([12, 17, 79])
labels = np.zeros([81])
labels[target_labels] = 1.
target_labels = labels[None, ...]
assert_array_equal(ava_infos[0]['ann']['gt_labels'], target_labels)
assert_array_equal(ava_infos[0]['ann']['gt_bboxes'],
np.array([[0.031, 0.162, 0.67, 0.995]]))
assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0]))
# custom classes
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
label_file=self.label_file,
custom_classes=[17, 79],
num_classes=3,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
target_labels = np.array([1, 2])
labels = np.zeros([3])
labels[target_labels] = 1.
target_labels = labels[None, ...]
assert_array_equal(ava_infos[0]['ann']['gt_labels'], target_labels)
assert_array_equal(ava_infos[0]['ann']['gt_bboxes'],
np.array([[0.031, 0.162, 0.67, 0.995]]))
assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0]))
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
def test_ava_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'filename_tmpl', 'modality', 'start_index',
'timestamp_start', 'timestamp_end', 'proposals', 'scores',
'frame_inds', 'clip_len', 'frame_interval', 'gt_labels',
'gt_bboxes', 'entity_ids'
]
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
result = ava_dataset[0]
assert assert_dict_has_keys(result, target_keys)
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 900
assert result['timestamp_end'] == 1800
assert_array_equal(result['proposals'],
np.array([[0.011, 0.157, 0.655, 0.983]]))
assert_array_equal(result['scores'], np.array([0.998163]))
assert result['clip_len'] == 32
assert result['frame_interval'] == 2
assert len(result['frame_inds']) == 32
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
# Try to get a sample
result = ava_dataset[0]
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 900
assert result['timestamp_end'] == 1800
@staticmethod
def test_ava_evaluate():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../../data', 'eval_detection'))
ann_file = osp.join(data_prefix, 'gt.csv')
label_file = osp.join(data_prefix, 'action_list.txt')
ava_dataset = AVADataset(
ann_file, None, [], label_file=label_file, num_classes=4)
fake_result = [[
np.array([[0.362, 0.156, 0.969, 0.666, 0.106],
[0.442, 0.083, 0.721, 0.947, 0.162]]),
np.array([[0.288, 0.365, 0.766, 0.551, 0.706],
[0.178, 0.296, 0.707, 0.995, 0.223]]),
np.array([[0.417, 0.167, 0.843, 0.939, 0.015],
[0.35, 0.421, 0.57, 0.689, 0.427]])
],
[
np.array([[0.256, 0.338, 0.726, 0.799, 0.563],
[0.071, 0.256, 0.64, 0.75, 0.297]]),
np.array([[0.326, 0.036, 0.513, 0.991, 0.405],
[0.351, 0.035, 0.729, 0.936, 0.945]]),
np.array([[0.051, 0.005, 0.975, 0.942, 0.424],
[0.347, 0.05, 0.97, 0.944, 0.396]])
],
[
np.array([[0.39, 0.087, 0.833, 0.616, 0.447],
[0.461, 0.212, 0.627, 0.527, 0.036]]),
np.array([[0.022, 0.394, 0.93, 0.527, 0.109],
[0.208, 0.462, 0.874, 0.948, 0.954]]),
np.array([[0.206, 0.456, 0.564, 0.725, 0.685],
[0.106, 0.445, 0.782, 0.673, 0.367]])
]]
res = ava_dataset.evaluate(fake_result)
assert_array_almost_equal(res['mAP@0.5IOU'], 0.027777778)
# custom classes
ava_dataset = AVADataset(
ann_file,
None, [],
label_file=label_file,
num_classes=3,
custom_classes=[1, 3])
fake_result = [[
np.array([[0.362, 0.156, 0.969, 0.666, 0.106],
[0.442, 0.083, 0.721, 0.947, 0.162]]),
np.array([[0.417, 0.167, 0.843, 0.939, 0.015],
[0.35, 0.421, 0.57, 0.689, 0.427]])
],
[
np.array([[0.256, 0.338, 0.726, 0.799, 0.563],
[0.071, 0.256, 0.64, 0.75, 0.297]]),
np.array([[0.051, 0.005, 0.975, 0.942, 0.424],
[0.347, 0.05, 0.97, 0.944, 0.396]])
],
[
np.array([[0.39, 0.087, 0.833, 0.616, 0.447],
[0.461, 0.212, 0.627, 0.527, 0.036]]),
np.array([[0.206, 0.456, 0.564, 0.725, 0.685],
[0.106, 0.445, 0.782, 0.673, 0.367]])
]]
res = ava_dataset.evaluate(fake_result)
assert_array_almost_equal(res['mAP@0.5IOU'], 0.04166667)
| 9,151 | 40.411765 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/base.py |
import numpy as np
from numpy.testing import assert_array_almost_equal
def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1):
"""Check if the result_bbox is in correspond to result_imgs."""
def check_single_crop(origin_imgs, result_imgs, result_bbox):
result_img_shape = result_imgs[0].shape[:2]
crop_w = result_bbox[2] - result_bbox[0]
crop_h = result_bbox[3] - result_bbox[1]
crop_shape = (crop_h, crop_w)
if not crop_shape == result_img_shape:
return False
left, top, right, bottom = result_bbox
return np.array_equal(
np.array(origin_imgs)[:, top:bottom, left:right, :],
np.array(result_imgs))
if result_bbox.ndim == 1:
return check_single_crop(origin_imgs, result_imgs, result_bbox)
if result_bbox.ndim == 2:
num_batch = len(origin_imgs)
for i, bbox in enumerate(result_bbox):
if num_crops == 10:
if (i // num_batch) % 2 == 0:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
else:
flag = check_single_crop([origin_imgs[i % num_batch]],
[np.flip(result_imgs[i], axis=1)],
bbox)
else:
flag = check_single_crop([origin_imgs[i % num_batch]],
[result_imgs[i]], bbox)
if not flag:
return False
return True
else:
# bbox has a wrong dimension
return False
def check_flip(origin_imgs, result_imgs, flip_type):
"""Check if the origin_imgs are flipped correctly into result_imgs in
different flip_types."""
n, _, _, _ = np.shape(origin_imgs)
if flip_type == 'horizontal':
for i in range(n):
if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):
return False
else:
# yapf: disable
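        # transpose -> fliplr -> transpose back composes to a vertical
        # flip, i.e. the same result as np.flipud(origin_imgs[i]).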
for i in range(n):
if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501
return False
# yapf: enable
return True
def check_normalize(origin_imgs, result_imgs, norm_cfg):
"""Check if the origin_imgs are normalized correctly into result_imgs in a
given norm_cfg."""
target_imgs = result_imgs.copy()
target_imgs *= norm_cfg['std']
target_imgs += norm_cfg['mean']
if norm_cfg['to_bgr']:
target_imgs = target_imgs[..., ::-1].copy()
assert_array_almost_equal(origin_imgs, target_imgs, decimal=4)
| 2,707 | 37.685714 | 130 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_normalization.py |
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import Normalize
from .base import check_normalize
class TestNormalization:
@staticmethod
def test_normalize():
with pytest.raises(TypeError):
# mean must be list, tuple or np.ndarray
Normalize(
dict(mean=[123.675, 116.28, 103.53]), [58.395, 57.12, 57.375])
with pytest.raises(TypeError):
# std must be list, tuple or np.ndarray
Normalize([123.675, 116.28, 103.53],
dict(std=[58.395, 57.12, 57.375]))
target_keys = ['imgs', 'img_norm_cfg', 'modality']
# normalize imgs in RGB format
imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
results = dict(imgs=imgs, modality='RGB')
config = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=False)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert assert_dict_has_keys(normalize_results, target_keys)
check_normalize(imgs, normalize_results['imgs'],
normalize_results['img_norm_cfg'])
# normalize flow imgs
imgs = list(np.random.rand(4, 240, 320).astype(np.float32))
results = dict(imgs=imgs, modality='Flow')
config = dict(mean=[128, 128], std=[128, 128])
normalize = Normalize(**config)
normalize_results = normalize(results)
assert assert_dict_has_keys(normalize_results, target_keys)
assert normalize_results['imgs'].shape == (2, 240, 320, 2)
x_components = np.array(imgs[0::2])
y_components = np.array(imgs[1::2])
x_components = (x_components - config['mean'][0]) / config['std'][0]
y_components = (y_components - config['mean'][1]) / config['std'][1]
result_imgs = np.stack([x_components, y_components], axis=-1)
assert np.all(np.isclose(result_imgs, normalize_results['imgs']))
# normalize imgs in BGR format
imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32))
results = dict(imgs=imgs, modality='RGB')
config = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=True)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert assert_dict_has_keys(normalize_results, target_keys)
check_normalize(imgs, normalize_results['imgs'],
normalize_results['img_norm_cfg'])
assert normalize.__repr__() == (
normalize.__class__.__name__ +
f'(mean={np.array([123.675, 116.28, 103.53])}, ' +
f'std={np.array([58.395, 57.12, 57.375])}, to_bgr={True}, '
f'adjust_magnitude={False})')
| 2,892 | 39.746479 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_transform.py |
import copy
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import RandomRescale, RandomScale, Resize
from mmaction.datasets.pipelines.augmentations import PoseCompact
class TestTransform:
@staticmethod
def test_random_rescale():
with pytest.raises(AssertionError):
# scale_range must be a tuple of int
RandomRescale(scale_range=224)
with pytest.raises(AssertionError):
# scale_range must be a tuple of int
RandomRescale(scale_range=(224.0, 256.0))
with pytest.raises(AssertionError):
# scale_range[0] > scale_range[1], which is wrong
RandomRescale(scale_range=(320, 256))
with pytest.raises(AssertionError):
# scale_range[0] <= 0, which is wrong
RandomRescale(scale_range=(0, 320))
target_keys = ['imgs', 'short_edge', 'img_shape']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 340, 3))
results = dict(imgs=imgs, img_shape=(256, 340), modality='RGB')
random_rescale = RandomRescale(scale_range=(300, 400))
random_rescale_result = random_rescale(results)
assert assert_dict_has_keys(random_rescale_result, target_keys)
h, w = random_rescale_result['img_shape']
# check rescale
assert np.abs(h / 256 - w / 340) < eps
assert 300 / 256 - eps <= h / 256 <= 400 / 256 + eps
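        # RandomRescale samples a new short edge in scale_range and keeps
        # the aspect ratio, so h / 256 == w / 340 (up to rounding) and the
        # short-edge scale stays within [300/256, 400/256].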
assert repr(random_rescale) == (f'{random_rescale.__class__.__name__}'
f'(scale_range={(300, 400)}, '
'interpolation=bilinear)')
@staticmethod
def test_resize():
with pytest.raises(ValueError):
# scale must be positive
Resize(-0.5)
with pytest.raises(TypeError):
# scale must be tuple of int
Resize('224')
target_keys = [
'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality'
]
# test resize for flow images
imgs = list(np.random.rand(2, 240, 320))
kp = np.array([60, 60]).reshape([1, 1, 1, 2])
results = dict(imgs=imgs, keypoint=kp, modality='Flow')
resize = Resize(scale=(160, 80), keep_ratio=False)
resize_results = resize(results)
assert assert_dict_has_keys(resize_results, target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[.5, 1. / 3.], dtype=np.float32))
assert resize_results['img_shape'] == (80, 160)
kp = resize_results['keypoint'][0, 0, 0]
assert_array_almost_equal(kp, np.array([30, 20]))
# scale with -1 to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
results['gt_bboxes'] = np.array([[0, 0, 320, 240]])
results['proposals'] = np.array([[0, 0, 320, 240]])
resize = Resize(scale=(-1, 256), keep_ratio=True)
resize_results = resize(results)
assert assert_dict_has_keys(resize_results, target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (256, 341)
# scale with a normal tuple (320, 320) to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(320, 320), keep_ratio=False)
resize_results = resize(results)
assert assert_dict_has_keys(resize_results, target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[1, 320 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (320, 320)
# scale with a normal tuple (341, 256) to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(341, 256), keep_ratio=False)
resize_results = resize(results)
assert assert_dict_has_keys(resize_results, target_keys)
assert np.all(resize_results['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results['img_shape'] == (256, 341)
assert repr(resize) == (
resize.__class__.__name__ +
f'(scale={(341, 256)}, keep_ratio={False}, ' +
f'interpolation=bilinear, lazy={False})')
@staticmethod
def test_random_scale():
scales = ((200, 64), (250, 80))
with pytest.raises(ValueError):
RandomScale(scales, 'unsupport')
with pytest.raises(ValueError):
random_scale = RandomScale([(800, 256), (1000, 320), (800, 320)])
random_scale({})
imgs = list(np.random.rand(2, 340, 256, 3))
results = dict(imgs=imgs, img_shape=(340, 256))
results_ = copy.deepcopy(results)
random_scale_range = RandomScale(scales)
results_ = random_scale_range(results_)
assert 200 <= results_['scale'][0] <= 250
assert 64 <= results_['scale'][1] <= 80
results_ = copy.deepcopy(results)
random_scale_value = RandomScale(scales, 'value')
results_ = random_scale_value(results_)
assert results_['scale'] in scales
random_scale_single = RandomScale([(200, 64)])
results_ = copy.deepcopy(results)
results_ = random_scale_single(results_)
assert results_['scale'] == (200, 64)
assert repr(random_scale_range) == (
f'{random_scale_range.__class__.__name__}'
f'(scales={((200, 64), (250, 80))}, '
'mode=range)')
class TestPoseCompact:
@staticmethod
def test_pose_compact():
results = {}
results['img_shape'] = (100, 100)
fake_kp = np.zeros([1, 4, 2, 2])
fake_kp[:, :, 0] = [10, 10]
fake_kp[:, :, 1] = [90, 90]
results['keypoint'] = fake_kp
pose_compact = PoseCompact(
padding=0, threshold=0, hw_ratio=None, allow_imgpad=False)
inp = copy.deepcopy(results)
ret = pose_compact(inp)
assert ret['img_shape'] == (80, 80)
assert str(pose_compact) == (
'PoseCompact(padding=0, threshold=0, hw_ratio=None, '
'allow_imgpad=False)')
pose_compact = PoseCompact(
padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=False)
inp = copy.deepcopy(results)
ret = pose_compact(inp)
assert ret['img_shape'] == (100, 100)
pose_compact = PoseCompact(
padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=True)
inp = copy.deepcopy(results)
ret = pose_compact(inp)
assert ret['img_shape'] == (104, 104)
pose_compact = PoseCompact(
padding=0, threshold=100, hw_ratio=None, allow_imgpad=False)
inp = copy.deepcopy(results)
ret = pose_compact(inp)
assert ret['img_shape'] == (100, 100)
pose_compact = PoseCompact(
padding=0, threshold=0, hw_ratio=0.75, allow_imgpad=True)
inp = copy.deepcopy(results)
ret = pose_compact(inp)
assert ret['img_shape'] == (80, 106)
| 7,393 | 37.113402 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_misc.py |
from mmaction.datasets.pipelines.augmentations import (_combine_quadruple,
_flip_quadruple)
class TestQuadrupleOps:
@staticmethod
def test_combine_quadruple():
a = (0.1, 0.1, 0.5, 0.5)
b = (0.3, 0.3, 0.7, 0.7)
res = _combine_quadruple(a, b)
assert res == (0.25, 0.25, 0.35, 0.35)
@staticmethod
def test_flip_quadruple():
a = (0.1, 0.1, 0.5, 0.5)
res = _flip_quadruple(a)
assert res == (0.4, 0.1, 0.5, 0.5)
| 537 | 27.315789 | 74 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_imgaug.py |
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import CenterCrop, Imgaug
from .base import check_flip
class TestAugmentations:
@staticmethod
def test_imgaug():
with pytest.raises(ValueError):
# transforms only support one string, 'default'
Imgaug(transforms='test')
with pytest.raises(ValueError):
# transforms only support string or list of dicts
# or iaa.Augmenter object
Imgaug(transforms=dict(type='Rotate'))
with pytest.raises(AssertionError):
# each dict must have a `type` key
Imgaug(transforms=[dict(rotate=(-30, 30))])
with pytest.raises(AttributeError):
# `type` must be available in imgaug
Imgaug(transforms=[dict(type='BlaBla')])
with pytest.raises(TypeError):
# `type` must be str or iaa available type
Imgaug(transforms=[dict(type=CenterCrop)])
from imgaug import augmenters as iaa
# check default configs
target_keys = ['imgs', 'img_shape', 'modality']
imgs = list(np.random.randint(0, 255, (1, 64, 64, 3)).astype(np.uint8))
results = dict(imgs=imgs, modality='RGB')
default_imgaug = Imgaug(transforms='default')
default_results = default_imgaug(results)
assert_dict_has_keys(default_results, target_keys)
assert default_results['img_shape'] == (64, 64)
# check flip (both images and bboxes)
target_keys = ['imgs', 'gt_bboxes', 'proposals', 'img_shape']
imgs = list(np.random.rand(1, 64, 64, 3).astype(np.float32))
results = dict(
imgs=imgs,
modality='RGB',
proposals=np.array([[0, 0, 25, 35]]),
img_shape=(64, 64),
gt_bboxes=np.array([[0, 0, 25, 35]]))
imgaug_flip = Imgaug(transforms=[dict(type='Fliplr')])
flip_results = imgaug_flip(results)
assert assert_dict_has_keys(flip_results, target_keys)
assert check_flip(imgs, flip_results['imgs'], 'horizontal')
assert_array_almost_equal(flip_results['gt_bboxes'],
np.array([[39, 0, 64, 35]]))
assert_array_almost_equal(flip_results['proposals'],
np.array([[39, 0, 64, 35]]))
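        # Fliplr mirrors x-coordinates as x' = W - x, so the box
        # [0, 0, 25, 35] on a 64-pixel-wide image maps to [39, 0, 64, 35].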
transforms = iaa.Sequential([iaa.Fliplr()])
assert repr(imgaug_flip) == f'Imgaug(transforms={transforms})'
# check crop (both images and bboxes)
target_keys = ['crop_bbox', 'gt_bboxes', 'imgs', 'img_shape']
imgs = list(np.random.rand(1, 122, 122, 3))
results = dict(
imgs=imgs,
modality='RGB',
img_shape=(122, 122),
gt_bboxes=np.array([[1.5, 2.5, 110, 64]]))
imgaug_center_crop = Imgaug(transforms=[
dict(
type=iaa.CropToFixedSize,
width=100,
height=100,
position='center')
])
crop_results = imgaug_center_crop(results)
assert_dict_has_keys(crop_results, target_keys)
assert_array_almost_equal(crop_results['gt_bboxes'],
np.array([[0., 0., 99., 53.]]))
assert 'proposals' not in results
transforms = iaa.Sequential(
[iaa.CropToFixedSize(width=100, height=100, position='center')])
assert repr(imgaug_center_crop) == f'Imgaug(transforms={transforms})'
# check resize (images only)
target_keys = ['imgs', 'img_shape']
imgs = list(np.random.rand(1, 64, 64, 3))
results = dict(imgs=imgs, modality='RGB')
transforms = iaa.Resize(32)
imgaug_resize = Imgaug(transforms=transforms)
resize_results = imgaug_resize(results)
assert_dict_has_keys(resize_results, target_keys)
assert resize_results['img_shape'] == (32, 32)
assert repr(imgaug_resize) == f'Imgaug(transforms={transforms})'
| 4,067
| 39.277228
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_lazy.py
|
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (CenterCrop, Flip, Fuse,
MultiScaleCrop, RandomCrop,
RandomResizedCrop, Resize)
from .base import check_crop, check_flip
class TestLazy:
@staticmethod
def test_init_lazy():
from mmaction.datasets.pipelines.augmentations import \
_init_lazy_if_proper # noqa: E501
with pytest.raises(AssertionError):
# "lazy" is present in results although lazy operations are disabled
result = dict(lazy=dict(), img_shape=[64, 64])
_init_lazy_if_proper(result, False)
lazy_keys = [
'original_shape', 'crop_bbox', 'flip', 'flip_direction',
'interpolation'
]
# 'img_shape' not in results
result = dict(imgs=list(np.random.randn(3, 64, 64, 3)))
_init_lazy_if_proper(result, True)
assert assert_dict_has_keys(result, ['imgs', 'lazy', 'img_shape'])
assert assert_dict_has_keys(result['lazy'], lazy_keys)
# 'img_shape' in results
result = dict(img_shape=[64, 64])
_init_lazy_if_proper(result, True)
assert assert_dict_has_keys(result, ['lazy', 'img_shape'])
assert assert_dict_has_keys(result['lazy'], lazy_keys)
# do not use lazy operation
result = dict(img_shape=[64, 64])
_init_lazy_if_proper(result, False)
assert assert_dict_has_keys(result, ['img_shape'])
assert 'lazy' not in result
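# Summary of the contract exercised above: with lazy=False,
# _init_lazy_if_proper asserts that no stale 'lazy' entry is present;
# with lazy=True it guarantees 'img_shape' exists and seeds
# results['lazy'] with the bookkeeping keys listed in `lazy_keys`.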
@staticmethod
def test_random_crop_lazy():
with pytest.raises(TypeError):
# size must be an int
RandomCrop(size=(112, 112), lazy=True)
with pytest.raises(AssertionError):
# "size > height" or "size > width" is not allowed
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=320, lazy=True)
random_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy']
# General case
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
# Test the case that no need for cropping
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
# Test the one-side-equal case
imgs = list(np.random.rand(2, 224, 225, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224, lazy=True)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert 'lazy' not in random_crop_result_fuse
assert check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result_fuse['img_shape']
assert h == w == 224
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(size={224}, lazy={True})')
@staticmethod
def test_random_resized_crop_lazy():
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
with pytest.raises(AssertionError):
# area_range[0] > area_range[1], which is wrong
random_crop = RandomResizedCrop(area_range=(0.9, 0.7), lazy=True)
random_crop(results)
with pytest.raises(AssertionError):
# aspect_ratio_range[0] is negative, which is wrong
random_crop = RandomResizedCrop(
aspect_ratio_range=(-0.1, 2.0), lazy=True)
random_crop(results)
random_crop = RandomResizedCrop(lazy=True)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert ((0.08 - eps <= h * w / 256 / 341)
and (h * w / 256 / 341 <= 1 + eps))
assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.)
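# Worked check: with the default area_range=(0.08, 1.0) and
# aspect_ratio_range=(3/4, 4/3), a crop from a 256x341 frame must satisfy
# 0.08 <= h * w / (256 * 341) <= 1 and 3/4 <= h / w <= 4/3; `eps` absorbs
# the rounding of h and w to whole pixels.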
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(area_range={(0.08, 1.0)}, '
f'aspect_ratio_range={(3 / 4, 4 / 3)}, '
f'lazy={True})')
random_crop = RandomResizedCrop(
area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1), lazy=True)
# Test the fallback case: this area and aspect-ratio combination is
# infeasible, so the crop falls back to the full short side
imgs = np.random.rand(2, 256, 341, 3)
results = dict(imgs=imgs)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert id(imgs) == id(random_crop_result['imgs'])
random_crop_result_fuse = Fuse()(random_crop_result)
assert check_crop(imgs, random_crop_result_fuse['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 256
@staticmethod
def test_multi_scale_crop_lazy():
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop(0.5, lazy=True)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop('224', lazy=True)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop([224, 224], lazy=True)
with pytest.raises(TypeError):
# scales must be tuple.
MultiScaleCrop(
224, scales=[
1,
], lazy=True)
with pytest.raises(ValueError):
# num_fixed_crops must be 5 or 13
MultiScaleCrop(224, num_fixed_crops=6, lazy=True)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales']
# MultiScaleCrop with normal crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert assert_dict_has_keys(multi_scale_crop_result, target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with more fixed crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
num_fixed_crops=13,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert assert_dict_has_keys(multi_scale_crop_result, target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with random crop.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=True,
max_wh_scale_gap=0,
lazy=True)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_result = multi_scale_crop(results)
assert id(imgs) == id(multi_scale_crop_result['imgs'])
assert assert_dict_has_keys(multi_scale_crop_result, target_keys)
multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result)
assert check_crop(imgs, multi_scale_crop_result_fuse['imgs'],
multi_scale_crop_result['crop_bbox'])
assert (multi_scale_crop_result_fuse['img_shape'] in [(256, 256),
(204, 204)])
assert repr(multi_scale_crop) == (
f'{multi_scale_crop.__class__.__name__}'
f'(input_size={(224, 224)}, scales={(1, 0.8)}, '
f'max_wh_scale_gap={0}, random_crop={True}, '
f'num_fixed_crops={5}, lazy={True})')
@staticmethod
def test_resize_lazy():
with pytest.raises(ValueError):
# scale must be positive
Resize(-0.5, lazy=True)
with pytest.raises(TypeError):
# scale must be tuple of int
Resize('224', lazy=True)
target_keys = [
'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality'
]
# scale with -1 to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(-1, 256), keep_ratio=True, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert assert_dict_has_keys(resize_results, target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (256, 341)
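# Worked check: scale=(-1, 256) with keep_ratio=True rescales the short
# side (240) to 256, so the long side becomes round(320 * 256 / 240) = 341
# and scale_factor is [341 / 320, 256 / 240], as asserted above.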
# scale with a normal tuple (320, 320) to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(320, 320), keep_ratio=False, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert assert_dict_has_keys(resize_results, target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[1, 320 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (320, 320)
# scale with a normal tuple (341, 256) to indicate np.inf
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs, modality='RGB')
resize = Resize(scale=(341, 256), keep_ratio=False, lazy=True)
resize_results = resize(results)
assert id(imgs) == id(resize_results['imgs'])
assert assert_dict_has_keys(resize_results, target_keys)
resize_results_fuse = Fuse()(resize_results)
assert np.all(resize_results_fuse['scale_factor'] == np.array(
[341 / 320, 256 / 240], dtype=np.float32))
assert resize_results_fuse['img_shape'] == (256, 341)
assert repr(resize) == (f'{resize.__class__.__name__}'
f'(scale={(341, 256)}, keep_ratio={False}, '
f'interpolation=bilinear, lazy={True})')
@staticmethod
def test_flip_lazy():
with pytest.raises(ValueError):
Flip(direction='vertically', lazy=True)
target_keys = ['imgs', 'flip_direction', 'modality']
# do not flip imgs.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=0, direction='horizontal', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert assert_dict_has_keys(flip_results, target_keys)
flip_results_fuse = Fuse()(flip_results)
assert np.equal(imgs, results['imgs']).all()
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
# always flip imgs horizontally.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=1, direction='horizontal', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert assert_dict_has_keys(flip_results, target_keys)
flip_results_fuse = Fuse()(flip_results)
assert check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
# always flip imgs vertically.
imgs = list(np.random.rand(2, 64, 64, 3))
imgs_tmp = imgs.copy()
results = dict(imgs=imgs_tmp, modality='RGB')
flip = Flip(flip_ratio=1, direction='vertical', lazy=True)
flip_results = flip(results)
assert id(imgs_tmp) == id(flip_results['imgs'])
assert assert_dict_has_keys(flip_results, target_keys)
flip_results_fuse = Fuse()(flip_results)
assert check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert flip_results_fuse['imgs'][0].shape == (64, 64, 3)
assert repr(flip) == (f'{flip.__class__.__name__}'
f'(flip_ratio={1}, direction=vertical, '
f'flip_label_map={None}, lazy={True})')
@staticmethod
def test_center_crop_lazy():
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop([224, 224])
# center crop with crop_size 224
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs)
center_crop = CenterCrop(crop_size=224, lazy=True)
center_crop_results = center_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert assert_dict_has_keys(center_crop_results, target_keys)
center_crop_results_fuse = Fuse()(center_crop_results)
assert check_crop(imgs, center_crop_results_fuse['imgs'],
center_crop_results['crop_bbox'])
assert np.all(center_crop_results_fuse['crop_bbox'] == np.array(
[48, 8, 272, 232]))
assert center_crop_results_fuse['img_shape'] == (224, 224)
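# Worked check: a 224x224 center crop of a 320x240 frame starts at
# x1 = (320 - 224) // 2 = 48 and y1 = (240 - 224) // 2 = 8, giving the
# crop_bbox [48, 8, 48 + 224, 8 + 224] = [48, 8, 272, 232].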
assert repr(center_crop) == (f'{center_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, lazy={True})')
| 16,650
| 42.703412
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/__init__.py
|
from .base import check_crop, check_flip, check_normalize
__all__ = ['check_crop', 'check_flip', 'check_normalize']
| 117
| 28.5
| 57
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_flip.py
|
import copy
import mmcv
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import Flip
from .base import check_flip
class TestFlip:
@staticmethod
def test_flip():
with pytest.raises(ValueError):
# direction must be in ['horizontal', 'vertical']
Flip(direction='vertically')
target_keys = ['imgs', 'flip_direction', 'modality']
# do not flip imgs.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
flip = Flip(flip_ratio=0, direction='horizontal')
flip_results = flip(results)
assert assert_dict_has_keys(flip_results, target_keys)
assert np.array_equal(imgs, results['imgs'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
# always flip imgs horizontally.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
results['gt_bboxes'] = np.array([[0, 0, 60, 60]])
results['proposals'] = np.array([[0, 0, 60, 60]])
flip = Flip(flip_ratio=1, direction='horizontal')
flip_results = flip(results)
assert assert_dict_has_keys(flip_results, target_keys)
if flip_results['flip'] is True:
assert check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
# flip flow images horizontally
imgs = [
np.arange(16).reshape(4, 4).astype(np.float32),
np.arange(16, 32).reshape(4, 4).astype(np.float32)
]
results = dict(imgs=copy.deepcopy(imgs), modality='Flow')
flip = Flip(flip_ratio=1, direction='horizontal')
flip_results = flip(results)
assert assert_dict_has_keys(flip_results, target_keys)
imgs = [x.reshape(4, 4, 1) for x in imgs]
flip_results['imgs'] = [
x.reshape(4, 4, 1) for x in flip_results['imgs']
]
if flip_results['flip'] is True:
assert check_flip([imgs[0]],
[mmcv.iminvert(flip_results['imgs'][0])],
flip_results['flip_direction'])
assert check_flip([imgs[1]], [flip_results['imgs'][1]],
flip_results['flip_direction'])
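# Note on the flow case above: a horizontal flip reverses the sign of the
# horizontal motion component, which for intensity-encoded flow maps
# amounts to inverting pixel values; hence the x-channel (imgs[0]) is
# compared through mmcv.iminvert while the y-channel (imgs[1]) is
# compared directly.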
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
# always flip imgs vertically.
imgs = list(np.random.rand(2, 64, 64, 3))
results = dict(imgs=copy.deepcopy(imgs), modality='RGB')
flip = Flip(flip_ratio=1, direction='vertical')
flip_results = flip(results)
assert assert_dict_has_keys(flip_results, target_keys)
if flip_results['flip'] is True:
assert check_flip(imgs, flip_results['imgs'],
flip_results['flip_direction'])
assert id(flip_results['imgs']) == id(results['imgs'])
assert np.shape(flip_results['imgs']) == np.shape(imgs)
assert repr(flip) == (f'{flip.__class__.__name__}'
f'(flip_ratio={1}, direction=vertical, '
f'flip_label_map={None}, lazy={False})')
# transform label for the flipped image with the specific label.
_flip_label_map = {4: 6}
imgs = list(np.random.rand(2, 64, 64, 3))
# the label should be mapped.
results = dict(imgs=copy.deepcopy(imgs), modality='RGB', label=4)
flip = Flip(
flip_ratio=1,
direction='horizontal',
flip_label_map=_flip_label_map)
flip_results = flip(results)
assert results['label'] == 6
# the label should not be mapped.
results = dict(imgs=copy.deepcopy(imgs), modality='RGB', label=3)
flip = Flip(
flip_ratio=1,
direction='horizontal',
flip_label_map=_flip_label_map)
flip_results = flip(results)
assert results['label'] == 3
# flip the keypoints
results = dict(
keypoint=np.array([[1, 1], [63, 63]]).reshape([1, 1, 2, 2]),
modality='Pose',
img_shape=(64, 64))
flip = Flip(
flip_ratio=1, direction='horizontal', left_kp=[0], right_kp=[1])
flip_results = flip(results)
assert_array_almost_equal(flip_results['keypoint'][0, 0],
np.array([[1, 63], [63, 1]]))
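# Worked check: with img_shape (64, 64) a horizontal flip maps x to
# W - x, so (1, 1) -> (63, 1) and (63, 63) -> (1, 63); left_kp=[0] and
# right_kp=[1] then swap the two keypoints, yielding [[1, 63], [63, 1]].
# The second case below keeps the unswapped flipped order
# [[63, 1], [1, 63]] because no left/right pairs are given.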
results = dict(
keypoint=np.array([[1, 1], [63, 63]]).reshape([1, 1, 2, 2]),
modality='Pose',
img_shape=(64, 64))
flip = Flip(
flip_ratio=1, direction='horizontal', left_kp=[], right_kp=[])
flip_results = flip(results)
assert_array_almost_equal(flip_results['keypoint'][0, 0],
np.array([[63, 1], [1, 63]]))
with pytest.raises(AssertionError):
results = dict(
keypoint=np.array([[1, 1], [63, 63]]).reshape([1, 1, 2, 2]),
modality='Pose',
img_shape=(64, 64))
flip = Flip(
flip_ratio=1, direction='vertical', left_kp=[], right_kp=[])
flip_results = flip(results)
| 5,562
| 39.904412
| 76
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_color.py
|
import numpy as np
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_equal
from mmaction.datasets.pipelines import ColorJitter
class TestColor:
@staticmethod
def test_color_jitter():
imgs = list(
np.random.randint(0, 255, size=(3, 112, 112, 3), dtype=np.uint8))
results = dict(imgs=imgs)
eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
eig_vec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype=np.float32)
color_jitter = ColorJitter()
assert_array_equal(color_jitter.eig_val, eig_val)
assert_array_equal(color_jitter.eig_vec, eig_vec)
assert color_jitter.alpha_std == 0.1
assert color_jitter.color_space_aug is False
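# Context for the defaults checked above: eig_val and eig_vec match the
# widely used AlexNet-style PCA lighting statistics (the ImageNet RGB
# eigenvalues scaled by 255 and, up to sign, the corresponding
# eigenvectors), and alpha_std is the standard deviation of the
# per-channel lighting noise.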
color_jitter_results = color_jitter(results)
target_keys = [
'imgs', 'eig_val', 'eig_vec', 'alpha_std', 'color_space_aug'
]
assert assert_dict_has_keys(color_jitter_results, target_keys)
assert np.shape(color_jitter_results['imgs']) == (3, 112, 112, 3)
assert_array_equal(color_jitter_results['eig_val'], eig_val)
assert_array_equal(color_jitter_results['eig_vec'], eig_vec)
assert color_jitter_results['alpha_std'] == 0.1
assert color_jitter_results['color_space_aug'] is False
custom_eig_val = np.ones(3, )
custom_eig_vec = np.ones((3, 3))
imgs = list(
np.random.randint(0, 255, size=(3, 64, 80, 3), dtype=np.uint8))
results = dict(imgs=imgs)
custom_color_jitter = ColorJitter(True, 0.5, custom_eig_val,
custom_eig_vec)
assert_array_equal(color_jitter.eig_val, eig_val)
assert_array_equal(color_jitter.eig_vec, eig_vec)
assert custom_color_jitter.alpha_std == 0.5
assert custom_color_jitter.color_space_aug is True
custom_color_jitter_results = custom_color_jitter(results)
assert np.shape(custom_color_jitter_results['imgs']) == (3, 64, 80, 3)
assert_array_equal(custom_color_jitter_results['eig_val'],
custom_eig_val)
assert_array_equal(custom_color_jitter_results['eig_vec'],
custom_eig_vec)
assert custom_color_jitter_results['alpha_std'] == 0.5
assert custom_color_jitter_results['color_space_aug'] is True
color_jitter = ColorJitter()
assert repr(color_jitter) == (f'{color_jitter.__class__.__name__}('
f'color_space_aug={False}, '
f'alpha_std={0.1}, '
f'eig_val={eig_val}, '
f'eig_vec={eig_vec})')
| 2,886
| 43.415385
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_audio.py
|
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import AudioAmplify, MelSpectrogram
class TestAudio:
@staticmethod
def test_audio_amplify():
target_keys = ['audios', 'amplify_ratio']
with pytest.raises(TypeError):
# ratio should be float
AudioAmplify(1)
audio = (np.random.rand(8, ))
results = dict(audios=audio)
amplifier = AudioAmplify(1.5)
results = amplifier(results)
assert assert_dict_has_keys(results, target_keys)
assert repr(amplifier) == (f'{amplifier.__class__.__name__}'
f'(ratio={amplifier.ratio})')
@staticmethod
def test_melspectrogram():
target_keys = ['audios']
with pytest.raises(TypeError):
# ratio should be float
MelSpectrogram(window_size=12.5)
audio = (np.random.rand(1, 160000))
# test padding
results = dict(audios=audio, sample_rate=16000)
results['num_clips'] = 1
results['sample_rate'] = 16000
mel = MelSpectrogram()
results = mel(results)
assert assert_dict_has_keys(results, target_keys)
# test truncating
audio = (np.random.rand(1, 160000))
results = dict(audios=audio, sample_rate=16000)
results['num_clips'] = 1
results['sample_rate'] = 16000
mel = MelSpectrogram(fixed_length=1)
results = mel(results)
assert assert_dict_has_keys(results, target_keys)
assert repr(mel) == (f'{mel.__class__.__name__}'
f'(window_size={mel.window_size}), '
f'step_size={mel.step_size}, '
f'n_mels={mel.n_mels}, '
f'fixed_length={mel.fixed_length})')
| 1,866
| 33.574074
| 68
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_augmentations/test_crop.py
|
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (CenterCrop, MultiGroupCrop,
MultiScaleCrop, RandomCrop,
RandomResizedCrop, TenCrop, ThreeCrop)
from .base import check_crop
class TestCrops:
@staticmethod
def test_random_crop():
with pytest.raises(TypeError):
# size must be an int
RandomCrop(size=(112, 112))
with pytest.raises(AssertionError):
# "size > height" or "size > width" is not allowed
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=320)
random_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# General case
imgs = list(np.random.rand(2, 224, 341, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
results['gt_bboxes'] = np.array([[0, 0, 340, 224]])
results['proposals'] = np.array([[0, 0, 340, 224]])
kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])
results['keypoint'] = kp
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
# Test the case that no need for cropping
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
# Test the one-side-equal case
imgs = list(np.random.rand(2, 224, 225, 3))
results = dict(imgs=imgs)
random_crop = RandomCrop(size=224)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 224
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(size={224}, lazy={False})')
@staticmethod
def test_random_resized_crop():
with pytest.raises(TypeError):
# area_range must be a tuple of float
RandomResizedCrop(area_range=0.5)
with pytest.raises(TypeError):
# aspect_ratio_range must be a tuple of float
RandomResizedCrop(area_range=(0.08, 1.0), aspect_ratio_range=0.1)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# There will be a slight difference because of rounding
eps = 0.01
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
results['gt_bboxes'] = np.array([[0, 0, 340, 256]])
results['proposals'] = np.array([[0, 0, 340, 256]])
kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])
results['keypoint'] = kp
with pytest.raises(AssertionError):
# area_range[0] > area_range[1], which is wrong
random_crop = RandomResizedCrop(area_range=(0.9, 0.7))
random_crop(results)
with pytest.raises(AssertionError):
# aspect_ratio_range[0] is negative, which is wrong
random_crop = RandomResizedCrop(aspect_ratio_range=(-0.1, 2.0))
random_crop(results)
random_crop = RandomResizedCrop()
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert ((0.08 - eps <= h * w / 256 / 341)
and (h * w / 256 / 341 <= 1 + eps))
assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.)
assert repr(random_crop) == (f'{random_crop.__class__.__name__}'
f'(area_range={(0.08, 1.0)}, '
f'aspect_ratio_range={(3 / 4, 4 / 3)}, '
f'lazy={False})')
random_crop = RandomResizedCrop(
area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1))
# Test the fallback case: this area and aspect-ratio combination is
# infeasible, so the crop falls back to the full short side
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
random_crop_result = random_crop(results)
assert assert_dict_has_keys(random_crop_result, target_keys)
assert check_crop(imgs, random_crop_result['imgs'],
results['crop_bbox'])
h, w = random_crop_result['img_shape']
assert h == w == 256
@staticmethod
def test_multi_scale_crop():
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop(0.5)
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop('224')
with pytest.raises(TypeError):
# input_size must be int or tuple of int
MultiScaleCrop([224, 224])
with pytest.raises(TypeError):
# scales must be tuple.
MultiScaleCrop(
224, scales=[
1,
])
with pytest.raises(ValueError):
# num_fixed_crops must be 5 or 13
MultiScaleCrop(224, num_fixed_crops=6)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales']
# MultiScaleCrop with normal crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
results['gt_bboxes'] = np.array([[0, 0, 340, 256]])
results['proposals'] = np.array([[0, 0, 340, 256]])
kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])
results['keypoint'] = kp
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert assert_dict_has_keys(multi_scale_crop_results, target_keys)
assert check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with more fixed crops.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=False,
max_wh_scale_gap=0,
num_fixed_crops=13)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert assert_dict_has_keys(multi_scale_crop_results, target_keys)
assert check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)]
# MultiScaleCrop with random crop.
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
config = dict(
input_size=224,
scales=(1, 0.8),
random_crop=True,
max_wh_scale_gap=0)
multi_scale_crop = MultiScaleCrop(**config)
multi_scale_crop_results = multi_scale_crop(results)
assert assert_dict_has_keys(multi_scale_crop_results, target_keys)
assert check_crop(imgs, multi_scale_crop_results['imgs'],
multi_scale_crop_results['crop_bbox'])
assert (multi_scale_crop_results['img_shape'] in [(256, 256),
(204, 204)])
assert repr(multi_scale_crop) == (
f'{multi_scale_crop.__class__.__name__}'
f'(input_size={(224, 224)}, scales={(1, 0.8)}, '
f'max_wh_scale_gap={0}, random_crop={True}, '
f'num_fixed_crops=5, lazy={False})')
@staticmethod
def test_center_crop():
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
CenterCrop([224, 224])
# center crop with crop_size 224
# also check keypoint cropping in test_center_crop
imgs = list(np.random.rand(2, 240, 320, 3))
results = dict(imgs=imgs)
kp = np.array([[160, 120], [160, 120]]).reshape([1, 1, 2, 2])
results['keypoint'] = kp
results['gt_bboxes'] = np.array([[0, 0, 320, 240]])
results['proposals'] = np.array([[0, 0, 320, 240]])
center_crop = CenterCrop(crop_size=224)
center_crop_results = center_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape', 'keypoint']
assert assert_dict_has_keys(center_crop_results, target_keys)
assert check_crop(imgs, center_crop_results['imgs'],
center_crop_results['crop_bbox'])
assert np.all(
center_crop_results['crop_bbox'] == np.array([48, 8, 272, 232]))
assert center_crop_results['img_shape'] == (224, 224)
assert np.all(center_crop_results['keypoint'] == 112)
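# Worked check: the crop offset is (48, 8) (see crop_bbox above), so the
# keypoint (160, 120) shifts to (160 - 48, 120 - 8) = (112, 112), which is
# why every keypoint coordinate equals 112 in the assertion.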
assert repr(center_crop) == (f'{center_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, lazy={False})')
@staticmethod
def test_three_crop():
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
ThreeCrop([224, 224])
# three crop with crop_size 120
imgs = list(np.random.rand(2, 240, 120, 3))
results = dict(imgs=imgs)
three_crop = ThreeCrop(crop_size=120)
three_crop_results = three_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert assert_dict_has_keys(three_crop_results, target_keys)
assert check_crop(imgs, three_crop_results['imgs'],
three_crop_results['crop_bbox'], 3)
assert three_crop_results['img_shape'] == (120, 120)
# three crop with crop_size 224
imgs = list(np.random.rand(2, 224, 224, 3))
results = dict(imgs=imgs)
three_crop = ThreeCrop(crop_size=224)
three_crop_results = three_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert assert_dict_has_keys(three_crop_results, target_keys)
assert check_crop(imgs, three_crop_results['imgs'],
three_crop_results['crop_bbox'], 3)
assert three_crop_results['img_shape'] == (224, 224)
assert repr(three_crop) == (f'{three_crop.__class__.__name__}'
f'(crop_size={(224, 224)})')
@staticmethod
def test_ten_crop():
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop(0.5)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop('224')
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
TenCrop([224, 224])
# ten crop with crop_size 256
imgs = list(np.random.rand(2, 256, 256, 3))
results = dict(imgs=imgs)
ten_crop = TenCrop(crop_size=224)
ten_crop_results = ten_crop(results)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
assert assert_dict_has_keys(ten_crop_results, target_keys)
assert check_crop(imgs, ten_crop_results['imgs'],
ten_crop_results['crop_bbox'], 10)
assert ten_crop_results['img_shape'] == (224, 224)
assert repr(ten_crop) == (f'{ten_crop.__class__.__name__}'
f'(crop_size={(224, 224)})')
@staticmethod
def test_multi_group_crop():
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop(0.5, 1)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop('224', 1)
with pytest.raises(TypeError):
# crop_size must be int or tuple of int
MultiGroupCrop([224, 224], 1)
with pytest.raises(TypeError):
# groups must be int
MultiGroupCrop(224, '1')
with pytest.raises(ValueError):
# groups must be positive
MultiGroupCrop(224, 0)
target_keys = ['imgs', 'crop_bbox', 'img_shape']
# multi_group_crop with crop_size 224, groups 3
imgs = list(np.random.rand(2, 256, 341, 3))
results = dict(imgs=imgs)
multi_group_crop = MultiGroupCrop(224, 3)
multi_group_crop_result = multi_group_crop(results)
assert assert_dict_has_keys(multi_group_crop_result, target_keys)
assert check_crop(imgs, multi_group_crop_result['imgs'],
multi_group_crop_result['crop_bbox'],
multi_group_crop.groups)
assert multi_group_crop_result['img_shape'] == (224, 224)
assert repr(multi_group_crop) == (
f'{multi_group_crop.__class__.__name__}'
f'(crop_size={(224, 224)}, groups={3})')
| 14,358
| 40.143266
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_decode.py
|
import copy
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (AudioDecode, AudioDecodeInit,
DecordDecode, DecordInit,
FrameSelector, OpenCVDecode,
OpenCVInit, PyAVDecode,
PyAVDecodeMotionVector, PyAVInit,
RawFrameDecode)
from .base import BaseTestLoading
class TestDecode(BaseTestLoading):
def test_pyav_init(self):
target_keys = ['video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
assert assert_dict_has_keys(pyav_init_result, target_keys)
assert pyav_init_result['total_frames'] == 300
assert repr(
pyav_init) == f'{pyav_init.__class__.__name__}(io_backend=disk)'
def test_pyav_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test PyAV with 2 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
2)[:, np.newaxis]
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('
f'multi_thread={False})')
# test PyAV with 1 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# PyAV with multi thread and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode(multi_thread=True)
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}('
f'multi_thread={True})')
# test PyAV with 2 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames,
2)[:, np.newaxis]
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test PyAV with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode()
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# PyAV with multi thread
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 5)
pyav_init = PyAVInit()
pyav_init_result = pyav_init(video_result)
video_result['video_reader'] = pyav_init_result['video_reader']
pyav_decode = PyAVDecode(multi_thread=True)
pyav_decode_result = pyav_decode(video_result)
assert assert_dict_has_keys(pyav_decode_result, target_keys)
assert pyav_decode_result['original_shape'] == (256, 340)
assert np.shape(pyav_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
assert repr(pyav_decode) == pyav_decode.__class__.__name__ + \
f'(multi_thread={True})'
def test_decord_init(self):
target_keys = ['video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
assert assert_dict_has_keys(decord_init_result, target_keys)
assert decord_init_result['total_frames'] == len(
decord_init_result['video_reader'])
assert repr(decord_init) == (f'{decord_init.__class__.__name__}('
f'io_backend=disk, '
f'num_threads={1})')
def test_decord_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test Decord with 2 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
3)[:, np.newaxis]
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert assert_dict_has_keys(decord_decode_result, target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 1 dim input and start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 3)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert assert_dict_has_keys(decord_decode_result, target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 2 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames,
3)[:, np.newaxis]
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert assert_dict_has_keys(decord_decode_result, target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test Decord with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 3)
decord_init = DecordInit()
decord_init_result = decord_init(video_result)
video_result['video_reader'] = decord_init_result['video_reader']
decord_decode = DecordDecode()
decord_decode_result = decord_decode(video_result)
assert assert_dict_has_keys(decord_decode_result, target_keys)
assert decord_decode_result['original_shape'] == (256, 340)
assert np.shape(decord_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
def test_opencv_init(self):
target_keys = ['new_path', 'video_reader', 'total_frames']
video_result = copy.deepcopy(self.video_results)
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
assert assert_dict_has_keys(opencv_init_result, target_keys)
assert opencv_init_result['total_frames'] == len(
opencv_init_result['video_reader'])
assert repr(opencv_init) == (f'{opencv_init.__class__.__name__}('
f'io_backend=disk)')
def test_opencv_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape']
# test OpenCV with 2 dim input when start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames,
2)[:, np.newaxis]
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert assert_dict_has_keys(opencv_decode_result, target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test OpenCV with 2 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames,
2)[:, np.newaxis]
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert assert_dict_has_keys(opencv_decode_result, target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test OpenCV with 1 dim input when start_index = 0
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(0, self.total_frames, 3)
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert assert_dict_has_keys(opencv_decode_result, target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
# test OpenCV with 1 dim input
video_result = copy.deepcopy(self.video_results)
video_result['frame_inds'] = np.arange(1, self.total_frames, 3)
opencv_init = OpenCVInit()
opencv_init_result = opencv_init(video_result)
video_result['video_reader'] = opencv_init_result['video_reader']
opencv_decode = OpenCVDecode()
opencv_decode_result = opencv_decode(video_result)
assert assert_dict_has_keys(opencv_decode_result, target_keys)
assert opencv_decode_result['original_shape'] == (256, 340)
assert np.shape(opencv_decode_result['imgs']) == (len(
video_result['frame_inds']), 256, 340, 3)
@staticmethod
def test_rawframe_selector():
with pytest.warns(UserWarning):
FrameSelector(io_backend='disk')
def test_rawframe_decode(self):
target_keys = ['frame_inds', 'imgs', 'original_shape', 'modality']
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)[:,
np.newaxis]
# since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
inputs['gt_bboxes'] = np.array([[0, 0, 1, 1]])
inputs['proposals'] = np.array([[0, 0, 1, 1]])
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 2 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)[:,
np.newaxis]
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input when start_index = 0
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 5)
# since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 5)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)
# since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input for flow images
inputs = copy.deepcopy(self.flow_frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 2)
# since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,
240, 320)
assert results['original_shape'] == (240, 320)
# test frame selector with 1 dim input for flow images
inputs = copy.deepcopy(self.flow_frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 2)
frame_selector = RawFrameDecode(io_backend='disk')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2,
240, 320)
assert results['original_shape'] == (240, 320)
# test frame selector with the turbojpeg decoding backend
# when start_index = 0
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(0, self.total_frames, 5)
# since the test images start with index 1, we add 1 to frame_inds
# in order to pass the CI
inputs['frame_inds'] = inputs['frame_inds'] + 1
frame_selector = RawFrameDecode(
io_backend='disk', decoding_backend='turbojpeg')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
# test frame selector with the turbojpeg decoding backend
inputs = copy.deepcopy(self.frame_results)
inputs['frame_inds'] = np.arange(1, self.total_frames, 5)
frame_selector = RawFrameDecode(
io_backend='disk', decoding_backend='turbojpeg')
results = frame_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240,
320, 3)
assert results['original_shape'] == (240, 320)
assert repr(frame_selector) == (f'{frame_selector.__class__.__name__}('
f'io_backend=disk, '
f'decoding_backend=turbojpeg)')
def test_audio_decode_init(self):
target_keys = ['audios', 'length', 'sample_rate']
inputs = copy.deepcopy(self.audio_results)
audio_decode_init = AudioDecodeInit()
results = audio_decode_init(inputs)
assert assert_dict_has_keys(results, target_keys)
# test when no audio file exists
inputs = copy.deepcopy(self.audio_results)
inputs['audio_path'] = 'foo/foo/bar.wav'
audio_decode_init = AudioDecodeInit()
results = audio_decode_init(inputs)
assert assert_dict_has_keys(results, target_keys)
assert results['audios'].shape == (10.0 *
audio_decode_init.sample_rate, )
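# Worked check: when the audio file is missing, AudioDecodeInit falls back
# to its zero padding and synthesizes 10 seconds of silence at the default
# 16 kHz rate, hence the (10.0 * 16000,) == (160000,) waveform asserted
# above.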
assert repr(audio_decode_init) == (
f'{audio_decode_init.__class__.__name__}('
f'io_backend=disk, '
f'sample_rate=16000, '
f'pad_method=zero)')
def test_audio_decode(self):
target_keys = ['frame_inds', 'audios']
inputs = copy.deepcopy(self.audio_results)
inputs['frame_inds'] = np.arange(0, self.audio_total_frames,
2)[:, np.newaxis]
inputs['num_clips'] = 1
inputs['length'] = 1280
audio_selector = AudioDecode()
results = audio_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
def test_pyav_decode_motion_vector(self):
pyav_init = PyAVInit()
pyav = PyAVDecodeMotionVector()
# test pyav with 2-dim input
results = {
'filename': self.video_path,
'frame_inds': np.arange(0, 32, 1)[:, np.newaxis]
}
results = pyav_init(results)
results = pyav(results)
target_keys = ['motion_vectors']
assert assert_dict_has_keys(results, target_keys)
# test pyav with 1 dim input
results = {
'filename': self.video_path,
'frame_inds': np.arange(0, 32, 1)
}
pyav_init = PyAVInit()
results = pyav_init(results)
pyav = PyAVDecodeMotionVector()
results = pyav(results)
assert assert_dict_has_keys(results, target_keys)
| 21,562
| 46.495595
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/base.py
|
import os.path as osp
import mmcv
import numpy as np
class BaseTestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../../../data'))
cls.img_path = osp.join(cls.data_prefix, 'test.jpg')
cls.video_path = osp.join(cls.data_prefix, 'test.mp4')
cls.wav_path = osp.join(cls.data_prefix, 'test.wav')
cls.audio_spec_path = osp.join(cls.data_prefix, 'test.npy')
cls.img_dir = osp.join(cls.data_prefix, 'imgs')
cls.raw_feature_dir = osp.join(cls.data_prefix, 'activitynet_features')
cls.bsp_feature_dir = osp.join(cls.data_prefix, 'bsp_features')
cls.proposals_dir = osp.join(cls.data_prefix, 'proposals')
cls.total_frames = 5
cls.filename_tmpl = 'img_{:05}.jpg'
cls.flow_filename_tmpl = '{}_{:05d}.jpg'
video_total_frames = len(mmcv.VideoReader(cls.video_path))
cls.audio_total_frames = video_total_frames
cls.video_results = dict(
filename=cls.video_path,
label=1,
total_frames=video_total_frames,
start_index=0)
cls.audio_results = dict(
audios=np.random.randn(1280, ),
audio_path=cls.wav_path,
total_frames=cls.audio_total_frames,
label=1,
start_index=0)
cls.audio_feature_results = dict(
audios=np.random.randn(128, 80),
audio_path=cls.audio_spec_path,
total_frames=cls.audio_total_frames,
label=1,
start_index=0)
cls.frame_results = dict(
frame_dir=cls.img_dir,
total_frames=cls.total_frames,
filename_tmpl=cls.filename_tmpl,
start_index=1,
modality='RGB',
offset=0,
label=1)
cls.flow_frame_results = dict(
frame_dir=cls.img_dir,
total_frames=cls.total_frames,
filename_tmpl=cls.flow_filename_tmpl,
modality='Flow',
offset=0,
label=1)
cls.action_results = dict(
video_name='v_test1',
data_prefix=cls.raw_feature_dir,
temporal_scale=5,
boundary_ratio=0.1,
duration_second=10,
duration_frame=10,
feature_frame=8,
annotations=[{
'segment': [3.0, 5.0],
'label': 'Rock climbing'
}])
from mmaction.datasets.ssn_dataset import SSNInstance
cls.proposal_results = dict(
frame_dir=cls.img_dir,
video_id='imgs',
total_frames=cls.total_frames,
filename_tmpl=cls.filename_tmpl,
start_index=1,
out_proposals=[[['imgs', SSNInstance(1, 4, 10, 1, 1, 1)], 0],
[['imgs', SSNInstance(2, 5, 10, 2, 1, 1)], 0]])
cls.ava_results = dict(
fps=30, timestamp=902, timestamp_start=840, shot_info=(0, 27000))
cls.hvu_label_example1 = dict(
categories=['action', 'object', 'scene', 'concept'],
category_nums=[2, 5, 3, 2],
label=dict(action=[0], object=[2, 3], scene=[0, 1]))
cls.hvu_label_example2 = dict(
categories=['action', 'object', 'scene', 'concept'],
category_nums=[2, 5, 3, 2],
label=dict(action=[1], scene=[1, 2], concept=[1]))
| 3,440
| 36
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_sampling.py
|
import copy
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_equal
from mmaction.datasets.pipelines import (AudioFeatureSelector,
DenseSampleFrames, SampleAVAFrames,
SampleFrames, SampleProposalFrames,
UntrimmedSampleFrames)
from .base import BaseTestLoading
class TestSampling(BaseTestLoading):
def test_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
with pytest.warns(UserWarning):
# start_index has been deprecated
config = dict(
clip_len=3, frame_interval=1, num_clips=5, start_index=1)
SampleFrames(**config)
# Sample Frame with no temporal_jitter
# clip_len=3, frame_interval=1, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 15
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 15
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter
# clip_len=5, frame_interval=1, num_clips=5,
# out_of_bound_opt='repeat_last'
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=5,
frame_interval=1,
num_clips=5,
temporal_jitter=False,
out_of_bound_opt='repeat_last')
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={5}, '
f'frame_interval={1}, '
f'num_clips={5}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=repeat_last, '
f'test_mode={False})')
def check_monotonous(arr):
length = arr.shape[0]
for i in range(length - 1):
if arr[i] > arr[i + 1]:
return False
return True
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 25
frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])
for i in range(5):
assert check_monotonous(frame_inds[i])
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with temporal_jitter
# clip_len=4, frame_interval=2, num_clips=5
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 20
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 20
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={2}, '
f'num_clips={5}, '
f'temporal_jitter={True}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'temporal_jitter={False}, '
f'twice_sample={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Sample Frame with no temporal_jitter in test mode
# clip_len=3, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=3,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 18
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 18
assert np.max(sample_frames_results['frame_inds']) <= 5
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
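        # Expected indices: in test mode the 8 clip offsets are spread evenly
        # over 6 frames, int((i + 0.5) * 6 / 8) for i in range(8) gives
        # [0, 1, 1, 2, 3, 4, 4, 5], then start_index=1 shifts them by one.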
        # Same sampling as above, additionally checking that the default
        # start_index is 0
        # clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 2, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=6, frame_interval=1, num_clips=1
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 5
config = dict(
clip_len=6,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
[1, 2, 3, 4, 5, 1])
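        # With only 5 frames and clip_len=6, the default 'loop' option wraps
        # the out-of-bound sixth index back to the first frame.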
# Sample Frame with no temporal_jitter to get avg_interval <= 0
# clip_len=12, frame_interval=1, num_clips=20
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 30
config = dict(
clip_len=12,
frame_interval=1,
num_clips=20,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 240
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 240
assert np.max(sample_frames_results['frame_inds']) <= 30
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame with no temporal_jitter to get clip_offsets
# clip_len=1, frame_interval=1, num_clips=8
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 6
config = dict(
clip_len=1,
frame_interval=1,
num_clips=8,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert sample_frames_results['start_index'] == 0
assert len(sample_frames_results['frame_inds']) == 8
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 8
assert_array_equal(sample_frames_results['frame_inds'],
np.array([1, 2, 3, 3, 4, 5, 5, 6]))
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 10
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
test_mode=False)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 24
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 24
assert np.max(sample_frames_results['frame_inds']) <= 10
assert np.min(sample_frames_results['frame_inds']) >= 1
# Sample Frame using twice sample
# clip_len=12, frame_interval=1, num_clips=2
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
frame_result['total_frames'] = 40
config = dict(
clip_len=12,
frame_interval=1,
num_clips=2,
temporal_jitter=False,
twice_sample=True,
test_mode=True)
sample_frames = SampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 48
sample_frames_results = sample_frames(frame_result)
assert len(sample_frames_results['frame_inds']) == 48
assert np.max(sample_frames_results['frame_inds']) <= 40
assert np.min(sample_frames_results['frame_inds']) >= 1
def test_dense_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
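        # In test mode DenseSampleFrames samples from num_sample_positions
        # (10 by default) start positions, so it yields
        # clip_len * num_clips * 10 = 4 * 6 * 10 = 240 indices; outside test
        # mode a single position yields 4 * 6 = 24.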
# Dense sample with no temporal_jitter in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={64}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
# Dense sample with no temporal_jitter
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter, sample_range=32 in test mode
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 240
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 240
# Dense sample with no temporal_jitter, sample_range=32
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=32,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={10}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={False})')
        # Dense sample with no temporal_jitter, sample_range=1000 to check
        # that clip offsets wrap modulo total_frames when sample_range
        # exceeds the video length
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
sample_range=1000,
temporal_jitter=False)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 24
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 24
# Dense sample with no temporal_jitter in test mode
# sample_range=32, num_sample_positions=5
# clip_len=4, frame_interval=1, num_clips=6
video_result = copy.deepcopy(self.video_results)
frame_result = copy.deepcopy(self.frame_results)
config = dict(
clip_len=4,
frame_interval=1,
num_clips=6,
num_sample_positions=5,
sample_range=32,
temporal_jitter=False,
test_mode=True)
dense_sample_frames = DenseSampleFrames(**config)
dense_sample_frames_results = dense_sample_frames(video_result)
assert dense_sample_frames_results['start_index'] == 0
assert assert_dict_has_keys(dense_sample_frames_results, target_keys)
assert len(dense_sample_frames_results['frame_inds']) == 120
dense_sample_frames_results = dense_sample_frames(frame_result)
assert len(dense_sample_frames_results['frame_inds']) == 120
assert repr(dense_sample_frames) == (
f'{dense_sample_frames.__class__.__name__}('
f'clip_len={4}, '
f'frame_interval={1}, '
f'num_clips={6}, '
f'sample_range={32}, '
f'num_sample_positions={5}, '
f'temporal_jitter={False}, '
f'out_of_bound_opt=loop, '
f'test_mode={True})')
def test_untrim_sample_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames'
]
frame_result = dict(
frame_dir=None,
total_frames=100,
filename_tmpl=None,
modality='RGB',
start_index=0,
label=1)
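        # UntrimmedSampleFrames tiles the whole video: 100 frames at
        # frame_interval=16 gives 100 // 16 = 6 clips, each centered at
        # offset frame_interval // 2 = 8.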
video_result = copy.deepcopy(self.video_results)
config = dict(clip_len=1, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(video_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
frame_inds = np.array(list(range(8, 300, 16)))
assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0]
assert_array_equal(sample_frames_results['frame_inds'], frame_inds)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=1, frame_interval=16)
sample_frames = UntrimmedSampleFrames(**config)
frame_result_ = copy.deepcopy(frame_result)
frame_result_['start_index'] = 1
sample_frames_results = sample_frames(frame_result_)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 6
assert_array_equal(sample_frames_results['frame_inds'],
np.array([8, 24, 40, 56, 72, 88]) + 1)
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'frame_interval={16})')
config = dict(clip_len=3, frame_interval=16, start_index=0)
sample_frames = UntrimmedSampleFrames(**config)
sample_frames_results = sample_frames(frame_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 18
assert_array_equal(
sample_frames_results['frame_inds'],
np.array([
7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87,
88, 89
]))
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={3}, '
f'frame_interval={16})')
def test_sample_ava_frames(self):
target_keys = [
'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds',
'clip_len', 'frame_interval'
]
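        # SampleAVAFrames centers a window of clip_len * frame_interval
        # frames around the annotated keyframe derived from fps and
        # timestamp.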
config = dict(clip_len=32, frame_interval=2)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert assert_dict_has_keys(ava_result, target_keys)
assert ava_result['clip_len'] == 32
assert ava_result['frame_interval'] == 2
assert len(ava_result['frame_inds']) == 32
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={32}, '
f'frame_interval={2}, '
f'test_mode={False})')
        # regression test case added for Issue #306
config = dict(clip_len=8, frame_interval=8)
sample_ava_dataset = SampleAVAFrames(**config)
ava_result = sample_ava_dataset(results=self.ava_results)
assert assert_dict_has_keys(ava_result, target_keys)
assert ava_result['clip_len'] == 8
assert ava_result['frame_interval'] == 8
assert len(ava_result['frame_inds']) == 8
assert repr(sample_ava_dataset) == (
f'{sample_ava_dataset.__class__.__name__}('
f'clip_len={8}, '
f'frame_interval={8}, '
f'test_mode={False})')
def test_sample_proposal_frames(self):
target_keys = [
'frame_inds', 'clip_len', 'frame_interval', 'num_clips',
'total_frames', 'start_index'
]
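        # Each proposal contributes clip_len * (body_segments +
        # sum(aug_segments)) indices; the fixture appears to carry two
        # proposals, hence 2 * (2 + 1 + 1) = 8 indices in the train/val
        # cases below.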
# test error cases
with pytest.raises(TypeError):
proposal_result = copy.deepcopy(self.proposal_results)
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=('error', 'error'),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames(proposal_result)
# test normal cases
# Sample Frame with no temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
# Sample Frame with temporal_jitter
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=True)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={True}, '
f'mode=train)')
# Sample Frame with no temporal_jitter in val mode
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 9
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
# Sample Frame with no temporal_jitter in test mode
# test_interval=2
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['out_proposals'] = None
proposal_result['total_frames'] = 10
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
test_interval=2,
temporal_jitter=False,
mode='test')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 5
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={2}, '
f'temporal_jitter={False}, '
f'mode=test)')
# Sample Frame with no temporal_jitter to get clip_offsets zero
# clip_len=1, frame_interval=1
# body_segments=2, aug_segments=(1, 1)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=2,
aug_segments=(1, 1),
aug_ratio=0.5,
temporal_jitter=False)
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 8
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={2}, '
f'aug_segments={(1, 1)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=train)')
        # Sample Frame with no temporal_jitter to get clip_offsets zero
        # in val mode
# clip_len=1, frame_interval=1
# body_segments=4, aug_segments=(2, 2)
proposal_result = copy.deepcopy(self.proposal_results)
proposal_result['total_frames'] = 3
config = dict(
clip_len=1,
frame_interval=1,
body_segments=4,
aug_segments=(2, 2),
aug_ratio=0.5,
temporal_jitter=False,
mode='val')
sample_frames = SampleProposalFrames(**config)
sample_frames_results = sample_frames(proposal_result)
assert assert_dict_has_keys(sample_frames_results, target_keys)
assert len(sample_frames_results['frame_inds']) == 16
assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('
f'clip_len={1}, '
f'body_segments={4}, '
f'aug_segments={(2, 2)}, '
f'aug_ratio={(0.5, 0.5)}, '
f'frame_interval={1}, '
f'test_interval={6}, '
f'temporal_jitter={False}, '
f'mode=val)')
def test_audio_feature_selector(self):
target_keys = ['audios']
        # test frame selector with 2-dim input
inputs = copy.deepcopy(self.audio_feature_results)
inputs['frame_inds'] = np.arange(0, self.audio_total_frames,
2)[:, np.newaxis]
inputs['num_clips'] = 1
inputs['length'] = 1280
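        # AudioFeatureSelector maps the sampled frame indices onto the audio
        # feature timeline and pads/clips each sample to fix_length
        # (128 by default).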
audio_feature_selector = AudioFeatureSelector()
results = audio_feature_selector(inputs)
assert assert_dict_has_keys(results, target_keys)
assert repr(audio_feature_selector) == (
f'{audio_feature_selector.__class__.__name__}('
f'fix_length={128})')
| 35,091
| 45.914439
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_pose_loading.py
|
import copy as cp
from collections import defaultdict
import numpy as np
import pytest
from mmcv import dump
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.datasets.pipelines import (GeneratePoseTarget, LoadKineticsPose,
PoseDecode, UniformSampleFrames)
class TestPoseLoading:
@staticmethod
def test_uniform_sample_frames():
results = dict(total_frames=64, start_index=0)
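        # UniformSampleFrames splits the video into clip_len equal bins and
        # samples one index per bin; with test_mode=True and a fixed seed the
        # result is deterministic.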
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=True, seed=0)
assert str(sampling) == ('UniformSampleFrames(clip_len=8, '
'num_clips=1, test_mode=True, seed=0)')
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert_array_equal(sampling_results['frame_inds'],
np.array([4, 15, 21, 24, 35, 43, 51, 63]))
results = dict(total_frames=15, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=True, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert_array_equal(sampling_results['frame_inds'],
np.array([0, 2, 4, 6, 8, 9, 11, 13]))
results = dict(total_frames=7, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=True, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert_array_equal(sampling_results['frame_inds'],
np.array([0, 1, 2, 3, 4, 5, 6, 0]))
results = dict(total_frames=7, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=8, test_mode=True, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 8
assert len(sampling_results['frame_inds']) == 64
results = dict(total_frames=64, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=4, test_mode=True, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 4
assert_array_equal(
sampling_results['frame_inds'],
np.array([
4, 15, 21, 24, 35, 43, 51, 63, 1, 11, 21, 26, 36, 47, 54, 56,
0, 12, 18, 25, 38, 47, 55, 62, 0, 9, 21, 25, 37, 40, 49, 60
]))
results = dict(total_frames=64, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=False, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert len(sampling_results['frame_inds']) == 8
results = dict(total_frames=7, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=False, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert len(sampling_results['frame_inds']) == 8
results = dict(total_frames=15, start_index=0)
sampling = UniformSampleFrames(
clip_len=8, num_clips=1, test_mode=False, seed=0)
sampling_results = sampling(results)
assert sampling_results['clip_len'] == 8
assert sampling_results['frame_interval'] is None
assert sampling_results['num_clips'] == 1
assert len(sampling_results['frame_inds']) == 8
@staticmethod
def test_pose_decode():
kp = np.random.random([1, 16, 17, 2])
kpscore = np.random.random([1, 16, 17])
frame_inds = np.array([2, 4, 6, 8, 10])
results = dict(
keypoint=kp, keypoint_score=kpscore, frame_inds=frame_inds)
pose_decode = PoseDecode()
assert str(pose_decode) == ('PoseDecode()')
decode_results = pose_decode(results)
assert_array_almost_equal(decode_results['keypoint'], kp[:,
frame_inds])
assert_array_almost_equal(decode_results['keypoint_score'],
kpscore[:, frame_inds])
results = dict(keypoint=kp, keypoint_score=kpscore, total_frames=16)
pose_decode = PoseDecode()
decode_results = pose_decode(results)
assert_array_almost_equal(decode_results['keypoint'], kp)
assert_array_almost_equal(decode_results['keypoint_score'], kpscore)
@staticmethod
def test_load_kinetics_pose():
def get_mode(arr):
cnt = defaultdict(lambda: 0)
for num in arr:
cnt[num] += 1
max_val = max(cnt.values())
return [k for k in cnt if cnt[k] == max_val], max_val
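        # get_mode returns the most frequent frame indices and their count,
        # i.e. the maximum number of detections in any single frame, which
        # bounds the person dimension of the loaded keypoints.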
filename = '/tmp/tmp.pkl'
total_frames = 100
img_shape = (224, 224)
frame_inds = np.random.choice(range(100), size=120)
frame_inds.sort()
anno_flag = np.random.random(120) > 0.1
anno_inds = np.array([i for i, f in enumerate(anno_flag) if f])
kp = np.random.random([120, 17, 3])
dump(kp, filename)
results = dict(
filename=filename,
total_frames=total_frames,
img_shape=img_shape,
frame_inds=frame_inds)
inp = cp.deepcopy(results)
with pytest.raises(NotImplementedError):
LoadKineticsPose(squeeze=True, max_person=100, source='xxx')
load_kinetics_pose = LoadKineticsPose(
squeeze=True, max_person=100, source='openpose')
assert str(load_kinetics_pose) == ('LoadKineticsPose(io_backend=disk, '
'squeeze=True, max_person=100, '
"keypoint_weight={'face': 1, "
"'torso': 2, 'limb': 3}, "
'source=openpose, kwargs={})')
return_results = load_kinetics_pose(inp)
assert return_results['keypoint'].shape[:-1] == \
return_results['keypoint_score'].shape
num_person = return_results['keypoint'].shape[0]
num_frame = return_results['keypoint'].shape[1]
assert num_person == get_mode(frame_inds)[1]
assert np.max(return_results['keypoint']) > 1
assert num_frame == len(set(frame_inds))
inp = cp.deepcopy(results)
load_kinetics_pose = LoadKineticsPose(
squeeze=False, max_person=100, source='openpose')
return_results = load_kinetics_pose(inp)
assert return_results['keypoint'].shape[:-1] == \
return_results['keypoint_score'].shape
num_person = return_results['keypoint'].shape[0]
num_frame = return_results['keypoint'].shape[1]
assert num_person == get_mode(frame_inds)[1]
assert np.max(return_results['keypoint']) > 1
assert num_frame == total_frames
inp = cp.deepcopy(results)
inp['anno_inds'] = anno_inds
load_kinetics_pose = LoadKineticsPose(
squeeze=True, max_person=100, source='mmpose')
return_results = load_kinetics_pose(inp)
assert return_results['keypoint'].shape[:-1] == \
return_results['keypoint_score'].shape
num_person = return_results['keypoint'].shape[0]
num_frame = return_results['keypoint'].shape[1]
assert num_person == get_mode(frame_inds[anno_inds])[1]
assert np.max(return_results['keypoint']) <= 1
assert num_frame == len(set(frame_inds[anno_inds]))
inp = cp.deepcopy(results)
inp['anno_inds'] = anno_inds
load_kinetics_pose = LoadKineticsPose(
squeeze=True, max_person=2, source='mmpose')
return_results = load_kinetics_pose(inp)
assert return_results['keypoint'].shape[:-1] == \
return_results['keypoint_score'].shape
num_person = return_results['keypoint'].shape[0]
num_frame = return_results['keypoint'].shape[1]
assert num_person <= 2
assert np.max(return_results['keypoint']) <= 1
assert num_frame == len(set(frame_inds[anno_inds]))
@staticmethod
def test_generate_pose_target():
img_shape = (64, 64)
kp = np.array([[[[24, 24], [40, 40], [24, 40]]]])
kpscore = np.array([[[1., 1., 1.]]])
kp = np.concatenate([kp] * 8, axis=1)
kpscore = np.concatenate([kpscore] * 8, axis=1)
results = dict(
img_shape=img_shape,
keypoint=kp,
keypoint_score=kpscore,
modality='Pose')
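        # With with_kp=True the target has one heatmap channel per keypoint
        # (3 here), giving (8, 64, 64, 3); adding with_limb appends one
        # channel per skeleton edge, and double=True stacks a horizontally
        # flipped copy along the frame axis.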
generate_pose_target = GeneratePoseTarget(
sigma=1, with_kp=True, left_kp=(0, ), right_kp=(1, ), skeletons=())
assert str(generate_pose_target) == ('GeneratePoseTarget(sigma=1, '
'use_score=True, with_kp=True, '
'with_limb=False, skeletons=(), '
'double=False, left_kp=(0,), '
'right_kp=(1,))')
return_results = generate_pose_target(results)
assert return_results['imgs'].shape == (8, 64, 64, 3)
assert_array_almost_equal(return_results['imgs'][0],
return_results['imgs'][1])
results = dict(img_shape=img_shape, keypoint=kp, modality='Pose')
generate_pose_target = GeneratePoseTarget(
sigma=1, with_kp=True, left_kp=(0, ), right_kp=(1, ), skeletons=())
return_results = generate_pose_target(results)
assert return_results['imgs'].shape == (8, 64, 64, 3)
assert_array_almost_equal(return_results['imgs'][0],
return_results['imgs'][1])
generate_pose_target = GeneratePoseTarget(
sigma=1,
with_kp=False,
with_limb=True,
left_kp=(0, ),
right_kp=(1, ),
skeletons=((0, 1), (1, 2), (0, 2)))
return_results = generate_pose_target(results)
assert return_results['imgs'].shape == (8, 64, 64, 3)
assert_array_almost_equal(return_results['imgs'][0],
return_results['imgs'][1])
generate_pose_target = GeneratePoseTarget(
sigma=1,
with_kp=True,
with_limb=True,
left_kp=(0, ),
right_kp=(1, ),
skeletons=((0, 1), (1, 2), (0, 2)))
return_results = generate_pose_target(results)
assert return_results['imgs'].shape == (8, 64, 64, 6)
assert_array_almost_equal(return_results['imgs'][0],
return_results['imgs'][1])
generate_pose_target = GeneratePoseTarget(
sigma=1,
with_kp=True,
with_limb=True,
double=True,
left_kp=(0, ),
right_kp=(1, ),
skeletons=((0, 1), (1, 2), (0, 2)))
return_results = generate_pose_target(results)
imgs = return_results['imgs']
assert imgs.shape == (16, 64, 64, 6)
assert_array_almost_equal(imgs[0], imgs[1])
assert_array_almost_equal(imgs[:8, 2], imgs[8:, 2, :, ::-1])
assert_array_almost_equal(imgs[:8, 0], imgs[8:, 1, :, ::-1])
assert_array_almost_equal(imgs[:8, 1], imgs[8:, 0, :, ::-1])
img_shape = (64, 64)
kp = np.array([[[[24, 24], [40, 40], [24, 40]]]])
kpscore = np.array([[[0., 0., 0.]]])
kp = np.concatenate([kp] * 8, axis=1)
kpscore = np.concatenate([kpscore] * 8, axis=1)
results = dict(
img_shape=img_shape,
keypoint=kp,
keypoint_score=kpscore,
modality='Pose')
generate_pose_target = GeneratePoseTarget(
sigma=1, with_kp=True, left_kp=(0, ), right_kp=(1, ), skeletons=())
return_results = generate_pose_target(results)
assert_array_almost_equal(return_results['imgs'], 0)
img_shape = (64, 64)
kp = np.array([[[[24, 24], [40, 40], [24, 40]]]])
kpscore = np.array([[[0., 0., 0.]]])
kp = np.concatenate([kp] * 8, axis=1)
kpscore = np.concatenate([kpscore] * 8, axis=1)
results = dict(
img_shape=img_shape,
keypoint=kp,
keypoint_score=kpscore,
modality='Pose')
generate_pose_target = GeneratePoseTarget(
sigma=1,
with_kp=False,
with_limb=True,
left_kp=(0, ),
right_kp=(1, ),
skeletons=((0, 1), (1, 2), (0, 2)))
return_results = generate_pose_target(results)
assert_array_almost_equal(return_results['imgs'], 0)
img_shape = (64, 64)
kp = np.array([[[[124, 124], [140, 140], [124, 140]]]])
kpscore = np.array([[[0., 0., 0.]]])
kp = np.concatenate([kp] * 8, axis=1)
kpscore = np.concatenate([kpscore] * 8, axis=1)
results = dict(
img_shape=img_shape,
keypoint=kp,
keypoint_score=kpscore,
modality='Pose')
generate_pose_target = GeneratePoseTarget(
sigma=1, with_kp=True, left_kp=(0, ), right_kp=(1, ), skeletons=())
return_results = generate_pose_target(results)
assert_array_almost_equal(return_results['imgs'], 0)
img_shape = (64, 64)
kp = np.array([[[[124, 124], [140, 140], [124, 140]]]])
kpscore = np.array([[[0., 0., 0.]]])
kp = np.concatenate([kp] * 8, axis=1)
kpscore = np.concatenate([kpscore] * 8, axis=1)
results = dict(
img_shape=img_shape,
keypoint=kp,
keypoint_score=kpscore,
modality='Pose')
generate_pose_target = GeneratePoseTarget(
sigma=1,
with_kp=False,
with_limb=True,
left_kp=(0, ),
right_kp=(1, ),
skeletons=((0, 1), (1, 2), (0, 2)))
return_results = generate_pose_target(results)
assert_array_almost_equal(return_results['imgs'], 0)
| 14,911
| 41.243626
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_load.py
|
import copy
import numpy as np
import pytest
import torch
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import (LoadAudioFeature, LoadHVULabel,
LoadLocalizationFeature,
LoadProposals)
from .base import BaseTestLoading
class TestLoad(BaseTestLoading):
def test_load_hvu_label(self):
hvu_label_example1 = copy.deepcopy(self.hvu_label_example1)
hvu_label_example2 = copy.deepcopy(self.hvu_label_example2)
categories = hvu_label_example1['categories']
category_nums = hvu_label_example1['category_nums']
num_tags = sum(category_nums)
num_categories = len(categories)
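        # LoadHVULabel builds a multi-hot label over all tags plus masks that
        # mark which tag ranges and categories are actually annotated.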
loader = LoadHVULabel()
assert repr(loader) == (f'{loader.__class__.__name__}('
f'hvu_initialized={False})')
result1 = loader(hvu_label_example1)
label1 = torch.zeros(num_tags)
mask1 = torch.zeros(num_tags)
category_mask1 = torch.zeros(num_categories)
assert repr(loader) == (f'{loader.__class__.__name__}('
f'hvu_initialized={True})')
label1[[0, 4, 5, 7, 8]] = 1.
mask1[:10] = 1.
category_mask1[:3] = 1.
assert torch.all(torch.eq(label1, result1['label']))
assert torch.all(torch.eq(mask1, result1['mask']))
assert torch.all(torch.eq(category_mask1, result1['category_mask']))
result2 = loader(hvu_label_example2)
label2 = torch.zeros(num_tags)
mask2 = torch.zeros(num_tags)
category_mask2 = torch.zeros(num_categories)
label2[[1, 8, 9, 11]] = 1.
mask2[:2] = 1.
mask2[7:] = 1.
category_mask2[[0, 2, 3]] = 1.
assert torch.all(torch.eq(label2, result2['label']))
assert torch.all(torch.eq(mask2, result2['mask']))
assert torch.all(torch.eq(category_mask2, result2['category_mask']))
def test_load_localization_feature(self):
target_keys = ['raw_feature']
action_result = copy.deepcopy(self.action_results)
# test error cases
with pytest.raises(NotImplementedError):
load_localization_feature = LoadLocalizationFeature(
'unsupport_ext')
# test normal cases
load_localization_feature = LoadLocalizationFeature()
load_localization_feature_result = load_localization_feature(
action_result)
assert assert_dict_has_keys(load_localization_feature_result,
target_keys)
assert load_localization_feature_result['raw_feature'].shape == (400,
5)
assert repr(load_localization_feature) == (
f'{load_localization_feature.__class__.__name__}('
f'raw_feature_ext=.csv)')
def test_load_proposals(self):
target_keys = [
'bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score',
'reference_temporal_iou'
]
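        # LoadProposals reads the top_k proposals per video from .csv files
        # and the matching BSP features from .npy files, hence the
        # (5, ...)-shaped outputs asserted below.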
action_result = copy.deepcopy(self.action_results)
# test error cases
with pytest.raises(NotImplementedError):
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir,
'unsupport_ext')
with pytest.raises(NotImplementedError):
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir, '.csv',
'unsupport_ext')
# test normal cases
load_proposals = LoadProposals(5, self.proposals_dir,
self.bsp_feature_dir)
load_proposals_result = load_proposals(action_result)
assert assert_dict_has_keys(load_proposals_result, target_keys)
assert load_proposals_result['bsp_feature'].shape[0] == 5
assert load_proposals_result['tmin'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4)
assert load_proposals_result['tmax'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4)
assert load_proposals_result['tmin_score'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmin_score'],
np.arange(0.95, 0.90, -0.01),
decimal=4)
assert load_proposals_result['tmax_score'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['tmax_score'],
np.arange(0.96, 0.91, -0.01),
decimal=4)
assert load_proposals_result['reference_temporal_iou'].shape == (5, )
assert_array_almost_equal(
load_proposals_result['reference_temporal_iou'],
np.arange(0.85, 0.80, -0.01),
decimal=4)
assert repr(load_proposals) == (
f'{load_proposals.__class__.__name__}('
f'top_k={5}, '
f'pgm_proposals_dir={self.proposals_dir}, '
f'pgm_features_dir={self.bsp_feature_dir}, '
f'proposal_ext=.csv, '
f'feature_ext=.npy)')
def test_load_audio_feature(self):
target_keys = ['audios']
inputs = copy.deepcopy(self.audio_feature_results)
load_audio_feature = LoadAudioFeature()
results = load_audio_feature(inputs)
assert assert_dict_has_keys(results, target_keys)
# test when no audio feature file exists
inputs = copy.deepcopy(self.audio_feature_results)
inputs['audio_path'] = 'foo/foo/bar.npy'
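        # With pad_method='zero', a missing feature file is presumably
        # replaced by an all-zero (640, 80) placeholder, matching the shape
        # asserted below.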
load_audio_feature = LoadAudioFeature()
results = load_audio_feature(inputs)
assert results['audios'].shape == (640, 80)
assert assert_dict_has_keys(results, target_keys)
assert repr(load_audio_feature) == (
f'{load_audio_feature.__class__.__name__}('
f'pad_method=zero)')
| 6,171
| 39.605263
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_localization.py
|
import copy
import numpy as np
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import GenerateLocalizationLabels
from .base import BaseTestLoading
class TestLocalization(BaseTestLoading):
def test_generate_localization_label(self):
action_result = copy.deepcopy(self.action_results)
action_result['raw_feature'] = np.random.randn(400, 5)
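        # gt_bbox holds the annotated segment boundaries normalized by the
        # video duration, (0.375, 0.625) for this fixture.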
# test default setting
target_keys = ['gt_bbox']
generate_localization_labels = GenerateLocalizationLabels()
generate_localization_labels_result = generate_localization_labels(
action_result)
assert assert_dict_has_keys(generate_localization_labels_result,
target_keys)
assert_array_almost_equal(
generate_localization_labels_result['gt_bbox'], [[0.375, 0.625]],
decimal=4)
| 940
| 32.607143
| 77
|
py
|