Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- FateZero-main/data/negative_reg/car/21-67a268b0d4c005794f46f335fd278f251e1bfb37.jpg +3 -0
- FateZero-main/data/negative_reg/car/247-3532a178a890fed824f017222f8aeef46700771a.jpg +3 -0
- FateZero-main/data/negative_reg/car/262-cca35c12879e8cb215accd05f0c217475f319bef.jpg +3 -0
- FateZero-main/data/negative_reg/car/329-d92eeeb099a81d02f9e56e9619cedea9d2c233c2.jpg +3 -0
- FateZero-main/data/negative_reg/car/338-a5a37c85a5d8694106123c25f44190cdca3797bc.jpg +3 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/__init__.py +29 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/checkpoint.py +167 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/closure.py +11 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/evaluation.py +509 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/hook.py +92 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py +15 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/base.py +166 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py +58 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py +78 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py +82 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py +117 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/text.py +256 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py +56 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +670 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py +493 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py +20 -0
- Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py +22 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/__init__.py +19 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/ade.py +84 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/builder.py +169 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/chase_db1.py +27 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/cityscapes.py +217 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/custom.py +400 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/dataset_wrappers.py +50 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/drive.py +27 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/hrf.py +27 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pascal_context.py +103 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/__init__.py +16 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/compose.py +51 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/formating.py +288 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/loading.py +153 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/test_time_aug.py +133 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/transforms.py +889 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/stare.py +27 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/voc.py +29 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/__init__.py +12 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/__init__.py +17 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/cgnet.py +367 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/fast_scnn.py +375 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/hrnet.py +555 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py +180 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/mobilenet_v3.py +255 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnest.py +314 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnet.py +688 -0
- Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnext.py +145 -0
FateZero-main/data/negative_reg/car/21-67a268b0d4c005794f46f335fd278f251e1bfb37.jpg
ADDED
|
Git LFS Details
|
FateZero-main/data/negative_reg/car/247-3532a178a890fed824f017222f8aeef46700771a.jpg
ADDED
|
Git LFS Details
|
FateZero-main/data/negative_reg/car/262-cca35c12879e8cb215accd05f0c217475f319bef.jpg
ADDED
|
Git LFS Details
|
FateZero-main/data/negative_reg/car/329-d92eeeb099a81d02f9e56e9619cedea9d2c233c2.jpg
ADDED
|
Git LFS Details
|
FateZero-main/data/negative_reg/car/338-a5a37c85a5d8694106123c25f44190cdca3797bc.jpg
ADDED
|
Git LFS Details
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .checkpoint import CheckpointHook
|
| 3 |
+
from .closure import ClosureHook
|
| 4 |
+
from .ema import EMAHook
|
| 5 |
+
from .evaluation import DistEvalHook, EvalHook
|
| 6 |
+
from .hook import HOOKS, Hook
|
| 7 |
+
from .iter_timer import IterTimerHook
|
| 8 |
+
from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
|
| 9 |
+
NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
|
| 10 |
+
TextLoggerHook, WandbLoggerHook)
|
| 11 |
+
from .lr_updater import LrUpdaterHook
|
| 12 |
+
from .memory import EmptyCacheHook
|
| 13 |
+
from .momentum_updater import MomentumUpdaterHook
|
| 14 |
+
from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
|
| 15 |
+
GradientCumulativeOptimizerHook, OptimizerHook)
|
| 16 |
+
from .profiler import ProfilerHook
|
| 17 |
+
from .sampler_seed import DistSamplerSeedHook
|
| 18 |
+
from .sync_buffer import SyncBuffersHook
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
|
| 22 |
+
'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
|
| 23 |
+
'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
|
| 24 |
+
'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
|
| 25 |
+
'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
|
| 26 |
+
'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
|
| 27 |
+
'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
|
| 28 |
+
'GradientCumulativeFp16OptimizerHook'
|
| 29 |
+
]
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from annotator.uniformer.mmcv.fileio import FileClient
|
| 6 |
+
from ..dist_utils import allreduce_params, master_only
|
| 7 |
+
from .hook import HOOKS, Hook
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@HOOKS.register_module()
|
| 11 |
+
class CheckpointHook(Hook):
|
| 12 |
+
"""Save checkpoints periodically.
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
interval (int): The saving period. If ``by_epoch=True``, interval
|
| 16 |
+
indicates epochs, otherwise it indicates iterations.
|
| 17 |
+
Default: -1, which means "never".
|
| 18 |
+
by_epoch (bool): Saving checkpoints by epoch or by iteration.
|
| 19 |
+
Default: True.
|
| 20 |
+
save_optimizer (bool): Whether to save optimizer state_dict in the
|
| 21 |
+
checkpoint. It is usually used for resuming experiments.
|
| 22 |
+
Default: True.
|
| 23 |
+
out_dir (str, optional): The root directory to save checkpoints. If not
|
| 24 |
+
specified, ``runner.work_dir`` will be used by default. If
|
| 25 |
+
specified, the ``out_dir`` will be the concatenation of ``out_dir``
|
| 26 |
+
and the last level directory of ``runner.work_dir``.
|
| 27 |
+
`Changed in version 1.3.16.`
|
| 28 |
+
max_keep_ckpts (int, optional): The maximum checkpoints to keep.
|
| 29 |
+
In some cases we want only the latest few checkpoints and would
|
| 30 |
+
like to delete old ones to save the disk space.
|
| 31 |
+
Default: -1, which means unlimited.
|
| 32 |
+
save_last (bool, optional): Whether to force the last checkpoint to be
|
| 33 |
+
saved regardless of interval. Default: True.
|
| 34 |
+
sync_buffer (bool, optional): Whether to synchronize buffers in
|
| 35 |
+
different gpus. Default: False.
|
| 36 |
+
file_client_args (dict, optional): Arguments to instantiate a
|
| 37 |
+
FileClient. See :class:`mmcv.fileio.FileClient` for details.
|
| 38 |
+
Default: None.
|
| 39 |
+
`New in version 1.3.16.`
|
| 40 |
+
|
| 41 |
+
.. warning::
|
| 42 |
+
Before v1.3.16, the ``out_dir`` argument indicates the path where the
|
| 43 |
+
checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
|
| 44 |
+
root directory and the final path to save checkpoint is the
|
| 45 |
+
concatenation of ``out_dir`` and the last level directory of
|
| 46 |
+
``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
|
| 47 |
+
and the value of ``runner.work_dir`` is "/path/of/B", then the final
|
| 48 |
+
path will be "/path/of/A/B".
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(self,
|
| 52 |
+
interval=-1,
|
| 53 |
+
by_epoch=True,
|
| 54 |
+
save_optimizer=True,
|
| 55 |
+
out_dir=None,
|
| 56 |
+
max_keep_ckpts=-1,
|
| 57 |
+
save_last=True,
|
| 58 |
+
sync_buffer=False,
|
| 59 |
+
file_client_args=None,
|
| 60 |
+
**kwargs):
|
| 61 |
+
self.interval = interval
|
| 62 |
+
self.by_epoch = by_epoch
|
| 63 |
+
self.save_optimizer = save_optimizer
|
| 64 |
+
self.out_dir = out_dir
|
| 65 |
+
self.max_keep_ckpts = max_keep_ckpts
|
| 66 |
+
self.save_last = save_last
|
| 67 |
+
self.args = kwargs
|
| 68 |
+
self.sync_buffer = sync_buffer
|
| 69 |
+
self.file_client_args = file_client_args
|
| 70 |
+
|
| 71 |
+
def before_run(self, runner):
|
| 72 |
+
if not self.out_dir:
|
| 73 |
+
self.out_dir = runner.work_dir
|
| 74 |
+
|
| 75 |
+
self.file_client = FileClient.infer_client(self.file_client_args,
|
| 76 |
+
self.out_dir)
|
| 77 |
+
|
| 78 |
+
# if `self.out_dir` is not equal to `runner.work_dir`, it means that
|
| 79 |
+
# `self.out_dir` is set so the final `self.out_dir` is the
|
| 80 |
+
# concatenation of `self.out_dir` and the last level directory of
|
| 81 |
+
# `runner.work_dir`
|
| 82 |
+
if self.out_dir != runner.work_dir:
|
| 83 |
+
basename = osp.basename(runner.work_dir.rstrip(osp.sep))
|
| 84 |
+
self.out_dir = self.file_client.join_path(self.out_dir, basename)
|
| 85 |
+
|
| 86 |
+
runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
|
| 87 |
+
f'{self.file_client.name}.'))
|
| 88 |
+
|
| 89 |
+
# disable the create_symlink option because some file backends do not
|
| 90 |
+
# allow to create a symlink
|
| 91 |
+
if 'create_symlink' in self.args:
|
| 92 |
+
if self.args[
|
| 93 |
+
'create_symlink'] and not self.file_client.allow_symlink:
|
| 94 |
+
self.args['create_symlink'] = False
|
| 95 |
+
warnings.warn(
|
| 96 |
+
('create_symlink is set as True by the user but is changed'
|
| 97 |
+
'to be False because creating symbolic link is not '
|
| 98 |
+
f'allowed in {self.file_client.name}'))
|
| 99 |
+
else:
|
| 100 |
+
self.args['create_symlink'] = self.file_client.allow_symlink
|
| 101 |
+
|
| 102 |
+
def after_train_epoch(self, runner):
|
| 103 |
+
if not self.by_epoch:
|
| 104 |
+
return
|
| 105 |
+
|
| 106 |
+
# save checkpoint for following cases:
|
| 107 |
+
# 1. every ``self.interval`` epochs
|
| 108 |
+
# 2. reach the last epoch of training
|
| 109 |
+
if self.every_n_epochs(
|
| 110 |
+
runner, self.interval) or (self.save_last
|
| 111 |
+
and self.is_last_epoch(runner)):
|
| 112 |
+
runner.logger.info(
|
| 113 |
+
f'Saving checkpoint at {runner.epoch + 1} epochs')
|
| 114 |
+
if self.sync_buffer:
|
| 115 |
+
allreduce_params(runner.model.buffers())
|
| 116 |
+
self._save_checkpoint(runner)
|
| 117 |
+
|
| 118 |
+
@master_only
|
| 119 |
+
def _save_checkpoint(self, runner):
|
| 120 |
+
"""Save the current checkpoint and delete unwanted checkpoint."""
|
| 121 |
+
runner.save_checkpoint(
|
| 122 |
+
self.out_dir, save_optimizer=self.save_optimizer, **self.args)
|
| 123 |
+
if runner.meta is not None:
|
| 124 |
+
if self.by_epoch:
|
| 125 |
+
cur_ckpt_filename = self.args.get(
|
| 126 |
+
'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
|
| 127 |
+
else:
|
| 128 |
+
cur_ckpt_filename = self.args.get(
|
| 129 |
+
'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
|
| 130 |
+
runner.meta.setdefault('hook_msgs', dict())
|
| 131 |
+
runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
|
| 132 |
+
self.out_dir, cur_ckpt_filename)
|
| 133 |
+
# remove other checkpoints
|
| 134 |
+
if self.max_keep_ckpts > 0:
|
| 135 |
+
if self.by_epoch:
|
| 136 |
+
name = 'epoch_{}.pth'
|
| 137 |
+
current_ckpt = runner.epoch + 1
|
| 138 |
+
else:
|
| 139 |
+
name = 'iter_{}.pth'
|
| 140 |
+
current_ckpt = runner.iter + 1
|
| 141 |
+
redundant_ckpts = range(
|
| 142 |
+
current_ckpt - self.max_keep_ckpts * self.interval, 0,
|
| 143 |
+
-self.interval)
|
| 144 |
+
filename_tmpl = self.args.get('filename_tmpl', name)
|
| 145 |
+
for _step in redundant_ckpts:
|
| 146 |
+
ckpt_path = self.file_client.join_path(
|
| 147 |
+
self.out_dir, filename_tmpl.format(_step))
|
| 148 |
+
if self.file_client.isfile(ckpt_path):
|
| 149 |
+
self.file_client.remove(ckpt_path)
|
| 150 |
+
else:
|
| 151 |
+
break
|
| 152 |
+
|
| 153 |
+
def after_train_iter(self, runner):
|
| 154 |
+
if self.by_epoch:
|
| 155 |
+
return
|
| 156 |
+
|
| 157 |
+
# save checkpoint for following cases:
|
| 158 |
+
# 1. every ``self.interval`` iterations
|
| 159 |
+
# 2. reach the last iteration of training
|
| 160 |
+
if self.every_n_iters(
|
| 161 |
+
runner, self.interval) or (self.save_last
|
| 162 |
+
and self.is_last_iter(runner)):
|
| 163 |
+
runner.logger.info(
|
| 164 |
+
f'Saving checkpoint at {runner.iter + 1} iterations')
|
| 165 |
+
if self.sync_buffer:
|
| 166 |
+
allreduce_params(runner.model.buffers())
|
| 167 |
+
self._save_checkpoint(runner)
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/closure.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
|
| 6 |
+
class ClosureHook(Hook):
|
| 7 |
+
|
| 8 |
+
def __init__(self, fn_name, fn):
|
| 9 |
+
assert hasattr(self, fn_name)
|
| 10 |
+
assert callable(fn)
|
| 11 |
+
setattr(self, fn_name, fn)
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/evaluation.py
ADDED
|
@@ -0,0 +1,509 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import warnings
|
| 4 |
+
from math import inf
|
| 5 |
+
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 8 |
+
from torch.utils.data import DataLoader
|
| 9 |
+
|
| 10 |
+
from annotator.uniformer.mmcv.fileio import FileClient
|
| 11 |
+
from annotator.uniformer.mmcv.utils import is_seq_of
|
| 12 |
+
from .hook import Hook
|
| 13 |
+
from .logger import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class EvalHook(Hook):
|
| 17 |
+
"""Non-Distributed evaluation hook.
|
| 18 |
+
|
| 19 |
+
This hook will regularly perform evaluation in a given interval when
|
| 20 |
+
performing in non-distributed environment.
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
dataloader (DataLoader): A PyTorch dataloader, whose dataset has
|
| 24 |
+
implemented ``evaluate`` function.
|
| 25 |
+
start (int | None, optional): Evaluation starting epoch. It enables
|
| 26 |
+
evaluation before the training starts if ``start`` <= the resuming
|
| 27 |
+
epoch. If None, whether to evaluate is merely decided by
|
| 28 |
+
``interval``. Default: None.
|
| 29 |
+
interval (int): Evaluation interval. Default: 1.
|
| 30 |
+
by_epoch (bool): Determine perform evaluation by epoch or by iteration.
|
| 31 |
+
If set to True, it will perform by epoch. Otherwise, by iteration.
|
| 32 |
+
Default: True.
|
| 33 |
+
save_best (str, optional): If a metric is specified, it would measure
|
| 34 |
+
the best checkpoint during evaluation. The information about best
|
| 35 |
+
checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
|
| 36 |
+
best score value and best checkpoint path, which will be also
|
| 37 |
+
loaded when resume checkpoint. Options are the evaluation metrics
|
| 38 |
+
on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox
|
| 39 |
+
detection and instance segmentation. ``AR@100`` for proposal
|
| 40 |
+
recall. If ``save_best`` is ``auto``, the first key of the returned
|
| 41 |
+
``OrderedDict`` result will be used. Default: None.
|
| 42 |
+
rule (str | None, optional): Comparison rule for best score. If set to
|
| 43 |
+
None, it will infer a reasonable rule. Keys such as 'acc', 'top'
|
| 44 |
+
.etc will be inferred by 'greater' rule. Keys contain 'loss' will
|
| 45 |
+
be inferred by 'less' rule. Options are 'greater', 'less', None.
|
| 46 |
+
Default: None.
|
| 47 |
+
test_fn (callable, optional): test a model with samples from a
|
| 48 |
+
dataloader, and return the test results. If ``None``, the default
|
| 49 |
+
test function ``mmcv.engine.single_gpu_test`` will be used.
|
| 50 |
+
(default: ``None``)
|
| 51 |
+
greater_keys (List[str] | None, optional): Metric keys that will be
|
| 52 |
+
inferred by 'greater' comparison rule. If ``None``,
|
| 53 |
+
_default_greater_keys will be used. (default: ``None``)
|
| 54 |
+
less_keys (List[str] | None, optional): Metric keys that will be
|
| 55 |
+
inferred by 'less' comparison rule. If ``None``, _default_less_keys
|
| 56 |
+
will be used. (default: ``None``)
|
| 57 |
+
out_dir (str, optional): The root directory to save checkpoints. If not
|
| 58 |
+
specified, `runner.work_dir` will be used by default. If specified,
|
| 59 |
+
the `out_dir` will be the concatenation of `out_dir` and the last
|
| 60 |
+
level directory of `runner.work_dir`.
|
| 61 |
+
`New in version 1.3.16.`
|
| 62 |
+
file_client_args (dict): Arguments to instantiate a FileClient.
|
| 63 |
+
See :class:`mmcv.fileio.FileClient` for details. Default: None.
|
| 64 |
+
`New in version 1.3.16.`
|
| 65 |
+
**eval_kwargs: Evaluation arguments fed into the evaluate function of
|
| 66 |
+
the dataset.
|
| 67 |
+
|
| 68 |
+
Notes:
|
| 69 |
+
If new arguments are added for EvalHook, tools/test.py,
|
| 70 |
+
tools/eval_metric.py may be affected.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
# Since the key for determine greater or less is related to the downstream
|
| 74 |
+
# tasks, downstream repos may need to overwrite the following inner
|
| 75 |
+
# variable accordingly.
|
| 76 |
+
|
| 77 |
+
rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
|
| 78 |
+
init_value_map = {'greater': -inf, 'less': inf}
|
| 79 |
+
_default_greater_keys = [
|
| 80 |
+
'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
|
| 81 |
+
'mAcc', 'aAcc'
|
| 82 |
+
]
|
| 83 |
+
_default_less_keys = ['loss']
|
| 84 |
+
|
| 85 |
+
def __init__(self,
|
| 86 |
+
dataloader,
|
| 87 |
+
start=None,
|
| 88 |
+
interval=1,
|
| 89 |
+
by_epoch=True,
|
| 90 |
+
save_best=None,
|
| 91 |
+
rule=None,
|
| 92 |
+
test_fn=None,
|
| 93 |
+
greater_keys=None,
|
| 94 |
+
less_keys=None,
|
| 95 |
+
out_dir=None,
|
| 96 |
+
file_client_args=None,
|
| 97 |
+
**eval_kwargs):
|
| 98 |
+
if not isinstance(dataloader, DataLoader):
|
| 99 |
+
raise TypeError(f'dataloader must be a pytorch DataLoader, '
|
| 100 |
+
f'but got {type(dataloader)}')
|
| 101 |
+
|
| 102 |
+
if interval <= 0:
|
| 103 |
+
raise ValueError(f'interval must be a positive number, '
|
| 104 |
+
f'but got {interval}')
|
| 105 |
+
|
| 106 |
+
assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
|
| 107 |
+
|
| 108 |
+
if start is not None and start < 0:
|
| 109 |
+
raise ValueError(f'The evaluation start epoch {start} is smaller '
|
| 110 |
+
f'than 0')
|
| 111 |
+
|
| 112 |
+
self.dataloader = dataloader
|
| 113 |
+
self.interval = interval
|
| 114 |
+
self.start = start
|
| 115 |
+
self.by_epoch = by_epoch
|
| 116 |
+
|
| 117 |
+
assert isinstance(save_best, str) or save_best is None, \
|
| 118 |
+
'""save_best"" should be a str or None ' \
|
| 119 |
+
f'rather than {type(save_best)}'
|
| 120 |
+
self.save_best = save_best
|
| 121 |
+
self.eval_kwargs = eval_kwargs
|
| 122 |
+
self.initial_flag = True
|
| 123 |
+
|
| 124 |
+
if test_fn is None:
|
| 125 |
+
from annotator.uniformer.mmcv.engine import single_gpu_test
|
| 126 |
+
self.test_fn = single_gpu_test
|
| 127 |
+
else:
|
| 128 |
+
self.test_fn = test_fn
|
| 129 |
+
|
| 130 |
+
if greater_keys is None:
|
| 131 |
+
self.greater_keys = self._default_greater_keys
|
| 132 |
+
else:
|
| 133 |
+
if not isinstance(greater_keys, (list, tuple)):
|
| 134 |
+
greater_keys = (greater_keys, )
|
| 135 |
+
assert is_seq_of(greater_keys, str)
|
| 136 |
+
self.greater_keys = greater_keys
|
| 137 |
+
|
| 138 |
+
if less_keys is None:
|
| 139 |
+
self.less_keys = self._default_less_keys
|
| 140 |
+
else:
|
| 141 |
+
if not isinstance(less_keys, (list, tuple)):
|
| 142 |
+
less_keys = (less_keys, )
|
| 143 |
+
assert is_seq_of(less_keys, str)
|
| 144 |
+
self.less_keys = less_keys
|
| 145 |
+
|
| 146 |
+
if self.save_best is not None:
|
| 147 |
+
self.best_ckpt_path = None
|
| 148 |
+
self._init_rule(rule, self.save_best)
|
| 149 |
+
|
| 150 |
+
self.out_dir = out_dir
|
| 151 |
+
self.file_client_args = file_client_args
|
| 152 |
+
|
| 153 |
+
def _init_rule(self, rule, key_indicator):
|
| 154 |
+
"""Initialize rule, key_indicator, comparison_func, and best score.
|
| 155 |
+
|
| 156 |
+
Here is the rule to determine which rule is used for key indicator
|
| 157 |
+
when the rule is not specific (note that the key indicator matching
|
| 158 |
+
is case-insensitive):
|
| 159 |
+
1. If the key indicator is in ``self.greater_keys``, the rule will be
|
| 160 |
+
specified as 'greater'.
|
| 161 |
+
2. Or if the key indicator is in ``self.less_keys``, the rule will be
|
| 162 |
+
specified as 'less'.
|
| 163 |
+
3. Or if the key indicator is equal to the substring in any one item
|
| 164 |
+
in ``self.greater_keys``, the rule will be specified as 'greater'.
|
| 165 |
+
4. Or if the key indicator is equal to the substring in any one item
|
| 166 |
+
in ``self.less_keys``, the rule will be specified as 'less'.
|
| 167 |
+
|
| 168 |
+
Args:
|
| 169 |
+
rule (str | None): Comparison rule for best score.
|
| 170 |
+
key_indicator (str | None): Key indicator to determine the
|
| 171 |
+
comparison rule.
|
| 172 |
+
"""
|
| 173 |
+
if rule not in self.rule_map and rule is not None:
|
| 174 |
+
raise KeyError(f'rule must be greater, less or None, '
|
| 175 |
+
f'but got {rule}.')
|
| 176 |
+
|
| 177 |
+
if rule is None:
|
| 178 |
+
if key_indicator != 'auto':
|
| 179 |
+
# `_lc` here means we use the lower case of keys for
|
| 180 |
+
# case-insensitive matching
|
| 181 |
+
key_indicator_lc = key_indicator.lower()
|
| 182 |
+
greater_keys = [key.lower() for key in self.greater_keys]
|
| 183 |
+
less_keys = [key.lower() for key in self.less_keys]
|
| 184 |
+
|
| 185 |
+
if key_indicator_lc in greater_keys:
|
| 186 |
+
rule = 'greater'
|
| 187 |
+
elif key_indicator_lc in less_keys:
|
| 188 |
+
rule = 'less'
|
| 189 |
+
elif any(key in key_indicator_lc for key in greater_keys):
|
| 190 |
+
rule = 'greater'
|
| 191 |
+
elif any(key in key_indicator_lc for key in less_keys):
|
| 192 |
+
rule = 'less'
|
| 193 |
+
else:
|
| 194 |
+
raise ValueError(f'Cannot infer the rule for key '
|
| 195 |
+
f'{key_indicator}, thus a specific rule '
|
| 196 |
+
f'must be specified.')
|
| 197 |
+
self.rule = rule
|
| 198 |
+
self.key_indicator = key_indicator
|
| 199 |
+
if self.rule is not None:
|
| 200 |
+
self.compare_func = self.rule_map[self.rule]
|
| 201 |
+
|
| 202 |
+
def before_run(self, runner):
|
| 203 |
+
if not self.out_dir:
|
| 204 |
+
self.out_dir = runner.work_dir
|
| 205 |
+
|
| 206 |
+
self.file_client = FileClient.infer_client(self.file_client_args,
|
| 207 |
+
self.out_dir)
|
| 208 |
+
|
| 209 |
+
# if `self.out_dir` is not equal to `runner.work_dir`, it means that
|
| 210 |
+
# `self.out_dir` is set so the final `self.out_dir` is the
|
| 211 |
+
# concatenation of `self.out_dir` and the last level directory of
|
| 212 |
+
# `runner.work_dir`
|
| 213 |
+
if self.out_dir != runner.work_dir:
|
| 214 |
+
basename = osp.basename(runner.work_dir.rstrip(osp.sep))
|
| 215 |
+
self.out_dir = self.file_client.join_path(self.out_dir, basename)
|
| 216 |
+
runner.logger.info(
|
| 217 |
+
(f'The best checkpoint will be saved to {self.out_dir} by '
|
| 218 |
+
f'{self.file_client.name}'))
|
| 219 |
+
|
| 220 |
+
if self.save_best is not None:
|
| 221 |
+
if runner.meta is None:
|
| 222 |
+
warnings.warn('runner.meta is None. Creating an empty one.')
|
| 223 |
+
runner.meta = dict()
|
| 224 |
+
runner.meta.setdefault('hook_msgs', dict())
|
| 225 |
+
self.best_ckpt_path = runner.meta['hook_msgs'].get(
|
| 226 |
+
'best_ckpt', None)
|
| 227 |
+
|
| 228 |
+
def before_train_iter(self, runner):
|
| 229 |
+
"""Evaluate the model only at the start of training by iteration."""
|
| 230 |
+
if self.by_epoch or not self.initial_flag:
|
| 231 |
+
return
|
| 232 |
+
if self.start is not None and runner.iter >= self.start:
|
| 233 |
+
self.after_train_iter(runner)
|
| 234 |
+
self.initial_flag = False
|
| 235 |
+
|
| 236 |
+
def before_train_epoch(self, runner):
|
| 237 |
+
"""Evaluate the model only at the start of training by epoch."""
|
| 238 |
+
if not (self.by_epoch and self.initial_flag):
|
| 239 |
+
return
|
| 240 |
+
if self.start is not None and runner.epoch >= self.start:
|
| 241 |
+
self.after_train_epoch(runner)
|
| 242 |
+
self.initial_flag = False
|
| 243 |
+
|
| 244 |
+
def after_train_iter(self, runner):
|
| 245 |
+
"""Called after every training iter to evaluate the results."""
|
| 246 |
+
if not self.by_epoch and self._should_evaluate(runner):
|
| 247 |
+
# Because the priority of EvalHook is higher than LoggerHook, the
|
| 248 |
+
# training log and the evaluating log are mixed. Therefore,
|
| 249 |
+
# we need to dump the training log and clear it before evaluating
|
| 250 |
+
# log is generated. In addition, this problem will only appear in
|
| 251 |
+
# `IterBasedRunner` whose `self.by_epoch` is False, because
|
| 252 |
+
# `EpochBasedRunner` whose `self.by_epoch` is True calls
|
| 253 |
+
# `_do_evaluate` in `after_train_epoch` stage, and at this stage
|
| 254 |
+
# the training log has been printed, so it will not cause any
|
| 255 |
+
# problem. more details at
|
| 256 |
+
# https://github.com/open-mmlab/mmsegmentation/issues/694
|
| 257 |
+
for hook in runner._hooks:
|
| 258 |
+
if isinstance(hook, LoggerHook):
|
| 259 |
+
hook.after_train_iter(runner)
|
| 260 |
+
runner.log_buffer.clear()
|
| 261 |
+
|
| 262 |
+
self._do_evaluate(runner)
|
| 263 |
+
|
| 264 |
+
def after_train_epoch(self, runner):
|
| 265 |
+
"""Called after every training epoch to evaluate the results."""
|
| 266 |
+
if self.by_epoch and self._should_evaluate(runner):
|
| 267 |
+
self._do_evaluate(runner)
|
| 268 |
+
|
| 269 |
+
def _do_evaluate(self, runner):
|
| 270 |
+
"""perform evaluation and save ckpt."""
|
| 271 |
+
results = self.test_fn(runner.model, self.dataloader)
|
| 272 |
+
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
|
| 273 |
+
key_score = self.evaluate(runner, results)
|
| 274 |
+
# the key_score may be `None` so it needs to skip the action to save
|
| 275 |
+
# the best checkpoint
|
| 276 |
+
if self.save_best and key_score:
|
| 277 |
+
self._save_ckpt(runner, key_score)
|
| 278 |
+
|
| 279 |
+
def _should_evaluate(self, runner):
|
| 280 |
+
"""Judge whether to perform evaluation.
|
| 281 |
+
|
| 282 |
+
Here is the rule to judge whether to perform evaluation:
|
| 283 |
+
1. It will not perform evaluation during the epoch/iteration interval,
|
| 284 |
+
which is determined by ``self.interval``.
|
| 285 |
+
2. It will not perform evaluation if the start time is larger than
|
| 286 |
+
current time.
|
| 287 |
+
3. It will not perform evaluation when current time is larger than
|
| 288 |
+
the start time but during epoch/iteration interval.
|
| 289 |
+
|
| 290 |
+
Returns:
|
| 291 |
+
bool: The flag indicating whether to perform evaluation.
|
| 292 |
+
"""
|
| 293 |
+
if self.by_epoch:
|
| 294 |
+
current = runner.epoch
|
| 295 |
+
check_time = self.every_n_epochs
|
| 296 |
+
else:
|
| 297 |
+
current = runner.iter
|
| 298 |
+
check_time = self.every_n_iters
|
| 299 |
+
|
| 300 |
+
if self.start is None:
|
| 301 |
+
if not check_time(runner, self.interval):
|
| 302 |
+
# No evaluation during the interval.
|
| 303 |
+
return False
|
| 304 |
+
elif (current + 1) < self.start:
|
| 305 |
+
# No evaluation if start is larger than the current time.
|
| 306 |
+
return False
|
| 307 |
+
else:
|
| 308 |
+
# Evaluation only at epochs/iters 3, 5, 7...
|
| 309 |
+
# if start==3 and interval==2
|
| 310 |
+
if (current + 1 - self.start) % self.interval:
|
| 311 |
+
return False
|
| 312 |
+
return True
|
| 313 |
+
|
| 314 |
+
def _save_ckpt(self, runner, key_score):
|
| 315 |
+
"""Save the best checkpoint.
|
| 316 |
+
|
| 317 |
+
It will compare the score according to the compare function, write
|
| 318 |
+
related information (best score, best checkpoint path) and save the
|
| 319 |
+
best checkpoint into ``work_dir``.
|
| 320 |
+
"""
|
| 321 |
+
if self.by_epoch:
|
| 322 |
+
current = f'epoch_{runner.epoch + 1}'
|
| 323 |
+
cur_type, cur_time = 'epoch', runner.epoch + 1
|
| 324 |
+
else:
|
| 325 |
+
current = f'iter_{runner.iter + 1}'
|
| 326 |
+
cur_type, cur_time = 'iter', runner.iter + 1
|
| 327 |
+
|
| 328 |
+
best_score = runner.meta['hook_msgs'].get(
|
| 329 |
+
'best_score', self.init_value_map[self.rule])
|
| 330 |
+
if self.compare_func(key_score, best_score):
|
| 331 |
+
best_score = key_score
|
| 332 |
+
runner.meta['hook_msgs']['best_score'] = best_score
|
| 333 |
+
|
| 334 |
+
if self.best_ckpt_path and self.file_client.isfile(
|
| 335 |
+
self.best_ckpt_path):
|
| 336 |
+
self.file_client.remove(self.best_ckpt_path)
|
| 337 |
+
runner.logger.info(
|
| 338 |
+
(f'The previous best checkpoint {self.best_ckpt_path} was '
|
| 339 |
+
'removed'))
|
| 340 |
+
|
| 341 |
+
best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
|
| 342 |
+
self.best_ckpt_path = self.file_client.join_path(
|
| 343 |
+
self.out_dir, best_ckpt_name)
|
| 344 |
+
runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
|
| 345 |
+
|
| 346 |
+
runner.save_checkpoint(
|
| 347 |
+
self.out_dir, best_ckpt_name, create_symlink=False)
|
| 348 |
+
runner.logger.info(
|
| 349 |
+
f'Now best checkpoint is saved as {best_ckpt_name}.')
|
| 350 |
+
runner.logger.info(
|
| 351 |
+
f'Best {self.key_indicator} is {best_score:0.4f} '
|
| 352 |
+
f'at {cur_time} {cur_type}.')
|
| 353 |
+
|
| 354 |
+
def evaluate(self, runner, results):
    """Evaluate the results.

    Args:
        runner (:obj:`mmcv.Runner`): The underlined training runner.
        results (list): Output results.

    Returns:
        float | None: Score of ``self.key_indicator`` when ``save_best``
        is enabled and results are available; otherwise ``None``.
    """
    eval_res = self.dataloader.dataset.evaluate(
        results, logger=runner.logger, **self.eval_kwargs)

    for name, val in eval_res.items():
        runner.log_buffer.output[name] = val
    runner.log_buffer.ready = True

    if self.save_best is not None:
        # If the performance of model is poor, the `eval_res` may be an
        # empty dict and it will raise exception when `self.save_best` is
        # not None. More details at
        # https://github.com/open-mmlab/mmdetection/issues/6265.
        if not eval_res:
            warnings.warn(
                'Since `eval_res` is an empty dict, the behavior to save '
                'the best checkpoint will be skipped in this evaluation.')
            return None

        if self.key_indicator == 'auto':
            # infer the key indicator from the first returned metric
            self._init_rule(self.rule, list(eval_res.keys())[0])
        return eval_res[self.key_indicator]

    return None
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the
            resuming epoch. If None, whether to evaluate is merely decided
            by ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by
            iteration. If set to True, it will perform by epoch.
            Otherwise, by iteration. Default: True.
        save_best (str, optional): If a metric is specified, it would
            measure the best checkpoint during evaluation. The information
            about best checkpoint would be saved in
            ``runner.meta['hook_msgs']`` to keep best score value and best
            checkpoint path, which will be also loaded when resuming a
            checkpoint. Options are the evaluation metrics on the test
            dataset, e.g. ``bbox_mAP``, ``segm_mAP``, ``AR@100``. If
            ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set
            to None, it will infer a reasonable rule. Keys such as 'acc',
            'top' etc. will be inferred by 'greater' rule; keys containing
            'loss' by 'less' rule. Options are 'greater', 'less', None.
            Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader in a multi-gpu manner, and return the test results.
            If ``None``, the default ``mmcv.engine.multi_gpu_test`` will be
            used. Default: None.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        broadcast_bn_buffer (bool): Whether to broadcast the
            buffer (running_mean and running_var) of rank 0 to other ranks
            before evaluation. Default: True.
        out_dir (str, optional): The root directory to save checkpoints.
            If not specified, ``runner.work_dir`` is used by default. If
            specified, ``out_dir`` is joined with the last level directory
            of ``runner.work_dir``.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Evaluation arguments fed into the evaluate function
            of the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):

        if test_fn is None:
            # Lazy import: only needed when no custom test function given.
            from annotator.uniformer.mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test

        super().__init__(
            dataloader,
            start=start,
            interval=interval,
            by_epoch=by_epoch,
            save_best=save_best,
            rule=rule,
            test_fn=test_fn,
            greater_keys=greater_keys,
            less_keys=less_keys,
            out_dir=out_dir,
            file_client_args=file_client_args,
            **eval_kwargs)

        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        results = self.test_fn(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 aggregates, evaluates and (optionally) saves.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # the key_score may be `None` so it needs to skip the action
            # to save the best checkpoint
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/hook.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from annotator.uniformer.mmcv.utils import Registry, is_method_overridden
|
| 3 |
+
|
| 4 |
+
HOOKS = Registry('hook')
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Hook:
    """Base class for all runner hooks.

    Subclasses override the stage methods they care about; every stage is
    a no-op by default. ``before/after_epoch`` and ``before/after_iter``
    act as shared defaults for their train/val counterparts.
    """

    # All stages a runner can trigger, in execution order.
    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        """Return True every ``n`` epochs (False when ``n`` <= 0)."""
        return (runner.epoch + 1) % n == 0 if n > 0 else False

    def every_n_inner_iters(self, runner, n):
        """Return True every ``n`` inner (within-epoch) iterations."""
        return (runner.inner_iter + 1) % n == 0 if n > 0 else False

    def every_n_iters(self, runner, n):
        """Return True every ``n`` global iterations."""
        return (runner.iter + 1) % n == 0 if n > 0 else False

    def end_of_epoch(self, runner):
        """Return True on the last inner iteration of the current epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """Return True during the final training epoch."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """Return True on the final training iteration."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the stages this hook actually overrides.

        Used by the runner to register the hook only for relevant stages.
        """
        trigger_stages = set()
        for stage in Hook.stages:
            if is_method_overridden(stage, Hook, self):
                trigger_stages.add(stage)

        # some methods will be triggered in multi stages
        # use this dict to map method to stages.
        method_stages_map = {
            'before_epoch': ['before_train_epoch', 'before_val_epoch'],
            'after_epoch': ['after_train_epoch', 'after_val_epoch'],
            'before_iter': ['before_train_iter', 'before_val_iter'],
            'after_iter': ['after_train_iter', 'after_val_iter'],
        }

        for method, map_stages in method_stages_map.items():
            if is_method_overridden(method, Hook, self):
                trigger_stages.update(map_stages)

        # Preserve canonical stage ordering in the result.
        return [stage for stage in Hook.stages if stage in trigger_stages]
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .base import LoggerHook
|
| 3 |
+
from .dvclive import DvcliveLoggerHook
|
| 4 |
+
from .mlflow import MlflowLoggerHook
|
| 5 |
+
from .neptune import NeptuneLoggerHook
|
| 6 |
+
from .pavi import PaviLoggerHook
|
| 7 |
+
from .tensorboard import TensorboardLoggerHook
|
| 8 |
+
from .text import TextLoggerHook
|
| 9 |
+
from .wandb import WandbLoggerHook
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
|
| 13 |
+
'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
|
| 14 |
+
'NeptuneLoggerHook', 'DvcliveLoggerHook'
|
| 15 |
+
]
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/base.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
from abc import ABCMeta, abstractmethod
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from ..hook import Hook
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.
    """

    __metaclass__ = ABCMeta

    def __init__(self,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell the input variable is a scalar or not.

        Args:
            val: Input variable.
            include_np (bool): Whether include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether include 0-d torch.Tensor as a
                scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        # NOTE(review): `len(val) == 1` raises TypeError for a true 0-d
        # tensor despite the docstring; kept as-is to preserve upstream
        # behavior -- confirm intent before changing.
        elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:
            return True
        else:
            return False

    def get_mode(self, runner):
        """Infer the logging mode ('train' or 'val') from the runner."""
        if runner.mode == 'train':
            # During a train epoch, 'time' is present in the log buffer;
            # its absence means a val pass ran inside train mode.
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return mode

    def get_epoch(self, runner):
        """Return the 1-based epoch number for logging."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # normal val mode
            # runner.epoch += 1 has been done before val workflow
            epoch = runner.epoch
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', "
                             f'but got {runner.mode}')
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect current learning rate(s) as loggable tags."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect current momentum value(s) as loggable tags."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Filter the runner's log buffer into a dict of loggable tags."""
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the last registered logger hook resets the buffer, so all
        # loggers see the same accumulated output.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()  # clear logs of last epoch

    def after_train_iter(self, runner):
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # not precise but more stable
            runner.log_buffer.average(self.interval)

        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class DvcliveLoggerHook(LoggerHook):
    """Class to log metrics with dvclive.

    It requires `dvclive`_ to be installed.

    Args:
        path (str): Directory where dvclive will write TSV log files.
        interval (int): Logging interval (every k iterations).
            Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
            Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used.
            Default: True.

    .. _dvclive:
        https://dvc.org/doc/dvclive
    """

    def __init__(self,
                 path,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 by_epoch=True):

        super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.path = path
        # Import eagerly so a missing dependency fails at construction
        # time rather than mid-training.
        self.import_dvclive()

    def import_dvclive(self):
        try:
            import dvclive
        except ImportError:
            raise ImportError(
                'Please run "pip install dvclive" to install dvclive')
        self.dvclive = dvclive

    @master_only
    def before_run(self, runner):
        self.dvclive.init(self.path)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            for k, v in tags.items():
                self.dvclive.log(k, v, step=self.get_iter(runner))
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        """Class to log metrics and (optionally) a trained model to MLflow.

        It requires `MLflow`_ to be installed.

        Args:
            exp_name (str, optional): Name of the experiment to be used.
                Default None.
                If not None, set the active experiment.
                If experiment does not exist, an experiment with provided
                name will be created.
            tags (dict of str: str, optional): Tags for the current run.
                Default None.
                If not None, set tags for the current run.
            log_model (bool, optional): Whether to log an MLflow artifact.
                Default True.
                If True, log runner.model as an MLflow artifact
                for the current run.
            interval (int): Logging interval (every k iterations).
            ignore_last (bool): Ignore the log of last iterations in each
                epoch if less than `interval`.
            reset_flag (bool): Whether to clear the output buffer after
                logging.
            by_epoch (bool): Whether EpochBasedRunner is used.

        .. _MLflow:
            https://www.mlflow.org/docs/latest/index.html
        """
        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
                                               reset_flag, by_epoch)
        # Import eagerly so a missing dependency fails at construction
        # time rather than mid-training.
        self.import_mlflow()
        self.exp_name = exp_name
        self.tags = tags
        self.log_model = log_model

    def import_mlflow(self):
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError(
                'Please run "pip install mlflow" to install mlflow')
        self.mlflow = mlflow
        self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        super(MlflowLoggerHook, self).before_run(runner)
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            self.mlflow.log_metrics(tags, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        if self.log_model:
            self.mlflow_pytorch.log_model(runner.model, 'models')
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `neptune-client` to be installed.

    Args:
        init_kwargs (dict): a dict contains the initialization keys:
            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of
              NEPTUNE_PROJECT environment variable will be taken.
            - api_token (str): User's API token. If None, the value of
              NEPTUNE_API_TOKEN environment variable will be taken. Note:
              it is strongly recommended to use the NEPTUNE_API_TOKEN
              environment variable rather than placing your API token in
              plain text in your source code.
            - name (str, optional, default is 'Untitled'): Editable name
              of the run, displayed in the run's Details and in the Runs
              table as a column.
            Check https://docs.neptune.ai/api-reference/neptune#init for
            more init arguments.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`.
        reset_flag (bool): Whether to clear the output buffer after logging.
        by_epoch (bool): Whether EpochBasedRunner is used.

    .. _NeptuneAI:
        https://docs.neptune.ai/you-should-know/logging-metadata
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):

        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        # Import eagerly so a missing dependency fails at construction
        # time rather than mid-training.
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError(
                'Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            for tag_name, tag_value in tags.items():
                if self.with_step:
                    self.run[tag_name].log(
                        tag_value, step=self.get_iter(runner))
                else:
                    # NOTE(review): upstream logs the entire `tags` dict
                    # under each tag name here (not `tag_value`); looks
                    # suspicious but is preserved to keep behavior
                    # identical -- confirm against neptune docs.
                    tags['global_step'] = self.get_iter(runner)
                    self.run[tag_name].log(tags)

    @master_only
    def after_run(self, runner):
        self.run.stop()
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import yaml
|
| 8 |
+
|
| 9 |
+
import annotator.uniformer.mmcv as mmcv
|
| 10 |
+
from ....parallel.utils import is_module_wrapper
|
| 11 |
+
from ...dist_utils import master_only
|
| 12 |
+
from ..hook import HOOKS
|
| 13 |
+
from .base import LoggerHook
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@HOOKS.register_module()
class PaviLoggerHook(LoggerHook):
    """Log runner metrics to a Pavi ``SummaryWriter``.

    Args:
        init_kwargs (dict, optional): Extra keyword arguments passed to
            ``pavi.SummaryWriter``.
        add_graph (bool): Whether to visualize the model graph at the
            beginning of the first epoch.
        add_last_ckpt (bool): Whether to upload the final checkpoint when
            the run ends.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each
            epoch if less than ``interval``.
        reset_flag (bool): Whether to clear the output buffer after
            logging.
        by_epoch (bool): Whether EpochBasedRunner is used.
        img_key (str): Key used to fetch an image batch from the data
            loader when ``add_graph`` is enabled.
    """

    def __init__(self,
                 init_kwargs=None,
                 add_graph=False,
                 add_last_ckpt=False,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True,
                 img_key='img_info'):
        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt
        self.img_key = img_key

    @master_only
    def before_run(self, runner):
        """Create the Pavi writer, seeding it with run name and config."""
        super().before_run(runner)
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')

        self.run_name = runner.work_dir.split('/')[-1]

        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['name'] = self.run_name
        self.init_kwargs['model'] = runner._model_name
        if runner.meta is not None:
            if 'config_dict' in runner.meta:
                config_dict = runner.meta['config_dict']
                assert isinstance(
                    config_dict,
                    dict), ('meta["config_dict"] has to be of a dict, '
                            f'but got {type(config_dict)}')
            elif 'config_file' in runner.meta:
                config_file = runner.meta['config_file']
                config_dict = dict(mmcv.Config.fromfile(config_file))
            else:
                config_dict = None
            if config_dict is not None:
                # 'max_.*iter' is parsed in pavi sdk as the maximum iterations
                # to properly set up the progress bar.
                config_dict = config_dict.copy()
                config_dict.setdefault('max_iter', runner.max_iters)
                # non-serializable values are first converted in
                # mmcv.dump to json
                config_dict = json.loads(
                    mmcv.dump(config_dict, file_format='json'))
                session_text = yaml.dump(config_dict)
                self.init_kwargs['session_text'] = session_text
        self.writer = SummaryWriter(**self.init_kwargs)

    def get_step(self, runner):
        """Get the total training step/epoch."""
        if self.get_mode(runner) == 'val' and self.by_epoch:
            return self.get_epoch(runner)
        return self.get_iter(runner)

    @master_only
    def log(self, runner):
        """Write all loggable tags as scalars under the current mode."""
        scalars = self.get_loggable_tags(runner, add_mode=False)
        if scalars:
            self.writer.add_scalars(
                self.get_mode(runner), scalars, self.get_step(runner))

    @master_only
    def after_run(self, runner):
        """Optionally upload the last checkpoint, then close the writer."""
        if self.add_last_ckpt:
            ckpt_path = osp.join(runner.work_dir, 'latest.pth')
            if osp.islink(ckpt_path):
                # Resolve the `latest.pth` symlink to the real checkpoint.
                ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))

            if osp.isfile(ckpt_path):
                # runner.epoch += 1 has been done before `after_run`.
                iteration = runner.epoch if self.by_epoch else runner.iter
                return self.writer.add_snapshot_file(
                    tag=self.run_name,
                    snapshot_file_path=ckpt_path,
                    iteration=iteration)

        # flush the buffer and send a task ending signal to Pavi
        self.writer.close()

    @master_only
    def before_epoch(self, runner):
        """Trace the model graph once, on the very first epoch."""
        if runner.epoch == 0 and self.add_graph:
            if is_module_wrapper(runner.model):
                _model = runner.model.module
            else:
                _model = runner.model
            device = next(_model.parameters()).device
            batch = next(iter(runner.data_loader))
            # Take a single sample; assumes batch[img_key] is indexable
            # with a leading batch dimension — TODO confirm with callers.
            image = batch[self.img_key][0:1].to(device)
            with torch.no_grad():
                self.writer.add_graph(_model, image)
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/text.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import datetime
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
|
| 10 |
+
import annotator.uniformer.mmcv as mmcv
|
| 11 |
+
from annotator.uniformer.mmcv.fileio.file_client import FileClient
|
| 12 |
+
from annotator.uniformer.mmcv.utils import is_tuple_of, scandir
|
| 13 |
+
from ..hook import HOOKS
|
| 14 |
+
from .base import LoggerHook
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
    """Logger hook in text.

    In this logger hook, the information will be printed on terminal and
    saved in json file.

    Args:
        by_epoch (bool, optional): Whether EpochBasedRunner is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in each
            epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        interval_exp_name (int, optional): Logging interval for experiment
            name. This feature is to help users conveniently get the experiment
            information from screen or log file. Default: 1000.
        out_dir (str, optional): Logs are saved in ``runner.work_dir`` default.
            If ``out_dir`` is specified, logs will be copied to a new directory
            which is the concatenation of ``out_dir`` and the last level
            directory of ``runner.work_dir``. Default: None.
            `New in version 1.3.16.`
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be copied to ``out_dir``.
            Default: ('.log.json', '.log', '.py').
            `New in version 1.3.16.`
        keep_local (bool, optional): Whether to keep local log when
            :attr:`out_dir` is specified. If False, the local log will be
            removed. Default: True.
            `New in version 1.3.16.`
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """

    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 interval_exp_name=1000,
                 out_dir=None,
                 out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True,
                 file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.interval_exp_name = interval_exp_name

        if out_dir is None and file_client_args is not None:
            raise ValueError(
                'file_client_args should be "None" when `out_dir` is not'
                'specified.')
        self.out_dir = out_dir

        if not (out_dir is None or isinstance(out_dir, str)
                or is_tuple_of(out_dir, str)):
            # Bug fix: the message was missing the f-prefix, so it printed
            # the literal text "{out_dir}" instead of the actual value.
            raise TypeError('out_dir should be "None" or string or tuple of '
                            f'string, but got {out_dir}')
        self.out_suffix = out_suffix

        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(file_client_args,
                                                       self.out_dir)

    def before_run(self, runner):
        """Resolve the output directory and start the json log file."""
        super(TextLoggerHook, self).before_run(runner)

        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(self.file_client_args,
                                                       self.out_dir)
            # The final `self.out_dir` is the concatenation of `self.out_dir`
            # and the last level directory of `runner.work_dir`
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                (f'Text logs will be saved to {self.out_dir} by '
                 f'{self.file_client.name} after the training process.'))

        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir,
                                      f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        """Return the peak GPU memory (MB), reduced to the max across ranks."""
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([mem / (1024 * 1024)],
                              dtype=torch.int,
                              device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        """Format ``log_dict`` into one human-readable line and log it."""
        # print exp name for users to distinguish experiments
        # at every ``interval_exp_name`` iterations and the end of each epoch
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)) or (
                    self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f'Exp name: {runner.meta["exp_name"]}'
                runner.logger.info(exp_info)

        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f'lr: {log_dict["lr"]:.3e}'

            # by epoch: Epoch [4][100/1000]
            # by iter: Iter [100/100000]
            if self.by_epoch:
                log_str = f'Epoch [{log_dict["epoch"]}]' \
                          f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
            else:
                log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
            log_str += f'{lr_str}, '

            if 'time' in log_dict.keys():
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f'time: {log_dict["time"]:.3f}, ' \
                           f'data_time: {log_dict["data_time"]:.3f}, '
                # statistic memory
                if torch.cuda.is_available():
                    log_str += f'memory: {log_dict["memory"]}, '
        else:
            # val/test time
            # here 1000 is the length of the val dataloader
            # by epoch: Epoch[val] [4][1000]
            # by iter: Iter[val] [1000]
            if self.by_epoch:
                log_str = f'Epoch({log_dict["mode"]}) ' \
                          f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
            else:
                log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'

        log_items = []
        for name, val in log_dict.items():
            # TODO: resolve this hack
            # these items have been in log_str
            if name in [
                    'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
                    'memory', 'epoch'
            ]:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)

        runner.logger.info(log_str)

    def _dump_log(self, log_dict, runner):
        """Append ``log_dict`` as one json line to the log file (rank 0)."""
        # dump log in json format
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[k] = self._round_float(v)
        # only append log at last line
        if runner.rank == 0:
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')

    def _round_float(self, items):
        """Recursively round floats to 5 decimals for compact json output."""
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items

    def log(self, runner):
        """Collect the current iteration's stats, print and dump them."""
        if 'eval_iter_num' in runner.log_buffer.output:
            # this doesn't modify runner.iter and is regardless of by_epoch
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)

        log_dict = OrderedDict(
            mode=self.get_mode(runner),
            epoch=self.get_epoch(runner),
            iter=cur_iter)

        # only record lr of the first param group
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['lr'] = cur_lr[0]
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'] = {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['lr'].update({k: lr_[0]})

        if 'time' in runner.log_buffer.output:
            # statistic memory
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)

        log_dict = dict(log_dict, **runner.log_buffer.output)

        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict

    def after_run(self, runner):
        """Copy/upload matching log files to ``out_dir`` when configured."""
        # copy or upload logs to self.out_dir
        if self.out_dir is not None:
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                out_filepath = self.file_client.join_path(
                    self.out_dir, filename)
                with open(local_filepath, 'r') as f:
                    self.file_client.put_text(f.read(), out_filepath)

                runner.logger.info(
                    (f'The file {local_filepath} has been uploaded to '
                     f'{out_filepath}.'))

                if not self.keep_local:
                    os.remove(local_filepath)
                    runner.logger.info(
                        (f'{local_filepath} was removed due to the '
                         '`self.keep_local=False`'))
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ...dist_utils import master_only
|
| 3 |
+
from ..hook import HOOKS
|
| 4 |
+
from .base import LoggerHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@HOOKS.register_module()
class WandbLoggerHook(LoggerHook):
    """Log runner metrics to Weights & Biases.

    Args:
        init_kwargs (dict, optional): Keyword arguments forwarded to
            ``wandb.init``.
        interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of last iterations in each
            epoch if less than ``interval``.
        reset_flag (bool): Whether to clear the output buffer after
            logging.
        commit (bool): The ``commit`` flag forwarded to ``wandb.log``.
        by_epoch (bool): Whether EpochBasedRunner is used.
        with_step (bool): Whether to pass the current iteration as the
            ``step`` argument to ``wandb.log``.
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 commit=True,
                 by_epoch=True,
                 with_step=True):
        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        # Fail fast if wandb is not installed.
        self.import_wandb()
        self.with_step = with_step
        self.commit = commit
        self.init_kwargs = init_kwargs

    def import_wandb(self):
        """Import wandb and cache the module on the hook instance."""
        try:
            import wandb
        except ImportError:
            raise ImportError(
                'Please run "pip install wandb" to install wandb')
        self.wandb = wandb

    @master_only
    def before_run(self, runner):
        """Start the wandb run on the master process only."""
        super().before_run(runner)
        if self.wandb is None:
            self.import_wandb()
        kwargs = self.init_kwargs if self.init_kwargs else {}
        self.wandb.init(**kwargs)

    @master_only
    def log(self, runner):
        """Send all loggable tags of the current iteration to wandb."""
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            self.wandb.log(
                tags, step=self.get_iter(runner), commit=self.commit)
        else:
            # Without an explicit step, record the iteration as a metric.
            tags['global_step'] = self.get_iter(runner)
            self.wandb.log(tags, commit=self.commit)

    @master_only
    def after_run(self, runner):
        """Flush pending data and finish the wandb run."""
        self.wandb.join()
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
ADDED
|
@@ -0,0 +1,670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
from math import cos, pi
|
| 4 |
+
|
| 5 |
+
import annotator.uniformer.mmcv as mmcv
|
| 6 |
+
from .hook import HOOKS, Hook
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class LrUpdaterHook(Hook):
    """LR Scheduler in MMCV.

    Args:
        by_epoch (bool): LR changes epoch by epoch.
        warmup (string): Type of warmup used. It can be None (use no warmup),
            'constant', 'linear' or 'exp'.
        warmup_iters (int): The number of iterations or epochs that warmup
            lasts.
        warmup_ratio (float): LR used at the beginning of warmup equals to
            warmup_ratio * initial_lr.
        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
            means the number of epochs that warmup lasts, otherwise means the
            number of iterations that warmup lasts.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.1,
                 warmup_by_epoch=False):
        # validate the "warmup" argument (merged the two duplicated
        # `if warmup is not None` blocks into one)
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # Bug fix: the message previously omitted "exp" even though
                # it is an accepted warmup type.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch

        if self.warmup_by_epoch:
            # warmup_iters is re-derived from the epoch length in
            # `before_train_epoch`; until then it is unknown.
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None

        self.base_lr = []  # initial lr for all param groups
        self.regular_lr = []  # expected lr if no warming up is performed

    def _set_lr(self, runner, lr_groups):
        """Write ``lr_groups`` into the optimizer's param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
                    param_group['lr'] = lr
        else:
            for param_group, lr in zip(runner.optimizer.param_groups,
                                       lr_groups):
                param_group['lr'] = lr

    def get_lr(self, runner, base_lr):
        """Compute the LR for one param group; implemented by subclasses."""
        raise NotImplementedError

    def get_regular_lr(self, runner):
        """Return the scheduled (non-warmup) LR for every param group."""
        if isinstance(runner.optimizer, dict):
            lr_groups = {}
            for k in runner.optimizer.keys():
                _lr_group = [
                    self.get_lr(runner, _base_lr)
                    for _base_lr in self.base_lr[k]
                ]
                lr_groups.update({k: _lr_group})

            return lr_groups
        else:
            return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Return the warmup LR at iteration ``cur_iters``."""

        def _get_warmup_lr(cur_iters, regular_lr):
            if self.warmup == 'constant':
                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_lr = [_lr * k for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.regular_lr, dict):
            lr_groups = {}
            for key, regular_lr in self.regular_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.regular_lr)

    def before_run(self, runner):
        """Record the initial LR of every param group."""
        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
        # it will be set according to the optimizer params
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    group.setdefault('initial_lr', group['lr'])
                _base_lr = [
                    group['initial_lr'] for group in optim.param_groups
                ]
                self.base_lr.update({k: _base_lr})
        else:
            for group in runner.optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            self.base_lr = [
                group['initial_lr'] for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        """Apply the per-epoch LR (epoch-based runners only)."""
        if self.warmup_iters is None:
            # warmup_by_epoch: convert warmup epochs to iterations now that
            # the epoch length is known.
            epoch_len = len(runner.data_loader)
            self.warmup_iters = self.warmup_epochs * epoch_len

        if not self.by_epoch:
            return

        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)

    def before_train_iter(self, runner):
        """Apply the per-iteration LR, honoring the warmup schedule."""
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_lr = self.get_regular_lr(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        elif self.by_epoch:
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@HOOKS.register_module()
class FixedLrUpdaterHook(LrUpdaterHook):
    """Keep the learning rate fixed at its base value.

    The explicit ``__init__`` that merely forwarded ``**kwargs`` to the
    parent class was redundant and has been removed; construction
    behavior is unchanged.
    """

    def get_lr(self, runner, base_lr):
        """Return ``base_lr`` unchanged."""
        return base_lr
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@HOOKS.register_module()
class StepLrUpdaterHook(LrUpdaterHook):
    """Step LR scheduler with min_lr clipping.

    Args:
        step (int | list[int]): Step to decay the LR. If an int value is given,
            regard it as the decay interval. If a list is given, decay LR at
            these steps.
        gamma (float, optional): Decay LR ratio. Default: 0.1.
        min_lr (float, optional): Minimum LR value to keep. If LR after decay
            is lower than `min_lr`, it will be clipped to this value. If None
            is given, we don't perform lr clipping. Default: None.
    """

    def __init__(self, step, gamma=0.1, min_lr=None, **kwargs):
        if isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_lr = min_lr
        super().__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        """Return the decayed (and optionally clipped) LR."""
        progress = runner.epoch if self.by_epoch else runner.iter

        # Number of decays applied so far.
        if isinstance(self.step, int):
            num_decays = progress // self.step
        else:
            # Index of the first milestone not yet reached; if every
            # milestone has passed, all decays apply.
            num_decays = len(self.step)
            for idx, milestone in enumerate(self.step):
                if progress < milestone:
                    num_decays = idx
                    break

        lr = base_lr * (self.gamma**num_decays)
        if self.min_lr is None:
            return lr
        # clip to a minimum value
        return max(lr, self.min_lr)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@HOOKS.register_module()
class ExpLrUpdaterHook(LrUpdaterHook):
    """Exponential LR decay: multiply by ``gamma`` once per epoch/iter."""

    def __init__(self, gamma, **kwargs):
        self.gamma = gamma
        super(ExpLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # Decay compounds once per scheduling step.
        steps = runner.epoch if self.by_epoch else runner.iter
        return base_lr * self.gamma**steps
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@HOOKS.register_module()
class PolyLrUpdaterHook(LrUpdaterHook):
    """Polynomially decay the LR from ``base_lr`` down to ``min_lr``."""

    def __init__(self, power=1., min_lr=0., **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # Pick progress counters matching the scheduling granularity.
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        coeff = (1 - progress / max_progress)**self.power
        return (base_lr - self.min_lr) * coeff + self.min_lr
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
@HOOKS.register_module()
class InvLrUpdaterHook(LrUpdaterHook):
    """Inverse-decay schedule: ``lr = base * (1 + gamma * t) ** -power``."""

    def __init__(self, gamma, power=1., **kwargs):
        self.gamma = gamma
        self.power = power
        super(InvLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        t = runner.epoch if self.by_epoch else runner.iter
        return base_lr * (1 + self.gamma * t)**(-self.power)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
@HOOKS.register_module()
class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Cosine-anneal the LR from ``base_lr`` down to a target value.

    Exactly one of ``min_lr`` (absolute) or ``min_lr_ratio`` (relative
    to the base LR) must be provided.
    """

    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
        # exactly one of the two target specifications is allowed
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio
        return annealing_cos(base_lr, target_lr, progress / max_progress)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@HOOKS.register_module()
class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Flat + Cosine lr schedule.

    Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501

    Args:
        start_percent (float): When to start annealing the learning rate
            after the percentage of the total training steps.
            The value should be in range [0, 1).
            Default: 0.75
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
            Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self,
                 start_percent=0.75,
                 min_lr=None,
                 min_lr_ratio=None,
                 **kwargs):
        # exactly one of min_lr / min_lr_ratio may be given
        assert (min_lr is None) ^ (min_lr_ratio is None)
        # NOTE: integer 0 or 1 is rejected here too — the value must be a
        # float (condition order kept as-is to preserve error behavior).
        if start_percent < 0 or start_percent > 1 or not isinstance(
                start_percent, float):
            raise ValueError(
                'expected float between 0 and 1 start_percent, but '
                f'got {start_percent}')
        self.start_percent = start_percent
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # The LR stays flat until ``start_percent`` of training is done,
        # then cosine-anneals over the remaining steps.
        if self.by_epoch:
            start = round(runner.max_epochs * self.start_percent)
            progress = runner.epoch - start
            max_progress = runner.max_epochs - start
        else:
            start = round(runner.max_iters * self.start_percent)
            progress = runner.iter - start
            max_progress = runner.max_iters - start

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio

        if progress < 0:  # still in the flat phase
            return base_lr
        return annealing_cos(base_lr, target_lr, progress / max_progress)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
@HOOKS.register_module()
class CosineRestartLrUpdaterHook(LrUpdaterHook):
    """Cosine annealing with restarts learning rate scheme.

    Args:
        periods (list[int]): Periods for each cosine anneling cycle.
        restart_weights (list[float], optional): Restart weights at each
            restart iteration. Default: [1].
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
            Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self,
                 periods,
                 restart_weights=[1],
                 min_lr=None,
                 min_lr_ratio=None,
                 **kwargs):
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.periods = periods
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        self.restart_weights = restart_weights
        assert (len(self.periods) == len(self.restart_weights)
                ), 'periods and restart_weights should have the same length.'
        super(CosineRestartLrUpdaterHook, self).__init__(**kwargs)

        # Running sums of the periods mark the restart boundaries.
        running = 0
        self.cumulative_periods = []
        for period in self.periods:
            running += period
            self.cumulative_periods.append(running)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter

        if self.min_lr_ratio is None:
            target_lr = self.min_lr
        else:
            target_lr = base_lr * self.min_lr_ratio

        # Locate the current cycle, then anneal within it.
        idx = get_position_from_periods(progress, self.cumulative_periods)
        current_weight = self.restart_weights[idx]
        nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
        current_periods = self.periods[idx]

        # Clamp to 1 so progress past the last boundary stays at target.
        alpha = min((progress - nearest_restart) / current_periods, 1)
        return annealing_cos(base_lr, target_lr, alpha, current_weight)
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def get_position_from_periods(iteration, cumulative_periods):
    """Get the position from a period list.

    It will return the index of the right-closest number in the period list.
    For example, the cumulative_periods = [100, 200, 300, 400],
    if iteration == 50, return 0;
    if iteration == 210, return 2;
    if iteration == 300, return 3.

    Args:
        iteration (int): Current iteration.
        cumulative_periods (list[int]): Cumulative period list.

    Returns:
        int: The position of the right-closest number in the period list.
    """
    idx = 0
    while idx < len(cumulative_periods):
        if iteration < cumulative_periods[idx]:
            return idx
        idx += 1
    raise ValueError(f'Current iteration {iteration} exceeds '
                     f'cumulative_periods {cumulative_periods}')
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
@HOOKS.register_module()
class CyclicLrUpdaterHook(LrUpdaterHook):
    """Cyclic LR Scheduler.

    Implement the cyclical learning rate policy (CLR) described in
    https://arxiv.org/pdf/1506.01186.pdf

    Different from the original paper, we use cosine annealing rather than
    triangular policy inside a cycle. This improves the performance in the
    3D detection area.

    Args:
        by_epoch (bool): Whether to update LR by epoch.
        target_ratio (tuple[float]): Relative ratio of the highest LR and the
            lowest LR to the initial LR.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of LR in
            the total cycle.
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'.
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=(10, 1e-4),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 anneal_strategy='cos',
                 **kwargs):
        # Normalize target_ratio into a 2-tuple: a lone float or a
        # 1-tuple gets a derived lower ratio of upper / 1e5.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 1e5)
        elif isinstance(target_ratio, tuple):
            target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
                if len(target_ratio) == 1 else target_ratio
        else:
            raise ValueError('target_ratio should be either float '
                             f'or tuple, got {type(target_ratio)}')

        assert len(target_ratio) == 2, \
            '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, \
            '"step_ratio_up" must be in range [0,1)'

        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.lr_phases = []  # init lr_phases
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear

        # Cyclic scheduling is iteration-based only.
        assert not by_epoch, \
            'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        # initiate lr_phases
        # total lr_phases are separated as up and down
        # Each phase entry is:
        #   [start_iter, end_iter, max_iter_per_phase, start_ratio, end_ratio]
        max_iter_per_phase = runner.max_iters // self.cyclic_times
        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
        self.lr_phases.append(
            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([
            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
            self.target_ratio[0], self.target_ratio[1]
        ])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
             end_ratio) in self.lr_phases:
            # Fold the global iteration into the current cycle; both
            # phases share the same max_iter_per_phase, so repeating the
            # modulo across loop passes is harmless.
            curr_iter %= max_iter_per_phase
            if start_iter <= curr_iter < end_iter:
                progress = curr_iter - start_iter
                # Anneal between the phase's start and end ratios.
                # NOTE(review): if no phase matches (e.g. iter_up_phase == 0
                # edge cases), this method implicitly returns None — verify
                # callers tolerate that.
                return self.anneal_func(base_lr * start_ratio,
                                        base_lr * end_ratio,
                                        progress / (end_iter - start_iter))
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
@HOOKS.register_module()
class OneCycleLrUpdaterHook(LrUpdaterHook):
    """One Cycle LR Scheduler.

    The 1cycle learning rate policy changes the learning rate after every
    batch. The one cycle learning rate policy is described in
    https://arxiv.org/pdf/1708.07120.pdf

    Args:
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int, optional): The total number of steps in the cycle.
            Note that if a value is not provided here, it will be the max_iter
            of runner. Default: None.
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        div_factor (float): Determines the initial learning rate via
            initial_lr = max_lr/div_factor
            Default: 25
        final_div_factor (float): Determines the minimum learning rate via
            min_lr = initial_lr/final_div_factor
            Default: 1e4
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the first
            two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self,
                 max_lr,
                 total_steps=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 div_factor=25,
                 final_div_factor=1e4,
                 three_phase=False,
                 **kwargs):
        # validate by_epoch, currently only support by_epoch = False
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(max_lr, (numbers.Number, list, dict)):
            raise ValueError('the type of max_lr must be the one of list or '
                             f'dict, but got {type(max_lr)}')
        self._max_lr = max_lr
        # NOTE: self.total_steps is only set when the caller supplies one;
        # before_run checks hasattr() to fall back to runner.max_iters.
        if total_steps is not None:
            if not isinstance(total_steps, int):
                raise ValueError('the type of total_steps must be int, but'
                                 f'got {type(total_steps)}')
            self.total_steps = total_steps
        # validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.div_factor = div_factor
        self.final_div_factor = final_div_factor
        self.three_phase = three_phase
        self.lr_phases = []  # init lr_phases
        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        # Resolve the cycle length: explicit total_steps if given,
        # otherwise the runner's max_iters.
        if hasattr(self, 'total_steps'):
            total_steps = self.total_steps
        else:
            total_steps = runner.max_iters
        if total_steps < runner.max_iters:
            raise ValueError(
                'The total steps must be greater than or equal to max '
                f'iterations {runner.max_iters} of runner, but total steps '
                f'is {total_steps}.')

        # Seed each param group's 'initial_lr' with max_lr / div_factor.
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                _max_lr = format_param(k, optim, self._max_lr)
                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
                for group, lr in zip(optim.param_groups, self.base_lr[k]):
                    group.setdefault('initial_lr', lr)
        else:
            k = type(runner.optimizer).__name__
            _max_lr = format_param(k, runner.optimizer, self._max_lr)
            self.base_lr = [lr / self.div_factor for lr in _max_lr]
            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
                group.setdefault('initial_lr', lr)

        # Each phase entry is [end_iter, start_lr_factor, end_lr_factor],
        # where factors multiply base_lr inside get_lr.
        if self.three_phase:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append([
                float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
            ])
            self.lr_phases.append(
                [total_steps - 1, 1, 1 / self.final_div_factor])
        else:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append(
                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        start_iter = 0
        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
            # Anneal within the first phase whose end we have not passed.
            # NOTE(review): if curr_iter exceeds the last phase boundary,
            # 'lr' is never assigned and the return raises UnboundLocalError
            # — presumably total_steps >= max_iters prevents this; confirm.
            if curr_iter <= end_iter:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
                                      pct)
                break
            start_iter = end_iter
        return lr
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def annealing_cos(start, end, factor, weight=1):
    """Cosine-anneal between two values.

    Interpolates from ``weight * start + (1 - weight) * end`` down to
    ``end`` as ``factor`` sweeps from 0.0 to 1.0 along a half cosine.

    Args:
        start (float): The starting learning rate of the cosine annealing.
        end (float): The ending learing rate of the cosine annealing.
        factor (float): The coefficient of `pi` when calculating the current
            percentage. Range from 0.0 to 1.0.
        weight (float, optional): The combination factor of `start` and `end`
            when calculating the actual starting learning rate. Default to 1.
    """
    # cos(pi * factor) + 1 runs from 2 (factor=0) down to 0 (factor=1).
    cos_out = cos(pi * factor) + 1
    return end + 0.5 * weight * (start - end) * cos_out
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def annealing_linear(start, end, factor):
    """Calculate annealing linear learning rate.

    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the linear annealing.
        end (float): The ending learing rate of the linear annealing.
        factor (float): Fraction of the annealing completed, in [0.0, 1.0].
    """
    # Straight-line interpolation between the two endpoints.
    span = end - start
    return start + span * factor
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
def format_param(name, optim, param):
    """Normalize a hyper-parameter spec against an optimizer.

    A scalar is broadcast to every param group; a list/tuple must match
    the number of param groups; a dict is indexed by optimizer ``name``.
    """
    n_groups = len(optim.param_groups)
    if isinstance(param, numbers.Number):
        return [param] * n_groups
    if isinstance(param, (list, tuple)):  # multi param groups
        if len(param) != n_groups:
            raise ValueError(f'expected {n_groups} '
                             f'values for {name}, got {len(param)}')
        return param
    # multi optimizers: dict keyed by optimizer name
    if name not in param:
        raise KeyError(f'{name} is not found in {param.keys()}')
    return param[name]
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import annotator.uniformer.mmcv as mmcv
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
from .lr_updater import annealing_cos, annealing_linear, format_param
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class MomentumUpdaterHook(Hook):
    """Base hook that adjusts optimizer momentum during training.

    Subclasses implement :meth:`get_momentum` to define the schedule.
    An optional warmup phase ('constant', 'linear' or 'exp') covers the
    first ``warmup_iters`` iterations.

    Args:
        by_epoch (bool): Whether the schedule progresses by epoch (True)
            or by iteration (False). Default: True.
        warmup (str, optional): Warmup type, one of 'constant', 'linear'
            or 'exp'. ``None`` disables warmup. Default: None.
        warmup_iters (int): Number of iterations the warmup lasts.
        warmup_ratio (float): Momentum at the start of warmup relative to
            the regular momentum, in (0, 1]. Default: 0.9.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.9):
        # validate the "warmup" argument
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: the message previously omitted 'exp' even though
                # it is accepted above.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            # BUGFIX: message previously named the wrong argument
            # ("warmup_momentum" instead of "warmup_ratio").
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio

        self.base_momentum = []  # initial momentum for all param groups
        # expected momentum if no warming up is performed
        self.regular_momentum = []

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` into the optimizer's param groups.

        Handles both plain ``momentum`` (SGD-style) and Adam-style
        ``betas`` (only the first beta is updated).
        """
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        """Compute the scheduled momentum for one param group."""
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        """Compute the un-warmed momentum for every param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        """Compute warmup momentum at iteration ``cur_iters``."""

        def _get_warmup_momentum(cur_iters, regular_momentum):
            # BUGFIX: all branches now use the ``regular_momentum``
            # argument. Previously 'constant' read self.regular_momentum
            # (never updated after __init__, so always []) and
            # 'linear'/'exp' read self.regular_mom, which broke warmup
            # for dict optimizers and for the 'constant' policy.
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint,
        # if 'initial_momentum' is not saved,
        # it will be set according to the optimizer params
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        # Store under the canonical name; keep the old attribute as an
        # alias for backward compatibility with external readers.
        self.regular_momentum = self.get_regular_momentum(runner)
        self.regular_mom = self.regular_momentum
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_momentum = self.get_regular_momentum(runner)
            self.regular_mom = self.regular_momentum  # backward-compat alias
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
@HOOKS.register_module()
class StepMomentumUpdaterHook(MomentumUpdaterHook):
    """Step momentum scheduler with min value clipping.

    Args:
        step (int | list[int]): Step to decay the momentum. If an int value is
            given, regard it as the decay interval. If a list is given, decay
            momentum at these steps.
        gamma (float, optional): Decay momentum ratio. Default: 0.5.
        min_momentum (float, optional): Minimum momentum value to keep. If
            momentum after decay is lower than this value, it will be clipped
            accordingly. If None is given, we don't perform lr clipping.
            Default: None.
    """

    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
        # Accept either a positive decay interval or a list of positive
        # milestones; anything else is rejected up front.
        if isinstance(step, int):
            assert step > 0
        elif isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_momentum = min_momentum
        super(StepMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        progress = runner.epoch if self.by_epoch else runner.iter

        # Number of decays applied so far: either progress divided by a
        # fixed interval, or the count of milestones already passed.
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            exp = len(self.step)
            for idx, milestone in enumerate(self.step):
                if progress < milestone:
                    exp = idx
                    break

        momentum = base_momentum * self.gamma**exp
        if self.min_momentum is None:
            return momentum
        # clip to a minimum value
        return max(momentum, self.min_momentum)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@HOOKS.register_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Cosine-anneal momentum from its base value down to a target.

    Exactly one of ``min_momentum`` (absolute) or ``min_momentum_ratio``
    (relative to the base momentum) must be given.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # exactly one of the two target specifications is allowed
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        if self.min_momentum_ratio is None:
            target_momentum = self.min_momentum
        else:
            target_momentum = base_momentum * self.min_momentum_ratio
        return annealing_cos(base_momentum, target_momentum,
                             progress / max_progress)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum scheduler.

    Implements the cyclical momentum policy described in
    https://arxiv.org/pdf/1708.07120.pdf. Usually paired with the
    cyclic LR updater, e.g. to improve performance in 3D detection.

    Attributes:
        target_ratio (tuple[float]): Ratios of the lowest and the
            highest momentum relative to the initial momentum.
        cyclic_times (int): Number of cycles during training.
        step_ratio_up (float): Fraction of each cycle spent increasing
            the momentum.
        by_epoch (bool): Whether to update momentum by epoch
            (only ``False`` is currently supported).
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=(0.85 / 0.95, 1),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 **kwargs):
        # Normalize target_ratio to a (low, high) pair.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 1e5)
        elif isinstance(target_ratio, tuple):
            if len(target_ratio) == 1:
                target_ratio = (target_ratio[0], target_ratio[0] / 1e5)
        else:
            raise ValueError('target_ratio should be either float '
                             f'or tuple, got {type(target_ratio)}')

        assert len(target_ratio) == 2, \
            '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, \
            '"step_ratio_up" must be in range [0,1)'

        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.momentum_phases = []  # filled in before_run
        # currently only support by_epoch=False
        assert not by_epoch, \
            'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        # Each cycle is split into an "up" phase and a "down" phase:
        # [start, end, cycle_len, start_ratio, end_ratio].
        max_iter_per_phase = runner.max_iters // self.cyclic_times
        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
        self.momentum_phases.append(
            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
        self.momentum_phases.append([
            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
            self.target_ratio[0], self.target_ratio[1]
        ])

    def get_momentum(self, runner, base_momentum):
        """Cosine-interpolated momentum within the current cycle phase."""
        curr_iter = runner.iter
        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
             end_ratio) in self.momentum_phases:
            # Position within the current cycle.
            curr_iter %= max_iter_per_phase
            if start_iter <= curr_iter < end_iter:
                progress = curr_iter - start_iter
                return annealing_cos(base_momentum * start_ratio,
                                     base_momentum * end_ratio,
                                     progress / (end_iter - start_iter))
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum scheduler.

    Usually used together with the OneCycle LR updater: momentum is
    cycled inversely to the learning rate.

    Args:
        base_momentum (float | list | dict): Lower momentum boundary in
            the cycle for each parameter group. At the peak of a cycle
            momentum is 'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85.
        max_momentum (float | list | dict): Upper momentum boundary in
            the cycle for each parameter group. Functionally it defines
            the cycle amplitude (max_momentum - base_momentum). At the
            start of a cycle momentum is 'max_momentum' and learning
            rate is 'base_lr'. Default: 0.95.
        pct_start (float): The percentage of the cycle (in number of
            steps) spent increasing the learning rate. Default: 0.3.
        anneal_strategy (str): {'cos', 'linear'}; 'cos' for cosine
            annealing, 'linear' for linear annealing. Default: 'cos'.
        three_phase (bool): If True, use a third phase of the schedule
            to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the
            first two phases will be symmetrical about the step
            indicated by pct_start). Default: False.
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # Validate by_epoch; currently only by_epoch=False is supported.
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        # NOTE: the original messages concatenated adjacent string
        # literals without spaces ("...withoption enabled"); fixed here.
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be one of float, '
                             'list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be one of float, '
                             'list or dict.')
        self._max_momentum = max_momentum
        # Validate pct_start.
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('Expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        # Validate anneal_strategy.
        if anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        else:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        self.three_phase = three_phase
        self.momentum_phases = []  # populated in before_run
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def _init_optim_groups(self, k, optim):
        """Record base/max momentum on every param group of ``optim``.

        Also sets ``self.use_beta1`` depending on whether the optimizer
        stores momentum as ``betas`` (Adam-style) or ``momentum``.
        """
        if ('momentum' not in optim.defaults
                and 'betas' not in optim.defaults):
            raise ValueError('optimizer must support momentum with '
                             'option enabled')
        self.use_beta1 = 'betas' in optim.defaults
        _base_momentum = format_param(k, optim, self._base_momentum)
        _max_momentum = format_param(k, optim, self._max_momentum)
        for group, b_momentum, m_momentum in zip(optim.param_groups,
                                                 _base_momentum,
                                                 _max_momentum):
            if self.use_beta1:
                _, beta2 = group['betas']
                group['betas'] = (m_momentum, beta2)
            else:
                group['momentum'] = m_momentum
            group['base_momentum'] = b_momentum
            group['max_momentum'] = m_momentum

    def before_run(self, runner):
        # Initialize each optimizer's param groups (dict-aware), then
        # build the schedule phases.
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                self._init_optim_groups(k, optim)
        else:
            optim = runner.optimizer
            self._init_optim_groups(type(optim).__name__, optim)

        if self.three_phase:
            # Up, symmetric down, then a final annihilation phase.
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            # Up phase followed by a single down phase.
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` back into the optimizer(s)."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        """Momentum for ``param_group`` at the current iteration."""
        curr_iter = runner.iter
        start_iter = 0
        for i, phase in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # The last phase also absorbs any trailing iterations.
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(
                    param_group[phase['start_momentum']],
                    param_group[phase['end_momentum']], pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        """Compute the momentum of every param group (dict-aware)."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k, optim in runner.optimizer.items():
                momentum_groups[k] = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, param_group)
                for param_group in runner.optimizer.param_groups
            ]
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .hook import HOOKS, Hook
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
    """Set the data sampler's epoch for distributed training.

    When distributed training, it is only useful in conjunction with
    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the
    same purpose with :obj:`IterLoader`.
    """

    def before_epoch(self, runner):
        loader = runner.data_loader
        if hasattr(loader.sampler, 'set_epoch'):
            # In case the data loader uses `SequentialSampler` in PyTorch.
            loader.sampler.set_epoch(runner.epoch)
        elif hasattr(loader.batch_sampler.sampler, 'set_epoch'):
            # PyTorch's batch sampler wraps the sampler as an attribute.
            loader.batch_sampler.sampler.set_epoch(runner.epoch)
|
Text2Video-Zero-main/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from ..dist_utils import allreduce_params
|
| 3 |
+
from .hook import HOOKS, Hook
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@HOOKS.register_module()
class SyncBuffersHook(Hook):
    """Synchronize model buffers across processes at the end of each
    epoch (e.g. BatchNorm ``running_mean`` / ``running_var``).

    Args:
        distributed (bool): Whether distributed training is used; the
            hook is a no-op otherwise. Defaults to True.
    """

    def __init__(self, distributed=True):
        self.distributed = distributed

    def after_epoch(self, runner):
        """All-reduce model buffers at the end of each epoch."""
        if self.distributed:
            allreduce_params(runner.model.buffers())
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ade import ADE20KDataset
|
| 2 |
+
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
|
| 3 |
+
from .chase_db1 import ChaseDB1Dataset
|
| 4 |
+
from .cityscapes import CityscapesDataset
|
| 5 |
+
from .custom import CustomDataset
|
| 6 |
+
from .dataset_wrappers import ConcatDataset, RepeatDataset
|
| 7 |
+
from .drive import DRIVEDataset
|
| 8 |
+
from .hrf import HRFDataset
|
| 9 |
+
from .pascal_context import PascalContextDataset, PascalContextDataset59
|
| 10 |
+
from .stare import STAREDataset
|
| 11 |
+
from .voc import PascalVOCDataset
|
| 12 |
+
|
| 13 |
+
# Public API of the datasets package (alphabetical).
__all__ = [
    'ADE20KDataset', 'ChaseDB1Dataset', 'CityscapesDataset', 'ConcatDataset',
    'CustomDataset', 'DATASETS', 'DRIVEDataset', 'HRFDataset', 'PIPELINES',
    'PascalContextDataset', 'PascalContextDataset59', 'PascalVOCDataset',
    'RepeatDataset', 'STAREDataset', 'build_dataloader', 'build_dataset'
]
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/ade.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .builder import DATASETS
|
| 2 |
+
from .custom import CustomDataset
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@DATASETS.register_module()
class ADE20KDataset(CustomDataset):
    """ADE20K dataset.

    In segmentation map annotation for ADE20K, 0 stands for background,
    which is not included in the 150 categories, so
    ``reduce_zero_label`` is fixed to True. The ``img_suffix`` is fixed
    to '.jpg' and ``seg_map_suffix`` is fixed to '.png'.
    """

    # 150 semantic categories (index 0 of the raw maps is background).
    CLASSES = (
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag')

    # RGB color per class, aligned with CLASSES.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
               [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
               [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
               [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
               [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
               [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
               [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
               [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
               [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
               [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
               [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
               [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
               [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
               [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
               [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
               [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
               [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
               [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
               [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
               [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
               [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
               [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
               [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
               [102, 255, 0], [92, 0, 255]]

    def __init__(self, **kwargs):
        super().__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            **kwargs)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/builder.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import platform
|
| 3 |
+
import random
|
| 4 |
+
from functools import partial
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from annotator.uniformer.mmcv.parallel import collate
|
| 8 |
+
from annotator.uniformer.mmcv.runner import get_dist_info
|
| 9 |
+
from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
|
| 10 |
+
from annotator.uniformer.mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
|
| 11 |
+
from torch.utils.data import DistributedSampler
|
| 12 |
+
|
| 13 |
+
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # Raise the soft open-file limit (capped at 4096 and by the hard
    # limit) so many dataloader workers don't exhaust descriptors.
    import resource
    _soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (min(4096, _hard), _hard))

DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _concat_dataset(cfg, default_args=None):
    """Build a :obj:`ConcatDataset` from a config whose ``img_dir``,
    ``ann_dir`` or ``split`` entries may be lists of equal length."""
    from .dataset_wrappers import ConcatDataset
    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)

    # Count list entries for each field (0 when the field is absent).
    if isinstance(img_dir, (list, tuple)):
        num_img_dir = len(img_dir)
    else:
        num_img_dir = 1
    num_ann_dir = 0
    if ann_dir is not None:
        num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
    num_split = 0
    if split is not None:
        num_split = len(split) if isinstance(split, (list, tuple)) else 1

    # Every list-valued field must agree on the number of sub-datasets.
    if num_img_dir > 1:
        assert num_img_dir == num_ann_dir or num_ann_dir == 0
        assert num_img_dir == num_split or num_split == 0
    else:
        assert num_split == num_ann_dir or num_ann_dir <= 1
    num_dset = max(num_split, num_img_dir)

    datasets = []
    for idx in range(num_dset):
        data_cfg = copy.deepcopy(cfg)
        if isinstance(img_dir, (list, tuple)):
            data_cfg['img_dir'] = img_dir[idx]
        if isinstance(ann_dir, (list, tuple)):
            data_cfg['ann_dir'] = ann_dir[idx]
        if isinstance(split, (list, tuple)):
            data_cfg['split'] = split[idx]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def build_dataset(cfg, default_args=None):
    """Build a dataset from config, handling concat/repeat wrappers."""
    from .dataset_wrappers import ConcatDataset, RepeatDataset
    if isinstance(cfg, (list, tuple)):
        # A list of configs concatenates the built datasets.
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
            cfg.get('split', None), (list, tuple)):
        # List-valued dirs/splits expand into a concat of sub-datasets.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     dataloader_type='PoolDataLoader',
                     **kwargs):
    """Build a PyTorch DataLoader.

    In distributed training each GPU/process has its own dataloader;
    in non-distributed training one dataloader serves all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Batch size of each GPU.
        workers_per_gpu (int): Data-loading subprocesses per GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Shuffle the data every epoch. Default: True.
        seed (int | None): Base random seed. Default: None.
        drop_last (bool): Drop the last incomplete batch in an epoch.
            Default: False.
        pin_memory (bool): Use pin_memory in the DataLoader.
            Default: True.
        dataloader_type (str): Type of dataloader.
            Default: 'PoolDataLoader'.
        kwargs: Extra keyword arguments forwarded to the dataloader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The distributed sampler handles shuffling, so the loader
        # itself must not shuffle.
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    assert dataloader_type in (
        'DataLoader',
        'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'
    loader_cls = PoolDataLoader if dataloader_type == 'PoolDataLoader' \
        else DataLoader

    return loader_cls(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed numpy and random for a dataloader worker.

    The per-worker seed equals ``num_workers * rank + worker_id + seed``
    so every worker of every process draws from a distinct stream.

    Args:
        worker_id (int): Worker id.
        num_workers (int): Number of workers.
        rank (int): The rank of the current process.
        seed (int): The user-provided base seed.
    """
    derived_seed = seed + worker_id + rank * num_workers
    np.random.seed(derived_seed)
    random.seed(derived_seed)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/chase_db1.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class ChaseDB1Dataset(CustomDataset):
    """CHASE_DB1 retinal-vessel segmentation dataset.

    In segmentation map annotation for Chase_db1, 0 stands for
    background, which is included in the 2 categories, so
    ``reduce_zero_label`` is fixed to False. The ``img_suffix`` is
    fixed to '.png' and ``seg_map_suffix`` is fixed to '_1stHO.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super().__init__(
            img_suffix='.png',
            seg_map_suffix='_1stHO.png',
            reduce_zero_label=False,
            **kwargs)
        # The dataset cannot work without its image directory.
        assert osp.exists(self.img_dir)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/cityscapes.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
import tempfile
|
| 3 |
+
|
| 4 |
+
import annotator.uniformer.mmcv as mmcv
|
| 5 |
+
import numpy as np
|
| 6 |
+
from annotator.uniformer.mmcv.utils import print_log
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
from .builder import DATASETS
|
| 10 |
+
from .custom import CustomDataset
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
    """

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')

    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, **kwargs):
        """Initialize with the Cityscapes-specific file suffixes fixed."""
        super(CityscapesDataset, self).__init__(
            img_suffix='_leftImg8bit.png',
            seg_map_suffix='_gtFine_labelTrainIds.png',
            **kwargs)

    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes.

        A ``str`` result is treated as a path to a saved ``.npy`` array and
        loaded first (this matches the ``efficient_test`` flow where results
        are file names). The mapping is applied on a copy so the caller's
        array is not mutated.
        """
        if isinstance(result, str):
            result = np.load(result)
        # Lazy import: cityscapesscripts is an optional dependency, only
        # needed when formatting/evaluating in Cityscapes protocol.
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id

        return result_copy

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission

        Returns:
            list[str: str]: result txt files which contains corresponding
            semantic segmentation images.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]

            png_filename = osp.join(imgfile_prefix, f'{basename}.png')

            # Save as a palettized ('P' mode) PNG colored with the official
            # Cityscapes id->color palette.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            for label_id, label in CSLabels.id2label.items():
                palette[label_id] = label.color

            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()

        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: True.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """

        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')

        # When no prefix is given, write into a TemporaryDirectory; the
        # caller is responsible for keeping a reference / cleaning it up.
        if imgfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None,
                 efficient_test=False):
        """Evaluation in Cityscapes/default protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.
            efficient_test (bool): Forwarded to the default
                ``CustomDataset.evaluate`` for the non-cityscapes metrics.
                Default: False.

        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """

        eval_results = dict()
        # Copy the list so removing 'cityscapes' below does not mutate the
        # caller's metric list.
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        # Remaining metrics (mIoU/mDice/...) fall through to the default
        # CustomDataset evaluation.
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger, efficient_test))

        return eval_results

    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file

        Returns:
            dict[str: float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, imgfile_prefix)

        if tmp_dir is None:
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name

        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        # cityscapesscripts is configured through a module-level ``args``
        # object; these assignments mutate global state of CSEval.
        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False

        seg_map_list = []
        pred_list = []

        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))

        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))

        # Remove the temporary png dir created by format_results, if any.
        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/custom.py
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import os.path as osp
|
| 3 |
+
from collections import OrderedDict
|
| 4 |
+
from functools import reduce
|
| 5 |
+
|
| 6 |
+
import annotator.uniformer.mmcv as mmcv
|
| 7 |
+
import numpy as np
|
| 8 |
+
from annotator.uniformer.mmcv.utils import print_log
|
| 9 |
+
from prettytable import PrettyTable
|
| 10 |
+
from torch.utils.data import Dataset
|
| 11 |
+
|
| 12 |
+
from annotator.uniformer.mmseg.core import eval_metrics
|
| 13 |
+
from annotator.uniformer.mmseg.utils import get_root_logger
|
| 14 |
+
from .builder import DATASETS
|
| 15 |
+
from .pipelines import Compose
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for semantic segmentation. An example of file structure
    is as followed.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of CustomDataset should be of the same
    except suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in txt file.
    Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
    Please refer to ``docs/tutorials/new_dataset.md`` for more details.


    Args:
        pipeline (list[dict]): Processing pipeline
        img_dir (str): Path to image directory
        img_suffix (str): Suffix of images. Default: '.jpg'
        ann_dir (str, optional): Path to annotation directory. Default: None
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        split (str, optional): Split txt file. If split is specified, only
            file with suffix in the splits will be loaded. Otherwise, all
            images in img_dir/ann_dir will be loaded. Default: None
        data_root (str, optional): Data root for img_dir/ann_dir. Default:
            None.
        test_mode (bool): If test_mode=True, gt wouldn't be loaded.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Default: False
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        palette (Sequence[Sequence[int]]] | np.ndarray | None):
            The palette of segmentation map. If None is given, and
            self.PALETTE is None, random palette will be generated.
            Default: None
    """

    # Subclasses override these with their class names / colors.
    CLASSES = None

    PALETTE = None

    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False,
                 classes=None,
                 palette=None):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        # label_map must exist before get_classes_and_palette, which may
        # replace it with an old-id -> new-id remapping dict.
        self.label_map = None
        self.CLASSES, self.PALETTE = self.get_classes_and_palette(
            classes, palette)

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)

        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix, self.split)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.img_infos)

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         split):
        """Load annotation from directory.

        Args:
            img_dir (str): Path to image directory
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None

        Returns:
            list[dict]: All image info of dataset.
        """

        img_infos = []
        if split is not None:
            # Each line of the split file is a bare file stem; suffixes are
            # appended here.
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_info = dict(filename=img_name + img_suffix)
                    if ann_dir is not None:
                        seg_map = img_name + seg_map_suffix
                        img_info['ann'] = dict(seg_map=seg_map)
                    img_infos.append(img_info)
        else:
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_info = dict(filename=img)
                if ann_dir is not None:
                    # NOTE(review): str.replace substitutes the first
                    # occurrence; a filename containing img_suffix mid-name
                    # would map wrongly — confirm suffixes only occur at end.
                    seg_map = img.replace(img_suffix, seg_map_suffix)
                    img_info['ann'] = dict(seg_map=seg_map)
                img_infos.append(img_info)

        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir
        # Only expose the remapping when the user overrode the classes.
        if self.custom_classes:
            results['label_map'] = self.label_map

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        else:
            return self.prepare_train_img(idx)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """

        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """

        img_info = self.img_infos[idx]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def get_gt_seg_maps(self, efficient_test=False):
        """Get ground truth segmentation maps for evaluation.

        With ``efficient_test`` the returned list contains file paths
        instead of loaded arrays, deferring the read to the metric code.
        """
        gt_seg_maps = []
        for img_info in self.img_infos:
            seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
            if efficient_test:
                gt_seg_map = seg_map
            else:
                gt_seg_map = mmcv.imread(
                    seg_map, flag='unchanged', backend='pillow')
            gt_seg_maps.append(gt_seg_map)
        return gt_seg_maps

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
            palette (Sequence[Sequence[int]]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Default: None
        """
        if classes is None:
            self.custom_classes = False
            return self.CLASSES, self.PALETTE

        self.custom_classes = True
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        if self.CLASSES:
            if not set(classes).issubset(self.CLASSES):
                raise ValueError('classes is not a subset of CLASSES.')

            # dictionary, its keys are the old label ids and its values
            # are the new label ids.
            # used for changing pixel labels in load_annotations.
            self.label_map = {}
            for i, c in enumerate(self.CLASSES):
                if c not in class_names:
                    # -1 marks classes dropped by the override.
                    self.label_map[i] = -1
                else:
                    self.label_map[i] = classes.index(c)

        palette = self.get_palette_for_custom_classes(class_names, palette)

        return class_names, palette

    def get_palette_for_custom_classes(self, class_names, palette=None):
        """Return a palette matching a possibly-remapped class list.

        When ``label_map`` is set, the subclass palette is re-ordered to the
        new label ids; otherwise fall back to the given palette, the class
        PALETTE, or a random one.
        """

        if self.label_map is not None:
            # return subset of palette
            palette = []
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                if new_id != -1:
                    palette.append(self.PALETTE[old_id])
            # Preserve the container type of the class-level PALETTE.
            palette = type(self.PALETTE)(palette)

        elif palette is None:
            if self.PALETTE is None:
                palette = np.random.randint(0, 255, size=(len(class_names), 3))
            else:
                palette = self.PALETTE

        return palette

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 efficient_test=False,
                 **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. 'mIoU',
                'mDice' and 'mFscore' are supported.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            efficient_test (bool): When True, ground truths (and results)
                are file paths loaded lazily by the metric code; the result
                files are deleted at the end. Default: False.

        Returns:
            dict[str, float]: Default metrics.
        """

        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps(efficient_test)
        if self.CLASSES is None:
            # No declared classes: infer the class count from the union of
            # labels actually present in the ground truth.
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)
        ret_metrics = eval_metrics(
            results,
            gt_seg_maps,
            num_classes,
            self.ignore_index,
            metric,
            label_map=self.label_map,
            reduce_zero_label=self.reduce_zero_label)

        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES

        # summary table (per-metric mean over classes, as percentages)
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # each class table (aAcc is a scalar, so it is dropped here)
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        # for logger
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)

        # each metric dict (converted back from percentages to fractions)
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        # efficient_test passes results as file names; clean them up now.
        if mmcv.is_list_of(results, str):
            for file_name in results:
                os.remove(file_name)
        return eval_results
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/dataset_wrappers.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but additionally
    exposes the class names and palette of the first sub-dataset so the
    concatenation can stand in for a single dataset.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # All sub-datasets are expected to share one label space; mirror the
        # first one's metadata.
        first = datasets[0]
        self.CLASSES = first.CLASSES
        self.PALETTE = first.PALETTE
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper that virtually repeats a dataset ``times`` times.

    The wrapped dataset is not copied; indices simply wrap around via a
    modulo, which is useful when data loading is expensive but the dataset
    itself is small, since it cuts down the per-epoch reloading overhead.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Mirror metadata so the wrapper is a drop-in replacement.
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Map the virtual index back onto the underlying dataset."""
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        """The length is multiplied by ``times``"""
        return self._ori_len * self.times
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/drive.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class DRIVEDataset(CustomDataset):
    """Dataset class for the DRIVE retinal-vessel benchmark.

    Label 0 marks the background and is itself one of the two valid
    categories, so ``reduce_zero_label`` is pinned to False. Images carry the
    '.png' suffix and annotation maps carry the '_manual1.png' suffix.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Fix the dataset-specific suffixes/label handling; everything else
        # (pipeline, dirs, split, ...) is forwarded to CustomDataset.
        super(DRIVEDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_manual1.png',
            reduce_zero_label=False,
            **kwargs)
        # Fail fast on a bad data path instead of at first access.
        assert osp.exists(self.img_dir)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/hrf.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class HRFDataset(CustomDataset):
    """HRF retinal vessel segmentation dataset.

    In segmentation map annotation for HRF, 0 stands for background, which
    is one of the 2 categories, so ``reduce_zero_label`` is fixed to False.
    Both ``img_suffix`` and ``seg_map_suffix`` are fixed to '.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Suffixes and zero-label handling are properties of the HRF
        # release itself, so they are pinned here rather than exposed.
        super().__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pascal_context.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
    """PascalContext dataset (60 classes, background included).

    In segmentation map annotation for PascalContext, 0 stands for
    background, which is included in the 60 categories, so
    ``reduce_zero_label`` is fixed to False. ``img_suffix`` is fixed to
    '.jpg' and ``seg_map_suffix`` is fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """

    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
               'window', 'wood')

    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        super().__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=False,
            **kwargs)
        # A split file is mandatory for PascalContext.
        assert osp.exists(self.img_dir) and self.split is not None
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@DATASETS.register_module()
class PascalContextDataset59(CustomDataset):
    """PascalContext dataset (59 classes, background excluded).

    In segmentation map annotation for PascalContext, 0 stands for
    background. This variant drops the background class, so
    ``reduce_zero_label`` is fixed to True (label 0 becomes the ignore
    index and all other labels shift down by 1). The ``img_suffix`` is
    fixed to '.jpg' and ``seg_map_suffix`` is fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """

    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')

    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
               [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        # NOTE: unlike :class:`PascalContextDataset`, this variant maps the
        # background label (0) to the ignore index via reduce_zero_label.
        super(PascalContextDataset59, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=True,
            **kwargs)
        # A split file is mandatory for PascalContext.
        assert osp.exists(self.img_dir) and self.split is not None
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .compose import Compose
|
| 2 |
+
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
|
| 3 |
+
Transpose, to_tensor)
|
| 4 |
+
from .loading import LoadAnnotations, LoadImageFromFile
|
| 5 |
+
from .test_time_aug import MultiScaleFlipAug
|
| 6 |
+
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
|
| 7 |
+
PhotoMetricDistortion, RandomCrop, RandomFlip,
|
| 8 |
+
RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
|
| 12 |
+
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
|
| 13 |
+
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
|
| 14 |
+
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
|
| 15 |
+
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
|
| 16 |
+
]
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/compose.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
|
| 3 |
+
from annotator.uniformer.mmcv.utils import build_from_cfg
|
| 4 |
+
|
| 5 |
+
from ..builder import PIPELINES
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@PIPELINES.register_module()
class Compose(object):
    """Compose multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform
            objects or config dicts to be composed.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for cfg in transforms:
            if isinstance(cfg, dict):
                # A dict is a config: build the transform via the registry.
                self.transforms.append(build_from_cfg(cfg, PIPELINES))
            elif callable(cfg):
                self.transforms.append(cfg)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Apply the composed transforms in order.

        Args:
            data (dict): A result dict containing the data to transform.

        Returns:
            dict: Transformed data, or ``None`` if any transform returned
            ``None`` (which aborts the pipeline early).
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = ''.join(f'\n    {t}' for t in self.transforms)
        return f'{self.__class__.__name__}({body}\n)'
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/formating.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
|
| 3 |
+
import annotator.uniformer.mmcv as mmcv
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from annotator.uniformer.mmcv.parallel import DataContainer as DC
|
| 7 |
+
|
| 8 |
+
from ..builder import PIPELINES
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data
            to be converted.

    Returns:
        torch.Tensor: ``data`` converted (tensors pass through unchanged).

    Raises:
        TypeError: If ``data`` is of an unsupported type.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    # Strings are sequences too, but must not be tensorized.
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@PIPELINES.register_module()
class ToTensor(object):
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert each entry listed in ``self.keys`` to a tensor.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with the selected entries converted to
            :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of an input image is (H, W, C). The pipeline
    converts it to (C, H, W). If only 2 dimensions (H, W) are given, the
    output is (1, H, W).

    Args:
        keys (Sequence[str]): Keys of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert the selected images to channel-first tensors.

        Args:
            results (dict): Result dict containing the image data.

        Returns:
            dict: The same dict with images converted to
            :obj:`torch.Tensor` in (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if img.ndim < 3:
                # Grayscale (H, W) -> (H, W, 1) so the transpose below works.
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@PIPELINES.register_module()
class Transpose(object):
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose each entry in ``self.keys`` by ``self.order``.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The same dict with the selected entries transposed.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, order={self.order})')
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@PIPELINES.register_module()
class ToDataContainer(object):
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in a result dict
            will be converted to :obj:`mmcv.DataContainer` with
            ``**kwargs``. Default: ``(dict(key='img', stack=True),
            dict(key='gt_semantic_seg'))``.
    """

    def __init__(self,
                 fields=(dict(key='img',
                              stack=True), dict(key='gt_semantic_seg'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries in :obj:`mmcv.DataContainer`.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with the configured entries wrapped.
        """
        for field in self.fields:
            # Copy so popping 'key' never mutates the shared field configs.
            cfg = dict(field)
            key = cfg.pop('key')
            results[key] = DC(results[key], **cfg)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(fields={self.fields})'
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including
    "img" and "gt_semantic_seg". These fields are formatted as follows.

    - img: (1) transpose, (2) to tensor, (3) to DataContainer (stack=True)
    - gt_semantic_seg: (1) unsqueeze dim-0, (2) to tensor,
      (3) to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Transform and format the common fields in ``results``.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with "img" and "gt_semantic_seg" (when
            present) formatted with the default bundle.
        """
        if 'img' in results:
            img = results['img']
            if img.ndim < 3:
                # Grayscale (H, W) -> (H, W, 1) before going channel-first.
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if 'gt_semantic_seg' in results:
            # Targets become int64 (long) class indices with a leading
            # singleton channel dim.
            seg = results['gt_semantic_seg'][None, ...].astype(np.int64)
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    ``keys`` is set to some subset of "img", "gt_semantic_seg".

    The "img_metas" item is always populated. The contents of the
    "img_metas" dictionary depend on ``meta_keys``. By default this
    includes:

    - "img_shape": shape of the image input to the network as a tuple
      (h, w, c). Note that images may be zero padded on the bottom/right
      if the batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:
        - mean - per channel mean subtraction
        - std - per channel std divisor
        - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape',
            'img_shape', 'pad_shape', 'scale_factor', 'flip',
            'flip_direction', 'img_norm_cfg')``
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect ``self.keys`` plus an ``img_metas`` DataContainer.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: A dict with key ``img_metas`` (cpu-only DataContainer of
            the meta entries) followed by every key in ``self.keys``.
        """
        img_meta = {key: results[key] for key in self.meta_keys}
        # Keep 'img_metas' as the first entry, matching the original
        # insertion order of the output dict.
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/loading.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
import annotator.uniformer.mmcv as mmcv
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ..builder import PIPELINES
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
class LoadImageFromFile(object):
    """Load an image from file.

    Required keys are "img_prefix" and "img_info" (a dict that must
    contain the key "filename"). Added or updated keys are "filename",
    "img", "img_shape", "ori_shape" (same as ``img_shape``), "pad_shape"
    (same as ``img_shape``), "scale_factor" (1.0) and "img_norm_cfg"
    (means=0 and stds=1).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a
            float32 numpy array. If set to False, the loaded image is an
            uint8 array. Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`.
            Default: 'cv2'
    """

    def __init__(self,
                 to_float32=False,
                 color_type='color',
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='cv2'):
        self.to_float32 = to_float32
        self.color_type = color_type
        # Copy so later mutation of the caller's dict cannot leak in.
        self.file_client_args = file_client_args.copy()
        # Created lazily on first __call__ (e.g. inside worker processes).
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Load the image and populate its meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict containing the loaded image and meta
            information.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        prefix = results.get('img_prefix')
        name = results['img_info']['filename']
        filename = name if prefix is None else osp.join(prefix, name)

        img_bytes = self.file_client.get(filename)
        img = mmcv.imfrombytes(
            img_bytes, flag=self.color_type, backend=self.imdecode_backend)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = name
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Initial values for the default meta_keys; later transforms
        # (Pad, Resize, Normalize) overwrite them as needed.
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0
        num_channels = 1 if img.ndim < 3 else img.shape[2]
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(to_float32={self.to_float32},'
                f"color_type='{self.color_type}',"
                f"imdecode_backend='{self.imdecode_backend}')")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@PIPELINES.register_module()
class LoadAnnotations(object):
    """Load annotations for semantic segmentation.

    Reads the segmentation map referenced by ``results['ann_info']['seg_map']``
    (optionally joined with ``results['seg_prefix']``), applies the optional
    ``label_map`` remapping, and stores it under ``results['gt_semantic_seg']``.

    Args:
        reduce_zero_label (bool): Whether reduce all label value by 1.
            Usually used for datasets where 0 is background label.
            Default: False.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
            'pillow'
    """

    def __init__(self,
                 reduce_zero_label=False,
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='pillow'):
        self.reduce_zero_label = reduce_zero_label
        # Copy so later mutation of the caller's dict cannot leak in.
        self.file_client_args = file_client_args.copy()
        # Created lazily on first __call__ (e.g. inside worker processes).
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Call function to load multiple types annotations.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict contains loaded semantic segmentation annotations.
        """

        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        if results.get('seg_prefix', None) is not None:
            filename = osp.join(results['seg_prefix'],
                                results['ann_info']['seg_map'])
        else:
            filename = results['ann_info']['seg_map']
        img_bytes = self.file_client.get(filename)
        # flag='unchanged' keeps the raw single-channel label values;
        # squeeze drops any singleton channel dim the decoder may add.
        gt_semantic_seg = mmcv.imfrombytes(
            img_bytes, flag='unchanged',
            backend=self.imdecode_backend).squeeze().astype(np.uint8)
        # modify if custom classes
        if results.get('label_map', None) is not None:
            for old_id, new_id in results['label_map'].items():
                gt_semantic_seg[gt_semantic_seg == old_id] = new_id
        # reduce zero_label
        if self.reduce_zero_label:
            # avoid using underflow conversion: map 0 -> 255 first so the
            # subsequent -1 never wraps a 0 around to 255 by accident,
            # then turn the 254 produced from the old 255 back into 255.
            gt_semantic_seg[gt_semantic_seg == 0] = 255
            gt_semantic_seg = gt_semantic_seg - 1
            gt_semantic_seg[gt_semantic_seg == 254] = 255
        results['gt_semantic_seg'] = gt_semantic_seg
        results['seg_fields'].append('gt_semantic_seg')
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
        repr_str += f"imdecode_backend='{self.imdecode_backend}')"
        return repr_str
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/test_time_aug.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import annotator.uniformer.mmcv as mmcv
|
| 4 |
+
|
| 5 |
+
from ..builder import PIPELINES
|
| 6 |
+
from .compose import Compose
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as followed:

    .. code-block::

        img_scale=(2048, 1024),
        img_ratios=[0.5, 1.0],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFlipAug with above configuration, the results are wrapped
    into lists of the same length as followed:

    .. code-block::

        dict(
            img=[...],
            img_shape=[...],
            scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (None | tuple | list[tuple]): Images scales for resizing.
        img_ratios (float | list[float]): Image ratios for resizing.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal" and "vertical". If flip_direction is a
            list, multiple flip augmentations will be applied.
            It has no effect when flip == False. Default: "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale,
                 img_ratios=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        if img_ratios is not None:
            img_ratios = img_ratios if isinstance(img_ratios,
                                                  list) else [img_ratios]
            assert mmcv.is_list_of(img_ratios, float)
        if img_scale is None:
            # mode 1: given img_scale=None and a range of image ratio;
            # the actual scales are derived from the input image in __call__.
            self.img_scale = None
            assert mmcv.is_list_of(img_ratios, float)
        elif isinstance(img_scale, tuple) and mmcv.is_list_of(
                img_ratios, float):
            assert len(img_scale) == 2
            # mode 2: given a scale and a range of image ratio
            self.img_scale = [(int(img_scale[0] * ratio),
                               int(img_scale[1] * ratio))
                              for ratio in img_ratios]
        else:
            # mode 3: given multiple scales
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None
        self.flip = flip
        self.img_ratios = img_ratios
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
                into a list.
        """

        aug_data = []
        if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
            # mode 1: scales are ratios of the current image's own size
            h, w = results['img'].shape[:2]
            img_scale = [(int(w * ratio), int(h * ratio))
                         for ratio in self.img_ratios]
        else:
            img_scale = self.img_scale
        flip_aug = [False, True] if self.flip else [False]
        for scale in img_scale:
            for flip in flip_aug:
                for direction in self.flip_direction:
                    # shallow copy: per-aug keys are overwritten below, the
                    # heavy payloads (image arrays) are shared until resized
                    _results = results.copy()
                    _results['scale'] = scale
                    _results['flip'] = flip
                    _results['flip_direction'] = direction
                    data = self.transforms(_results)
                    aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        """Return a readable summary including all configured options.

        Fix: the original closed the parenthesis after ``flip=...`` and then
        appended ``flip_direction=...`` outside it, producing a malformed
        repr such as ``...flip=False)flip_direction=['horizontal']``.
        """
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/pipelines/transforms.py
ADDED
|
@@ -0,0 +1,889 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import annotator.uniformer.mmcv as mmcv
|
| 2 |
+
import numpy as np
|
| 3 |
+
from annotator.uniformer.mmcv.utils import deprecated_api_warning, is_tuple_of
|
| 4 |
+
from numpy import random
|
| 5 |
+
|
| 6 |
+
from ..builder import PIPELINES
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
class Resize(object):
    """Resize images & seg.

    This transform resizes the input image to some scale. If the input dict
    contains the key "scale", then the scale in the input dict is used,
    otherwise the specified scale in the init method is used.

    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
    (multi-scale). There are 4 multiscale modes:

    - ``ratio_range is not None``:
    1. When img_scale is None, img_scale is the shape of image in results
    (img_scale = results['img'].shape[:2]) and the image is resized based
    on the original size. (mode 1)
    2. When img_scale is a tuple (single-scale), randomly sample a ratio from
    the ratio range and multiply it with the image scale. (mode 2)

    - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
    scale from a range. (mode 3)

    - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
    scale from multiple scales. (mode 4)

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            # normalize to a list of (w, h)-style tuples
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)

        if ratio_range is not None:
            # mode 1: given img_scale=None and a range of image ratio
            # mode 2: given a scale and a range of image ratio
            assert self.img_scale is None or len(self.img_scale) == 1
        else:
            # mode 3 and 4: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']

        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
                where ``img_scale`` is the selected image scale and
                ``scale_idx`` is the selected index in the given candidates.
        """

        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where
                ``img_scale`` is sampled scale and None is just a placeholder
                to be consistent with :func:`random_select`.
        """

        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # sample long and short edges independently within their bounds
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where
                ``scale`` is sampled ratio multiplied with ``img_scale`` and
                None is just a placeholder to be consistent with
                :func:`random_select`.
        """

        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        # uniform sample in [min_ratio, max_ratio)
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into
                ``results``, which would be used by subsequent pipelines.
        """

        if self.ratio_range is not None:
            if self.img_scale is None:
                # mode 1: scale relative to the input image's own size
                h, w = results['img'].shape[:2]
                scale, scale_idx = self.random_sample_ratio((w, h),
                                                            self.ratio_range)
            else:
                # mode 2: scale relative to the single configured img_scale
                scale, scale_idx = self.random_sample_ratio(
                    self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError

        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale has minor difference
            # a real fix should be done in the mmcv.imrescale in the future
            new_h, new_w = img.shape[:2]
            h, w = results['img'].shape[:2]
            w_scale = new_w / w
            h_scale = new_h / h
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
        scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            # nearest interpolation keeps label values discrete
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
                'keep_ratio' keys are added into result dict.
        """

        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_seg(results)
        return results

    def __repr__(self):
        """Return a summary of this transform's configuration."""
        repr_str = self.__class__.__name__
        repr_str += (f'(img_scale={self.img_scale}, '
                     f'multiscale_mode={self.multiscale_mode}, '
                     f'ratio_range={self.ratio_range}, '
                     f'keep_ratio={self.keep_ratio})')
        return repr_str
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        prob (float, optional): The flipping probability. Default: None
            (never flip unless the caller supplied a 'flip' key).
        direction(str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert prob >= 0 and prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Call function to flip bounding boxes, masks, semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added into
                result dict.
        """

        if 'flip' not in results:
            # Fix: with the default ``prob=None`` the original expression
            # ``np.random.rand() < self.prob`` raised TypeError on Python 3
            # (float < None is unorderable). Treat ``prob=None`` as
            # "never flip"; bool() keeps a plain Python bool in results.
            flip = bool(self.prob is not None
                        and np.random.rand() < self.prob)
            results['flip'] = flip
        if 'flip_direction' not in results:
            results['flip_direction'] = self.direction
        if results['flip']:
            # flip image
            results['img'] = mmcv.imflip(
                results['img'], direction=results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                # use copy() to make numpy stride positive
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction']).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.

    Two mutually exclusive padding modes are supported: (1) pad to a fixed
    ``size``, or (2) pad to the minimum size divisible by ``size_divisor``.
    Adds the keys "pad_shape", "pad_fixed_size" and "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
    """

    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        # exactly one of `size` / `size_divisor` must be provided
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad ``results['img']`` according to the configured mode."""
        if self.size is not None:
            padded = mmcv.impad(
                results['img'], shape=self.size, pad_val=self.pad_val)
        elif self.size_divisor is not None:
            padded = mmcv.impad_to_multiple(
                results['img'], self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded
        results['pad_shape'] = padded.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_seg(self, results):
        """Pad every registered segmentation map to ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key],
                shape=results['pad_shape'][:2],
                pad_val=self.seg_pad_val)

    def __call__(self, results):
        """Pad the image first, then all segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(size={self.size}, size_divisor={self.size_divisor}, '
                f'pad_val={self.pad_val})')
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image with the given mean/std.

    Adds the key "img_norm_cfg" recording the parameters that were applied.

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Normalize ``results['img']`` and record the config used.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; the 'img_norm_cfg' key is added.
        """
        normalized = mmcv.imnormalize(
            results['img'], self.mean, self.std, self.to_rgb)
        results['img'] = normalized
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(mean={self.mean}, '
                f'std={self.std}, to_rgb={self.to_rgb})')
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
@PIPELINES.register_module()
class Rerange(object):
    """Linearly rescale image pixel values into ``[min_value, max_value]``.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Rerange ``results['img']``.

        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Reranged results.
        """
        img = results['img']
        lo = np.min(img)
        hi = np.max(img)
        # a constant image cannot be reranged (would divide by zero)
        assert lo < hi
        # map [lo, hi] -> [0, 1], then stretch to [min_value, max_value]
        unit = (img - lo) / (hi - lo)
        results['img'] = unit * (self.max_value - self.min_value) \
            + self.min_value
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(min_value={self.min_value}, max_value={self.max_value})')
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
@PIPELINES.register_module()
class CLAHE(object):
    """Apply Contrast Limited Adaptive Histogram Equalization per channel.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
            It defines the number of tiles in row and column. Default: (8, 8).
    """

    def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
        assert isinstance(clip_limit, (float, int))
        self.clip_limit = clip_limit
        assert is_tuple_of(tile_grid_size, int)
        assert len(tile_grid_size) == 2
        self.tile_grid_size = tile_grid_size

    def __call__(self, results):
        """Equalize each channel of ``results['img']`` independently, in place.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        img = results['img']
        for channel in range(img.shape[2]):
            # mmcv.clahe operates on a single uint8 plane
            img[:, :, channel] = mmcv.clahe(
                np.array(img[:, :, channel], dtype=np.uint8),
                self.clip_limit, self.tile_grid_size)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(clip_limit={self.clip_limit}, '
                f'tile_grid_size={self.tile_grid_size})')
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
@PIPELINES.register_module()
class RandomCrop(object):
    """Random crop the image & seg.

    Args:
        crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that single category could
            occupy.
    """

    def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
        assert crop_size[0] > 0 and crop_size[1] > 0
        self.crop_size = crop_size
        self.cat_max_ratio = cat_max_ratio
        self.ignore_index = ignore_index

    def get_crop_bbox(self, img):
        """Randomly get a crop bounding box."""
        # margins collapse to zero when the image is not larger than the crop
        margin_h = max(img.shape[0] - self.crop_size[0], 0)
        margin_w = max(img.shape[1] - self.crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
        return crop_y1, crop_y2, crop_x1, crop_x2

    def crop(self, img, crop_bbox):
        """Crop from ``img``"""
        y1, y2, x1, x2 = crop_bbox
        return img[y1:y2, x1:x2, ...]

    def __call__(self, results):
        """Randomly crop the image and every registered segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Randomly cropped results, 'img_shape' key in result dict is
                updated according to crop size.
        """
        img = results['img']
        crop_bbox = self.get_crop_bbox(img)
        if self.cat_max_ratio < 1.:
            # Re-draw the crop up to 10 times until no single category
            # (excluding ignore_index) dominates beyond cat_max_ratio.
            for _ in range(10):
                seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
                labels, cnt = np.unique(seg_temp, return_counts=True)
                cnt = cnt[labels != self.ignore_index]
                if len(cnt) > 1 and np.max(cnt) / np.sum(
                        cnt) < self.cat_max_ratio:
                    break
                crop_bbox = self.get_crop_bbox(img)

        # crop the image
        cropped = self.crop(img, crop_bbox)
        results['img'] = cropped
        results['img_shape'] = cropped.shape

        # crop semantic seg maps with the same bbox
        for key in results.get('seg_fields', []):
            results[key] = self.crop(results[key], crop_bbox)

        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
@PIPELINES.register_module()
class RandomRotate(object):
    """Rotate the image & seg.

    Args:
        prob (float): The rotation probability.
        degree (float, tuple[float]): Range of degrees to select from. If
            degree is a number instead of tuple like (min, max),
            the range of degree will be (``-degree``, ``+degree``)
        pad_val (float, optional): Padding value of image. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used. Default: None.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image. Default: False
    """

    def __init__(self,
                 prob,
                 degree,
                 pad_val=0,
                 seg_pad_val=255,
                 center=None,
                 auto_bound=False):
        self.prob = prob
        assert prob >= 0 and prob <= 1
        if isinstance(degree, (float, int)):
            assert degree > 0, f'degree {degree} should be positive'
            self.degree = (-degree, degree)
        else:
            self.degree = degree
        assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
                                      f'tuple of (min, max)'
        # NOTE(review): 'pal_val' looks like a typo for 'pad_val', but it is
        # used consistently below and is a public attribute, so renaming it
        # would be a breaking change.
        self.pal_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.center = center
        self.auto_bound = auto_bound

    def __call__(self, results):
        """Call function to rotate image, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """

        rotate = True if np.random.rand() < self.prob else False
        # The angle is always drawn, even when not rotating, so the RNG
        # stream advances identically regardless of the rotate decision.
        degree = np.random.uniform(min(*self.degree), max(*self.degree))
        if rotate:
            # rotate image
            results['img'] = mmcv.imrotate(
                results['img'],
                angle=degree,
                border_value=self.pal_val,
                center=self.center,
                auto_bound=self.auto_bound)

            # rotate segs; nearest-neighbour keeps label ids intact
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imrotate(
                    results[key],
                    angle=degree,
                    border_value=self.seg_pad_val,
                    center=self.center,
                    auto_bound=self.auto_bound,
                    interpolation='nearest')
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob}, ' \
                    f'degree={self.degree}, ' \
                    f'pad_val={self.pal_val}, ' \
                    f'seg_pad_val={self.seg_pad_val}, ' \
                    f'center={self.center}, ' \
                    f'auto_bound={self.auto_bound})'
        return repr_str
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
@PIPELINES.register_module()
class RGB2Gray(object):
    """Convert RGB image to grayscale image.

    This transform calculate the weighted mean of input image channels with
    ``weights`` and then expand the channels to ``out_channels``. When
    ``out_channels`` is None, the number of output channels is the same as
    input channels.

    Args:
        out_channels (int): Expected number of output channels after
            transforming. Default: None.
        weights (tuple[float]): The weights to calculate the weighted mean.
            Default: (0.299, 0.587, 0.114).
    """

    def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
        assert out_channels is None or out_channels > 0
        self.out_channels = out_channels
        assert isinstance(weights, tuple)
        for weight in weights:
            assert isinstance(weight, (float, int))
        self.weights = weights

    def __call__(self, results):
        """Call function to convert RGB image to grayscale image.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with grayscale image.
        """
        img = results['img']
        assert len(img.shape) == 3
        assert img.shape[2] == len(self.weights)
        # Weighted sum over the channel axis, keeping a singleton channel.
        coeffs = np.array(self.weights).reshape((1, 1, -1))
        gray = (img * coeffs).sum(2, keepdims=True)
        # Replicate the single gray channel to the requested channel count
        # (input channel count when out_channels is None).
        num_out = (coeffs.shape[2]
                   if self.out_channels is None else self.out_channels)
        gray = gray.repeat(num_out, axis=2)

        results['img'] = gray
        results['img_shape'] = gray.shape

        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(out_channels={self.out_channels}, '
                f'weights={self.weights})')
|
| 705 |
+
|
| 706 |
+
|
| 707 |
+
@PIPELINES.register_module()
class AdjustGamma(object):
    """Using gamma correction to process the image.

    Args:
        gamma (float or int): Gamma value used in gamma correction.
            Default: 1.0.
    """

    def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
        assert gamma > 0
        self.gamma = gamma
        # Precompute the 256-entry uint8 lookup table once so that __call__
        # only performs a table lookup per pixel.
        inv_gamma = 1.0 / gamma
        self.table = np.array(
            [(i / 255.0)**inv_gamma * 255
             for i in np.arange(256)]).astype('uint8')

    def __call__(self, results):
        """Call function to process the image with gamma correction.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        results['img'] = mmcv.lut_transform(
            np.array(results['img'], dtype=np.uint8), self.table)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(gamma={self.gamma})'
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
@PIPELINES.register_module()
class SegRescale(object):
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output.
    """

    def __init__(self, scale_factor=1):
        self.scale_factor = scale_factor

    def __call__(self, results):
        """Call function to scale the semantic segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        if self.scale_factor == 1:
            # Identity scale: leave every map untouched.
            return results
        for key in results.get('seg_fields', []):
            # Nearest-neighbour interpolation preserves label ids.
            results[key] = mmcv.imrescale(
                results[key], self.scale_factor, interpolation='nearest')
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(scale_factor={self.scale_factor})'
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
    """Apply photometric distortion to image sequentially, every transformation
    is applied with a probability of 0.5. The position of random contrast is in
    second or second to last.

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def convert(self, img, alpha=1, beta=0):
        """Multiply with ``alpha`` and add ``beta``, clipping to [0, 255]."""
        # Work in float32 to avoid uint8 overflow, then clip back to uint8.
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)

    def brightness(self, img):
        """Brightness distortion."""
        # randint(2) draws 0 or 1 (numpy-style RNG -- TODO confirm `random`
        # here is numpy.random), i.e. a 50% chance to apply the distortion.
        if random.randint(2):
            return self.convert(
                img,
                beta=random.uniform(-self.brightness_delta,
                                    self.brightness_delta))
        return img

    def contrast(self, img):
        """Contrast distortion."""
        if random.randint(2):
            return self.convert(
                img,
                alpha=random.uniform(self.contrast_lower, self.contrast_upper))
        return img

    def saturation(self, img):
        """Saturation distortion."""
        if random.randint(2):
            # Scale the S channel in HSV space, then convert back to BGR.
            img = mmcv.bgr2hsv(img)
            img[:, :, 1] = self.convert(
                img[:, :, 1],
                alpha=random.uniform(self.saturation_lower,
                                     self.saturation_upper))
            img = mmcv.hsv2bgr(img)
        return img

    def hue(self, img):
        """Hue distortion."""
        if random.randint(2):
            # Shift the H channel; hue for uint8 HSV wraps modulo 180.
            img = mmcv.bgr2hsv(img)
            img[:, :,
                0] = (img[:, :, 0].astype(int) +
                      random.randint(-self.hue_delta, self.hue_delta)) % 180
            img = mmcv.hsv2bgr(img)
        return img

    def __call__(self, results):
        """Call function to perform photometric distortion on images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """

        img = results['img']
        # random brightness
        img = self.brightness(img)

        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        # NOTE(review): the comment above and the code below disagree —
        # contrast is actually applied FIRST when mode == 1 and LAST when
        # mode == 0. Preserved as-is because training pipelines may depend
        # on this exact RNG/ordering behavior.
        mode = random.randint(2)
        if mode == 1:
            img = self.contrast(img)

        # random saturation
        img = self.saturation(img)

        # random hue
        img = self.hue(img)

        # random contrast
        if mode == 0:
            img = self.contrast(img)

        results['img'] = img
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(brightness_delta={self.brightness_delta}, '
                     f'contrast_range=({self.contrast_lower}, '
                     f'{self.contrast_upper}), '
                     f'saturation_range=({self.saturation_lower}, '
                     f'{self.saturation_upper}), '
                     f'hue_delta={self.hue_delta})')
        return repr_str
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/stare.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class STAREDataset(CustomDataset):
    """STARE dataset.

    In segmentation map annotation for STARE, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.ah.png'.
    """

    # Class names indexed by label id (0: background, 1: vessel).
    CLASSES = ('background', 'vessel')

    # Display palette aligned with CLASSES.
    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Suffixes and reduce_zero_label are fixed by the dataset format;
        # all other options are forwarded to CustomDataset.
        super(STAREDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.ah.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/datasets/voc.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class PascalVOCDataset(CustomDataset):
    """Pascal VOC dataset.

    Args:
        split (str): Split txt file for Pascal VOC.
    """

    # The 20 VOC object classes plus 'background' at index 0.
    CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
               'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
               'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
               'train', 'tvmonitor')

    # Standard VOC color palette, aligned with CLASSES.
    PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
               [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
               [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
               [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
               [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]

    def __init__(self, split, **kwargs):
        # VOC images are .jpg with .png segmentation maps; the split file
        # selects which samples belong to this subset.
        super(PascalVOCDataset, self).__init__(
            img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .backbones import * # noqa: F401,F403
|
| 2 |
+
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
|
| 3 |
+
build_head, build_loss, build_segmentor)
|
| 4 |
+
from .decode_heads import * # noqa: F401,F403
|
| 5 |
+
from .losses import * # noqa: F401,F403
|
| 6 |
+
from .necks import * # noqa: F401,F403
|
| 7 |
+
from .segmentors import * # noqa: F401,F403
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
|
| 11 |
+
'build_head', 'build_loss', 'build_segmentor'
|
| 12 |
+
]
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .cgnet import CGNet
|
| 2 |
+
# from .fast_scnn import FastSCNN
|
| 3 |
+
from .hrnet import HRNet
|
| 4 |
+
from .mobilenet_v2 import MobileNetV2
|
| 5 |
+
from .mobilenet_v3 import MobileNetV3
|
| 6 |
+
from .resnest import ResNeSt
|
| 7 |
+
from .resnet import ResNet, ResNetV1c, ResNetV1d
|
| 8 |
+
from .resnext import ResNeXt
|
| 9 |
+
from .unet import UNet
|
| 10 |
+
from .vit import VisionTransformer
|
| 11 |
+
from .uniformer import UniFormer
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet',
|
| 15 |
+
'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3',
|
| 16 |
+
'VisionTransformer', 'UniFormer'
|
| 17 |
+
]
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/cgnet.py
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.utils.checkpoint as cp
|
| 4 |
+
from annotator.uniformer.mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
|
| 5 |
+
constant_init, kaiming_init)
|
| 6 |
+
from annotator.uniformer.mmcv.runner import load_checkpoint
|
| 7 |
+
from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm
|
| 8 |
+
|
| 9 |
+
from annotator.uniformer.mmseg.utils import get_root_logger
|
| 10 |
+
from ..builder import BACKBONES
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class GlobalContextExtractor(nn.Module):
    """Global Context Extractor for CGNet.

    This class is employed to refine the joint feature of both local feature
    and surrounding context via squeeze-and-excitation style channel gating.

    Args:
        channel (int): Number of input feature channels.
        reduction (int): Reductions for global context extractor. Default: 16.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
    """

    def __init__(self, channel, reduction=16, with_cp=False):
        super(GlobalContextExtractor, self).__init__()
        self.channel = channel
        self.reduction = reduction
        assert reduction >= 1 and channel >= reduction
        self.with_cp = with_cp
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel), nn.Sigmoid())

    def forward(self, x):

        def _inner_forward(feat):
            batch, channels = feat.size()[:2]
            # Squeeze: global average pool to a (batch, channels) vector.
            gate = self.avg_pool(feat).view(batch, channels)
            # Excite: per-channel sigmoid gate, broadcast back over H x W.
            gate = self.fc(gate).view(batch, channels, 1, 1)
            return feat * gate

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class ContextGuidedBlock(nn.Module):
    """Context Guided Block for CGNet.

    This class consists of four components: local feature extractor,
    surrounding feature extractor, joint feature extractor and global
    context extractor.

    Args:
        in_channels (int): Number of input feature channels.
        out_channels (int): Number of output feature channels.
        dilation (int): Dilation rate for surrounding context extractor.
            Default: 2.
        reduction (int): Reduction for global context extractor. Default: 16.
        skip_connect (bool): Add input to output or not. Default: True.
        downsample (bool): Downsample the input to 1/2 or not. Default: False.
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='PReLU').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation=2,
                 reduction=16,
                 skip_connect=True,
                 downsample=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='PReLU'),
                 with_cp=False):
        super(ContextGuidedBlock, self).__init__()
        self.with_cp = with_cp
        self.downsample = downsample

        # Non-downsampling blocks halve the channel count here and restore
        # it via the loc/sur concatenation below.
        channels = out_channels if downsample else out_channels // 2
        # NOTE(review): this mutates the caller's act_cfg dict in place; a
        # shared cfg dict will carry 'num_parameters' to later consumers.
        if 'type' in act_cfg and act_cfg['type'] == 'PReLU':
            act_cfg['num_parameters'] = channels
        kernel_size = 3 if downsample else 1
        stride = 2 if downsample else 1
        padding = (kernel_size - 1) // 2

        self.conv1x1 = ConvModule(
            in_channels,
            channels,
            kernel_size,
            stride,
            padding,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

        # Local feature extractor: depthwise (groups=channels) 3x3 conv.
        self.f_loc = build_conv_layer(
            conv_cfg,
            channels,
            channels,
            kernel_size=3,
            padding=1,
            groups=channels,
            bias=False)
        # Surrounding context extractor: same depthwise conv but dilated.
        self.f_sur = build_conv_layer(
            conv_cfg,
            channels,
            channels,
            kernel_size=3,
            padding=dilation,
            groups=channels,
            dilation=dilation,
            bias=False)

        # Joint feature path: norm + PReLU over the loc||sur concatenation.
        self.bn = build_norm_layer(norm_cfg, 2 * channels)[1]
        self.activate = nn.PReLU(2 * channels)

        if downsample:
            # Project the 2*channels joint feature back to out_channels.
            self.bottleneck = build_conv_layer(
                conv_cfg,
                2 * channels,
                out_channels,
                kernel_size=1,
                bias=False)

        # Residual connection only makes sense when spatial size is kept.
        self.skip_connect = skip_connect and not downsample
        self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp)

    def forward(self, x):

        def _inner_forward(x):
            out = self.conv1x1(x)
            loc = self.f_loc(out)
            sur = self.f_sur(out)

            joi_feat = torch.cat([loc, sur], 1)  # the joint feature
            joi_feat = self.bn(joi_feat)
            joi_feat = self.activate(joi_feat)
            if self.downsample:
                joi_feat = self.bottleneck(joi_feat)  # channel = out_channels
            # f_glo is employed to refine the joint feature
            out = self.f_glo(joi_feat)

            if self.skip_connect:
                return x + out
            else:
                return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class InputInjection(nn.Module):
    """Downsampling module for CGNet.

    Stacks ``num_downsampling`` AvgPool2d(3, stride=2, padding=1) layers, so
    the input is shrunk spatially by a factor of 2 ** num_downsampling.
    """

    def __init__(self, num_downsampling):
        super(InputInjection, self).__init__()
        self.pool = nn.ModuleList(
            nn.AvgPool2d(3, stride=2, padding=1)
            for _ in range(num_downsampling))

    def forward(self, x):
        for pool in self.pool:
            x = pool(x)
        return x
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@BACKBONES.register_module()
|
| 186 |
+
class CGNet(nn.Module):
|
| 187 |
+
"""CGNet backbone.
|
| 188 |
+
|
| 189 |
+
A Light-weight Context Guided Network for Semantic Segmentation
|
| 190 |
+
arXiv: https://arxiv.org/abs/1811.08201
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
in_channels (int): Number of input image channels. Normally 3.
|
| 194 |
+
num_channels (tuple[int]): Numbers of feature channels at each stages.
|
| 195 |
+
Default: (32, 64, 128).
|
| 196 |
+
num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2.
|
| 197 |
+
Default: (3, 21).
|
| 198 |
+
dilations (tuple[int]): Dilation rate for surrounding context
|
| 199 |
+
extractors at stage 1 and stage 2. Default: (2, 4).
|
| 200 |
+
reductions (tuple[int]): Reductions for global context extractors at
|
| 201 |
+
stage 1 and stage 2. Default: (8, 16).
|
| 202 |
+
conv_cfg (dict): Config dict for convolution layer.
|
| 203 |
+
Default: None, which means using conv2d.
|
| 204 |
+
norm_cfg (dict): Config dict for normalization layer.
|
| 205 |
+
Default: dict(type='BN', requires_grad=True).
|
| 206 |
+
act_cfg (dict): Config dict for activation layer.
|
| 207 |
+
Default: dict(type='PReLU').
|
| 208 |
+
norm_eval (bool): Whether to set norm layers to eval mode, namely,
|
| 209 |
+
freeze running stats (mean and var). Note: Effect on Batch Norm
|
| 210 |
+
and its variants only. Default: False.
|
| 211 |
+
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
|
| 212 |
+
memory while slowing down the training speed. Default: False.
|
| 213 |
+
"""
|
| 214 |
+
|
| 215 |
+
    def __init__(self,
                 in_channels=3,
                 num_channels=(32, 64, 128),
                 num_blocks=(3, 21),
                 dilations=(2, 4),
                 reductions=(8, 16),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='PReLU'),
                 norm_eval=False,
                 with_cp=False):
        """Build the CGNet stem and the two context-guided stages."""
        super(CGNet, self).__init__()
        self.in_channels = in_channels
        self.num_channels = num_channels
        assert isinstance(self.num_channels, tuple) and len(
            self.num_channels) == 3
        self.num_blocks = num_blocks
        assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2
        self.dilations = dilations
        assert isinstance(self.dilations, tuple) and len(self.dilations) == 2
        self.reductions = reductions
        assert isinstance(self.reductions, tuple) and len(self.reductions) == 2
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # NOTE(review): mutates the act_cfg dict in place (and
        # ContextGuidedBlock mutates it again per block) — a cfg dict shared
        # across modules will carry 'num_parameters' to later consumers.
        if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU':
            self.act_cfg['num_parameters'] = num_channels[0]
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        # Stem: three 3x3 convs, the first with stride 2 (downsample x2).
        cur_channels = in_channels
        self.stem = nn.ModuleList()
        for i in range(3):
            self.stem.append(
                ConvModule(
                    cur_channels,
                    num_channels[0],
                    3,
                    2 if i == 0 else 1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            cur_channels = num_channels[0]

        self.inject_2x = InputInjection(1)  # down-sample for Input, factor=2
        self.inject_4x = InputInjection(2)  # down-sample for Input, factor=4

        # Stage 0 output = stem features concatenated with the 2x input.
        cur_channels += in_channels
        self.norm_prelu_0 = nn.Sequential(
            build_norm_layer(norm_cfg, cur_channels)[1],
            nn.PReLU(cur_channels))

        # stage 1
        self.level1 = nn.ModuleList()
        for i in range(num_blocks[0]):
            self.level1.append(
                ContextGuidedBlock(
                    cur_channels if i == 0 else num_channels[1],
                    num_channels[1],
                    dilations[0],
                    reductions[0],
                    downsample=(i == 0),
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    with_cp=with_cp))  # CG block

        # Stage 1 output = last block + first block (skip) + 4x input.
        cur_channels = 2 * num_channels[1] + in_channels
        self.norm_prelu_1 = nn.Sequential(
            build_norm_layer(norm_cfg, cur_channels)[1],
            nn.PReLU(cur_channels))

        # stage 2
        self.level2 = nn.ModuleList()
        for i in range(num_blocks[1]):
            self.level2.append(
                ContextGuidedBlock(
                    cur_channels if i == 0 else num_channels[2],
                    num_channels[2],
                    dilations[1],
                    reductions[1],
                    downsample=(i == 0),
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    with_cp=with_cp))  # CG block

        # Stage 2 output = first block (skip) + last block concatenation.
        cur_channels = 2 * num_channels[2]
        self.norm_prelu_2 = nn.Sequential(
            build_norm_layer(norm_cfg, cur_channels)[1],
            nn.PReLU(cur_channels))
|
| 308 |
+
|
| 309 |
+
def forward(self, x):
    """Forward pass returning the feature maps of all three stages.

    Args:
        x (Tensor): Input image tensor.

    Returns:
        list[Tensor]: Features after stage 0, stage 1 and stage 2.
    """
    feats = []

    # stage 0: stem convolutions, then concatenate the 1/2-resolution
    # injected input before normalization.
    half_res = self.inject_2x(x)
    quarter_res = self.inject_4x(x)
    for stem_layer in self.stem:
        x = stem_layer(x)
    x = self.norm_prelu_0(torch.cat([x, half_res], 1))
    feats.append(x)

    # stage 1: keep the first (down-sampling) block's output as a skip,
    # then fuse it with the last block's output and the 1/4-res input.
    first_out = None
    for idx, cg_block in enumerate(self.level1):
        x = cg_block(x)
        if idx == 0:
            first_out = x
    x = self.norm_prelu_1(torch.cat([x, first_out, quarter_res], 1))
    feats.append(x)

    # stage 2: same skip pattern, but no injected input at this depth.
    first_out = None
    for idx, cg_block in enumerate(self.level2):
        x = cg_block(x)
        if idx == 0:
            first_out = x
    x = self.norm_prelu_2(torch.cat([first_out, x], 1))
    feats.append(x)

    return feats
|
| 337 |
+
|
| 338 |
+
def init_weights(self, pretrained=None):
    """Initialize the weights in backbone.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Defaults to None, in which case standard random
            initialization is applied.

    Raises:
        TypeError: If ``pretrained`` is neither a str nor None.
    """
    if pretrained is None:
        # No checkpoint: Kaiming init for convs/linears, constant init
        # for norm layers, and zero for PReLU slopes.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                kaiming_init(module)
            elif isinstance(module, (_BatchNorm, nn.GroupNorm)):
                constant_init(module, 1)
            elif isinstance(module, nn.PReLU):
                constant_init(module, 0)
    elif isinstance(pretrained, str):
        load_checkpoint(self, pretrained, strict=False,
                        logger=get_root_logger())
    else:
        raise TypeError('pretrained must be a str or None')
|
| 358 |
+
|
| 359 |
+
def train(self, mode=True):
    """Set the module in training or evaluation mode.

    When ``mode`` is True and ``self.norm_eval`` is set, every
    batch-norm layer is put straight back into eval mode so its
    running statistics stay frozen during training.
    """
    super(CGNet, self).train(mode)
    if not (mode and self.norm_eval):
        return
    # trick: eval() has an effect on BatchNorm layers only
    for module in self.modules():
        if isinstance(module, _BatchNorm):
            module.eval()
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/fast_scnn.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, constant_init,
|
| 4 |
+
kaiming_init)
|
| 5 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 6 |
+
|
| 7 |
+
from annotator.uniformer.mmseg.models.decode_heads.psp_head import PPM
|
| 8 |
+
from annotator.uniformer.mmseg.ops import resize
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils.inverted_residual import InvertedResidual
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class LearningToDownsample(nn.Module):
    """Learning to downsample module.

    Reduces the input resolution by a factor of 8 via one regular conv
    followed by two depthwise-separable convs, each with stride 2.

    Args:
        in_channels (int): Number of input channels.
        dw_channels (tuple[int]): Number of output channels of the first and
            the second depthwise conv (dwconv) layers.
        out_channels (int): Number of output channels of the whole
            'learning to downsample' module.
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
    """

    def __init__(self,
                 in_channels,
                 dw_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU')):
        super(LearningToDownsample, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        first_dw = dw_channels[0]
        second_dw = dw_channels[1]

        # Regular 3x3 conv halving the spatial resolution.
        self.conv = ConvModule(
            in_channels,
            first_dw,
            3,
            stride=2,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Two depthwise-separable convs, each downsampling by 2 again.
        self.dsconv1 = DepthwiseSeparableConvModule(
            first_dw,
            second_dw,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg)
        self.dsconv2 = DepthwiseSeparableConvModule(
            second_dw,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg)

    def forward(self, x):
        """Apply the three downsampling convolutions in sequence."""
        return self.dsconv2(self.dsconv1(self.conv(x)))
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class GlobalFeatureExtractor(nn.Module):
    """Global feature extractor module.

    Args:
        in_channels (int): Number of input channels of the GFE module.
            Default: 64
        block_channels (tuple[int]): Tuple of ints. Each int specifies the
            number of output channels of each Inverted Residual module.
            Default: (64, 96, 128)
        out_channels(int): Number of output channels of the GFE module.
            Default: 128
        expand_ratio (int): Adjusts number of channels of the hidden layer
            in InvertedResidual by this amount.
            Default: 6
        num_blocks (tuple[int]): Tuple of ints. Each int specifies the
            number of times each Inverted Residual module is repeated.
            The repeated Inverted Residual modules are called a 'group'.
            Default: (3, 3, 3)
        strides (tuple[int]): Tuple of ints. Each int specifies
            the downsampling factor of each 'group'.
            Default: (2, 2, 1)
        pool_scales (tuple[int]): Tuple of ints. Each int specifies
            the parameter required in 'global average pooling' within PPM.
            Default: (1, 2, 3, 6)
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False
    """

    def __init__(self,
                 in_channels=64,
                 block_channels=(64, 96, 128),
                 out_channels=128,
                 expand_ratio=6,
                 num_blocks=(3, 3, 3),
                 strides=(2, 2, 1),
                 pool_scales=(1, 2, 3, 6),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 align_corners=False):
        super(GlobalFeatureExtractor, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # The three-group structure below is hard-wired, hence the check.
        assert len(block_channels) == len(num_blocks) == 3
        # Three groups of MobileNet-v2 inverted-residual blocks; only the
        # first block of each group may downsample (per `strides`).
        self.bottleneck1 = self._make_layer(in_channels, block_channels[0],
                                            num_blocks[0], strides[0],
                                            expand_ratio)
        self.bottleneck2 = self._make_layer(block_channels[0],
                                            block_channels[1], num_blocks[1],
                                            strides[1], expand_ratio)
        self.bottleneck3 = self._make_layer(block_channels[1],
                                            block_channels[2], num_blocks[2],
                                            strides[2], expand_ratio)
        # Pyramid pooling over the last group's output; each of the
        # len(pool_scales) branches emits block_channels[2] // 4 channels.
        self.ppm = PPM(
            pool_scales,
            block_channels[2],
            block_channels[2] // 4,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            align_corners=align_corners)
        # 1x1 conv fusing [features, PPM branches] — the input width
        # block_channels[2] * 2 assumes 4 pool scales (4 * C/4 + C).
        self.out = ConvModule(
            block_channels[2] * 2,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def _make_layer(self,
                    in_channels,
                    out_channels,
                    blocks,
                    stride=1,
                    expand_ratio=6):
        """Stack ``blocks`` InvertedResiduals; only the first one strides."""
        layers = [
            InvertedResidual(
                in_channels,
                out_channels,
                stride,
                expand_ratio,
                norm_cfg=self.norm_cfg)
        ]
        for i in range(1, blocks):
            layers.append(
                InvertedResidual(
                    out_channels,
                    out_channels,
                    1,
                    expand_ratio,
                    norm_cfg=self.norm_cfg))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the bottleneck groups, append PPM context and fuse."""
        x = self.bottleneck1(x)
        x = self.bottleneck2(x)
        x = self.bottleneck3(x)
        # Concatenate the raw features with every PPM branch along channels.
        x = torch.cat([x, *self.ppm(x)], dim=1)
        x = self.out(x)
        return x
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
class FeatureFusionModule(nn.Module):
    """Feature fusion module.

    Fuses a high-resolution branch with an upsampled low-resolution branch
    by 1x1 projections and element-wise addition.

    Args:
        higher_in_channels (int): Number of input channels of the
            higher-resolution branch.
        lower_in_channels (int): Number of input channels of the
            lower-resolution branch.
        out_channels (int): Number of output channels.
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False
    """

    def __init__(self,
                 higher_in_channels,
                 lower_in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 align_corners=False):
        super(FeatureFusionModule, self).__init__()
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        # Activated 1x1 conv on the upsampled low-resolution branch.
        self.dwconv = ConvModule(
            lower_in_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Projection convs without activation — the ReLU is applied once,
        # after the two branches are summed.
        self.conv_lower_res = ConvModule(
            out_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=None)
        self.conv_higher_res = ConvModule(
            higher_in_channels,
            out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=None)
        self.relu = nn.ReLU(True)

    def forward(self, higher_res_feature, lower_res_feature):
        """Upsample the low-res branch, project both branches, sum, ReLU."""
        upsampled = resize(
            lower_res_feature,
            size=higher_res_feature.size()[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        lower = self.conv_lower_res(self.dwconv(upsampled))
        higher = self.conv_higher_res(higher_res_feature)
        return self.relu(higher + lower)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@BACKBONES.register_module()
class FastSCNN(nn.Module):
    """Fast-SCNN Backbone.

    Args:
        in_channels (int): Number of input image channels. Default: 3.
        downsample_dw_channels (tuple[int]): Number of output channels after
            the first conv layer & the second conv layer in
            Learning-To-Downsample (LTD) module.
            Default: (32, 48).
        global_in_channels (int): Number of input channels of
            Global Feature Extractor(GFE).
            Equal to number of output channels of LTD.
            Default: 64.
        global_block_channels (tuple[int]): Tuple of integers that describe
            the output channels for each of the MobileNet-v2 bottleneck
            residual blocks in GFE.
            Default: (64, 96, 128).
        global_block_strides (tuple[int]): Tuple of integers
            that describe the strides (downsampling factors) for each of the
            MobileNet-v2 bottleneck residual blocks in GFE.
            Default: (2, 2, 1).
        global_out_channels (int): Number of output channels of GFE.
            Default: 128.
        higher_in_channels (int): Number of input channels of the higher
            resolution branch in FFM.
            Equal to global_in_channels.
            Default: 64.
        lower_in_channels (int): Number of input channels of the lower
            resolution branch in FFM.
            Equal to global_out_channels.
            Default: 128.
        fusion_out_channels (int): Number of output channels of FFM.
            Default: 128.
        out_indices (tuple): Tuple of indices of list
            [higher_res_features, lower_res_features, fusion_output].
            Often set to (0,1,2) to enable aux. heads.
            Default: (0, 1, 2).
        conv_cfg (dict | None): Config of conv layers. Default: None
        norm_cfg (dict | None): Config of norm layers. Default:
            dict(type='BN')
        act_cfg (dict): Config of activation layers. Default:
            dict(type='ReLU')
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False

    Raises:
        AssertionError: If ``global_in_channels != higher_in_channels`` or
            ``global_out_channels != lower_in_channels``.
    """

    def __init__(self,
                 in_channels=3,
                 downsample_dw_channels=(32, 48),
                 global_in_channels=64,
                 global_block_channels=(64, 96, 128),
                 global_block_strides=(2, 2, 1),
                 global_out_channels=128,
                 higher_in_channels=64,
                 lower_in_channels=128,
                 fusion_out_channels=128,
                 out_indices=(0, 1, 2),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 align_corners=False):

        super(FastSCNN, self).__init__()
        # Fix: the original messages used a backslash continuation *inside*
        # the string literal, which embedded a run of indentation spaces in
        # the raised message; implicit concatenation yields a clean message.
        if global_in_channels != higher_in_channels:
            raise AssertionError('Global Input Channels must be the same '
                                 'with Higher Input Channels!')
        elif global_out_channels != lower_in_channels:
            raise AssertionError('Global Output Channels must be the same '
                                 'with Lower Input Channels!')

        self.in_channels = in_channels
        self.downsample_dw_channels1 = downsample_dw_channels[0]
        self.downsample_dw_channels2 = downsample_dw_channels[1]
        self.global_in_channels = global_in_channels
        self.global_block_channels = global_block_channels
        self.global_block_strides = global_block_strides
        self.global_out_channels = global_out_channels
        self.higher_in_channels = higher_in_channels
        self.lower_in_channels = lower_in_channels
        self.fusion_out_channels = fusion_out_channels
        self.out_indices = out_indices
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners
        # LTD: downsample the input by 8x and produce the high-res branch.
        self.learning_to_downsample = LearningToDownsample(
            in_channels,
            downsample_dw_channels,
            global_in_channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # GFE: deep low-resolution context features.
        self.global_feature_extractor = GlobalFeatureExtractor(
            global_in_channels,
            global_block_channels,
            global_out_channels,
            strides=self.global_block_strides,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            align_corners=self.align_corners)
        # FFM: merge the two branches at the higher resolution.
        self.feature_fusion = FeatureFusionModule(
            higher_in_channels,
            lower_in_channels,
            fusion_out_channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            align_corners=self.align_corners)

    def init_weights(self, pretrained=None):
        """Randomly initialize conv and norm layers.

        Args:
            pretrained (str, optional): Accepted for interface
                compatibility; this backbone ignores it and always uses
                random initialization.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                constant_init(m, 1)

    def forward(self, x):
        """Forward pass.

        Returns:
            tuple[Tensor]: Features selected by ``self.out_indices`` from
                [higher_res_features, lower_res_features, fusion_output].
        """
        higher_res_features = self.learning_to_downsample(x)
        lower_res_features = self.global_feature_extractor(higher_res_features)
        fusion_output = self.feature_fusion(higher_res_features,
                                            lower_res_features)

        outs = [higher_res_features, lower_res_features, fusion_output]
        outs = [outs[i] for i in self.out_indices]
        return tuple(outs)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/hrnet.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
from annotator.uniformer.mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
|
| 3 |
+
kaiming_init)
|
| 4 |
+
from annotator.uniformer.mmcv.runner import load_checkpoint
|
| 5 |
+
from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm
|
| 6 |
+
|
| 7 |
+
from annotator.uniformer.mmseg.ops import Upsample, resize
|
| 8 |
+
from annotator.uniformer.mmseg.utils import get_root_logger
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from .resnet import BasicBlock, Bottleneck
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class HRModule(nn.Module):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self,
                 num_branches,
                 blocks,
                 num_blocks,
                 in_channels,
                 num_channels,
                 multiscale_output=True,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(HRModule, self).__init__()
        # Fail fast if the per-branch config lists disagree in length.
        self._check_branches(num_branches, num_blocks, in_channels,
                             num_channels)

        self.in_channels = in_channels
        self.num_branches = num_branches

        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks,
                                            num_channels)
        self.fuse_layers = self._make_fuse_layers()
        # inplace=False: the fused sums may be needed by several consumers.
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels,
                        num_channels):
        """Check branches configuration."""
        if num_branches != len(num_blocks):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \
                f'{len(num_blocks)})'
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \
                f'{len(num_channels)})'
            raise ValueError(error_msg)

        if num_branches != len(in_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \
                f'{len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        """Build one branch."""
        # A projection shortcut is needed whenever the residual block
        # changes resolution or channel count.
        downsample = None
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.in_channels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(
                self.in_channels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg))
        # NOTE: mutates self.in_channels so later fuse layers see the
        # post-expansion channel count of this branch.
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.in_channels[branch_index],
                    num_channels[branch_index],
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build multiple branch."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse layer."""
        # A single branch has nothing to exchange with.
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution source: 1x1 channel projection then
                    # bilinear upsampling by 2**(j - i).
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels[j],
                                in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            # we set align_corners=False for HRNet
                            Upsample(
                                scale_factor=2**(j - i),
                                mode='bilinear',
                                align_corners=False)))
                elif j == i:
                    # Same resolution: identity (handled in forward).
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain of stride-2 3x3 convs;
                    # only the last step changes channels and has no ReLU.
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        # Run every branch on its own-resolution input.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        # Exchange: each output resolution i sums contributions from all
        # branches j, resized/projected as built in _make_fuse_layers.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if i == j:
                    y += x[j]
                elif j > i:
                    # resize() guards against off-by-one shapes from
                    # the fixed-scale Upsample.
                    y = y + resize(
                        self.fuse_layers[i][j](x[j]),
                        size=x[i].shape[2:],
                        mode='bilinear',
                        align_corners=False)
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
@BACKBONES.register_module()
|
| 212 |
+
class HRNet(nn.Module):
|
| 213 |
+
"""HRNet backbone.
|
| 214 |
+
|
| 215 |
+
High-Resolution Representations for Labeling Pixels and Regions
|
| 216 |
+
arXiv: https://arxiv.org/abs/1904.04514
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
extra (dict): detailed configuration for each stage of HRNet.
|
| 220 |
+
in_channels (int): Number of input image channels. Normally 3.
|
| 221 |
+
conv_cfg (dict): dictionary to construct and config conv layer.
|
| 222 |
+
norm_cfg (dict): dictionary to construct and config norm layer.
|
| 223 |
+
norm_eval (bool): Whether to set norm layers to eval mode, namely,
|
| 224 |
+
freeze running stats (mean and var). Note: Effect on Batch Norm
|
| 225 |
+
and its variants only.
|
| 226 |
+
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
|
| 227 |
+
memory while slowing down the training speed.
|
| 228 |
+
zero_init_residual (bool): whether to use zero init for last norm layer
|
| 229 |
+
in resblocks to let them behave as identity.
|
| 230 |
+
|
| 231 |
+
Example:
|
| 232 |
+
>>> from annotator.uniformer.mmseg.models import HRNet
|
| 233 |
+
>>> import torch
|
| 234 |
+
>>> extra = dict(
|
| 235 |
+
>>> stage1=dict(
|
| 236 |
+
>>> num_modules=1,
|
| 237 |
+
>>> num_branches=1,
|
| 238 |
+
>>> block='BOTTLENECK',
|
| 239 |
+
>>> num_blocks=(4, ),
|
| 240 |
+
>>> num_channels=(64, )),
|
| 241 |
+
>>> stage2=dict(
|
| 242 |
+
>>> num_modules=1,
|
| 243 |
+
>>> num_branches=2,
|
| 244 |
+
>>> block='BASIC',
|
| 245 |
+
>>> num_blocks=(4, 4),
|
| 246 |
+
>>> num_channels=(32, 64)),
|
| 247 |
+
>>> stage3=dict(
|
| 248 |
+
>>> num_modules=4,
|
| 249 |
+
>>> num_branches=3,
|
| 250 |
+
>>> block='BASIC',
|
| 251 |
+
>>> num_blocks=(4, 4, 4),
|
| 252 |
+
>>> num_channels=(32, 64, 128)),
|
| 253 |
+
>>> stage4=dict(
|
| 254 |
+
>>> num_modules=3,
|
| 255 |
+
>>> num_branches=4,
|
| 256 |
+
>>> block='BASIC',
|
| 257 |
+
>>> num_blocks=(4, 4, 4, 4),
|
| 258 |
+
>>> num_channels=(32, 64, 128, 256)))
|
| 259 |
+
>>> self = HRNet(extra, in_channels=1)
|
| 260 |
+
>>> self.eval()
|
| 261 |
+
>>> inputs = torch.rand(1, 1, 32, 32)
|
| 262 |
+
>>> level_outputs = self.forward(inputs)
|
| 263 |
+
>>> for level_out in level_outputs:
|
| 264 |
+
... print(tuple(level_out.shape))
|
| 265 |
+
(1, 32, 8, 8)
|
| 266 |
+
(1, 64, 4, 4)
|
| 267 |
+
(1, 128, 2, 2)
|
| 268 |
+
(1, 256, 1, 1)
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
|
| 272 |
+
|
| 273 |
+
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 with_cp=False,
                 zero_init_residual=False):
        # Build the HRNet backbone: a 2-conv stem, a single-branch stage 1,
        # then three multi-branch stages, each preceded by a transition
        # layer that adapts/creates branches. Stage layouts come from
        # ``extra['stage1'..'stage4']``.
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net: two stride-2 3x3 convs (overall stride 4), each followed
        # by a norm layer registered under the name chosen by
        # build_norm_layer, and a shared in-place ReLU.
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1: single residual branch; output width depends on the
        # chosen block's expansion factor.
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2: transition from 1 branch to the stage-2 branch set,
        # then the first multi-resolution HR stage.
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3: same pattern, starting from stage-2 output channels.
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4: final multi-branch stage; its outputs are what forward()
        # returns.
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)
|
| 360 |
+
|
| 361 |
+
@property
|
| 362 |
+
def norm1(self):
|
| 363 |
+
"""nn.Module: the normalization layer named "norm1" """
|
| 364 |
+
return getattr(self, self.norm1_name)
|
| 365 |
+
|
| 366 |
+
@property
|
| 367 |
+
def norm2(self):
|
| 368 |
+
"""nn.Module: the normalization layer named "norm2" """
|
| 369 |
+
return getattr(self, self.norm2_name)
|
| 370 |
+
|
| 371 |
+
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        """Make the transition layer between two stages.

        For branches that already exist, a 3x3 conv + norm + ReLU is inserted
        only when the branch width changes (``None`` is stored otherwise).
        Each newly added branch is produced from the last previous branch
        through a chain of stride-2 3x3 conv downsample units, where only
        the final unit changes the channel count.

        Args:
            num_channels_pre_layer (list[int]): Channels of each branch in
                the previous stage.
            num_channels_cur_layer (list[int]): Channels of each branch in
                the current stage.

        Returns:
            nn.ModuleList: One entry per current branch; an entry is ``None``
            when the previous branch can be passed through unchanged.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    # Same resolution, different width: 3x3 stride-1 conv
                    # remaps the channel count.
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: downsample the last previous branch once per
                # extra resolution level (i + 1 - num_branches_pre times).
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)
|
| 417 |
+
|
| 418 |
+
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks into one sequential layer.

        Args:
            block (type): Residual block class (``BasicBlock`` or
                ``Bottleneck``); the layer outputs
                ``planes * block.expansion`` channels.
            inplanes (int): Number of input channels.
            planes (int): Base channel count of each block.
            blocks (int): Number of stacked blocks.
            stride (int): Stride of the first block. Default: 1.

        Returns:
            nn.Sequential: The assembled residual layer.
        """
        downsample = None
        # The residual shortcut needs a 1x1 projection whenever the first
        # block changes spatial size or channel count.
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        # Only the first block may stride / downsample ...
        layers.append(
            block(
                inplanes,
                planes,
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg))
        inplanes = planes * block.expansion
        # ... the remaining blocks keep resolution and width.
        for i in range(1, blocks):
            layers.append(
                block(
                    inplanes,
                    planes,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
|
| 453 |
+
|
| 454 |
+
    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        """Build one HR stage as a sequence of ``HRModule``.

        Args:
            layer_config (dict): Stage config with keys ``num_modules``,
                ``num_branches``, ``num_blocks``, ``num_channels`` and
                ``block``.
            in_channels (list[int]): Input channels of each branch.
            multiscale_output (bool): Whether the last module outputs all
                branches. Default: True.

        Returns:
            tuple: ``(stage, in_channels)`` — the ``nn.Sequential`` of
            HRModules and the channel list (returned as passed in).
        """
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]

        hr_modules = []
        for i in range(num_modules):
            # multi_scale_output is only used for the last module
            if not multiscale_output and i == num_modules - 1:
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True

            hr_modules.append(
                HRModule(
                    num_branches,
                    block,
                    num_blocks,
                    in_channels,
                    num_channels,
                    reset_multiscale_output,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        return nn.Sequential(*hr_modules), in_channels
|
| 483 |
+
|
| 484 |
+
def init_weights(self, pretrained=None):
|
| 485 |
+
"""Initialize the weights in backbone.
|
| 486 |
+
|
| 487 |
+
Args:
|
| 488 |
+
pretrained (str, optional): Path to pre-trained weights.
|
| 489 |
+
Defaults to None.
|
| 490 |
+
"""
|
| 491 |
+
if isinstance(pretrained, str):
|
| 492 |
+
logger = get_root_logger()
|
| 493 |
+
load_checkpoint(self, pretrained, strict=False, logger=logger)
|
| 494 |
+
elif pretrained is None:
|
| 495 |
+
for m in self.modules():
|
| 496 |
+
if isinstance(m, nn.Conv2d):
|
| 497 |
+
kaiming_init(m)
|
| 498 |
+
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
|
| 499 |
+
constant_init(m, 1)
|
| 500 |
+
|
| 501 |
+
if self.zero_init_residual:
|
| 502 |
+
for m in self.modules():
|
| 503 |
+
if isinstance(m, Bottleneck):
|
| 504 |
+
constant_init(m.norm3, 0)
|
| 505 |
+
elif isinstance(m, BasicBlock):
|
| 506 |
+
constant_init(m.norm2, 0)
|
| 507 |
+
else:
|
| 508 |
+
raise TypeError('pretrained must be a str or None')
|
| 509 |
+
|
| 510 |
+
    def forward(self, x):
        """Forward function.

        Runs the stem and stage 1, then for stages 2-4 applies the matching
        transition layers to adapt/create branches before each
        multi-branch stage.

        Args:
            x (Tensor): Input image tensor.

        Returns:
            list[Tensor]: One feature map per stage-4 branch.
        """

        # Stem: conv1/norm1/relu then conv2/norm2/relu, then stage 1.
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # Stage 2: every branch is derived from the single stage-1 output.
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        # Stages 3 and 4: branches without a transition pass through
        # (y_list[i]); transition modules — which create new
        # lower-resolution branches — consume the last previous output,
        # y_list[-1].
        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        return y_list
|
| 546 |
+
|
| 547 |
+
def train(self, mode=True):
|
| 548 |
+
"""Convert the model into training mode will keeping the normalization
|
| 549 |
+
layer freezed."""
|
| 550 |
+
super(HRNet, self).train(mode)
|
| 551 |
+
if mode and self.norm_eval:
|
| 552 |
+
for m in self.modules():
|
| 553 |
+
# trick: eval have effect on BatchNorm only
|
| 554 |
+
if isinstance(m, _BatchNorm):
|
| 555 |
+
m.eval()
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/mobilenet_v2.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from annotator.uniformer.mmcv.cnn import ConvModule, constant_init, kaiming_init
|
| 5 |
+
from annotator.uniformer.mmcv.runner import load_checkpoint
|
| 6 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 7 |
+
|
| 8 |
+
from ..builder import BACKBONES
|
| 9 |
+
from ..utils import InvertedResidual, make_divisible
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@BACKBONES.register_module()
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        strides (Sequence[int], optional): Strides of the first block of each
            layer. If not specified, default config in ``arch_setting`` will
            be used.
        dilations (Sequence[int]): Dilation of each layer.
        out_indices (None or Sequence[int]): Output from which stages.
            Default: (1, 2, 4, 6).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
    """

    # Parameters to build layers. 3 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks.
    arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4],
                     [6, 96, 3], [6, 160, 3], [6, 320, 1]]

    def __init__(self,
                 widen_factor=1.,
                 strides=(1, 2, 2, 2, 1, 2, 1),
                 dilations=(1, 1, 1, 1, 1, 1, 1),
                 out_indices=(1, 2, 4, 6),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False):
        super(MobileNetV2, self).__init__()
        self.widen_factor = widen_factor
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == len(self.arch_settings)
        # Valid indices are 0..6 (7 layers in arch_settings). The original
        # message claimed range(0, 8), contradicting the check; fixed.
        for index in out_indices:
            if index not in range(0, 7):
                raise ValueError('the item in out_indices must in '
                                 f'range(0, 7). But received {index}')

        if frozen_stages not in range(-1, 7):
            raise ValueError('frozen_stages must be in range(-1, 7). '
                             f'But received {frozen_stages}')
        # Single assignment (was duplicated before and after validation).
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        self.in_channels = make_divisible(32 * widen_factor, 8)

        # Stem: stride-2 3x3 conv.
        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Build the 7 inverted-residual layers; keep their attribute names
        # in self.layers for ordered iteration in forward().
        self.layers = []

        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks = layer_cfg
            stride = self.strides[i]
            dilation = self.dilations[i]
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

    def make_layer(self, out_channels, num_blocks, stride, dilation,
                   expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): Number of blocks.
            stride (int): Stride of the first block.
            dilation (int): Dilation of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.
        """
        layers = []
        for i in range(num_blocks):
            # Only the first block of a layer applies stride/dilation.
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride if i == 0 else 1,
                    expand_ratio=expand_ratio,
                    dilation=dilation if i == 0 else 1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels

        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Initialize weights, from a checkpoint when ``pretrained`` is a
        path, otherwise Kaiming/constant defaults.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor ``None``.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run the stem and all layers; collect outputs at ``out_indices``.

        Returns a single tensor when exactly one index is requested,
        otherwise a tuple of tensors.
        """
        x = self.conv1(x)

        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)

        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` layers."""
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Set train/eval mode, re-applying stage freezing and optionally
        keeping BatchNorm layers in eval mode (``norm_eval``)."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/mobilenet_v3.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import annotator.uniformer.mmcv as mmcv
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from annotator.uniformer.mmcv.cnn import ConvModule, constant_init, kaiming_init
|
| 6 |
+
from annotator.uniformer.mmcv.cnn.bricks import Conv2dAdaptivePadding
|
| 7 |
+
from annotator.uniformer.mmcv.runner import load_checkpoint
|
| 8 |
+
from torch.nn.modules.batchnorm import _BatchNorm
|
| 9 |
+
|
| 10 |
+
from ..builder import BACKBONES
|
| 11 |
+
from ..utils import InvertedResidualV3 as InvertedResidual
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@BACKBONES.register_module()
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone.

    This backbone is the improved implementation of `Searching for MobileNetV3
    <https://ieeexplore.ieee.org/document/9008835>`_.

    Args:
        arch (str): Architecture of mobilnetv3, from {'small', 'large'}.
            Default: 'small'.
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        out_indices (tuple[int]): Output from which layer.
            Default: (0, 1, 12).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        reduction_factor (int): Divisor applied to the mid/out channels of
            the late layers (index >= 12 for 'large', >= 8 for 'small').
            Default: 1.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
    """
    # Parameters to build each block:
    # [kernel size, mid channels, out channels, with_se, act type, stride]
    arch_settings = {
        'small': [[3, 16, 16, True, 'ReLU', 2],  # block0 layer1 os=4
                  [3, 72, 24, False, 'ReLU', 2],  # block1 layer2 os=8
                  [3, 88, 24, False, 'ReLU', 1],
                  [5, 96, 40, True, 'HSwish', 2],  # block2 layer4 os=16
                  [5, 240, 40, True, 'HSwish', 1],
                  [5, 240, 40, True, 'HSwish', 1],
                  [5, 120, 48, True, 'HSwish', 1],  # block3 layer7 os=16
                  [5, 144, 48, True, 'HSwish', 1],
                  [5, 288, 96, True, 'HSwish', 2],  # block4 layer9 os=32
                  [5, 576, 96, True, 'HSwish', 1],
                  [5, 576, 96, True, 'HSwish', 1]],
        'large': [[3, 16, 16, False, 'ReLU', 1],  # block0 layer1 os=2
                  [3, 64, 24, False, 'ReLU', 2],  # block1 layer2 os=4
                  [3, 72, 24, False, 'ReLU', 1],
                  [5, 72, 40, True, 'ReLU', 2],  # block2 layer4 os=8
                  [5, 120, 40, True, 'ReLU', 1],
                  [5, 120, 40, True, 'ReLU', 1],
                  [3, 240, 80, False, 'HSwish', 2],  # block3 layer7 os=16
                  [3, 200, 80, False, 'HSwish', 1],
                  [3, 184, 80, False, 'HSwish', 1],
                  [3, 184, 80, False, 'HSwish', 1],
                  [3, 480, 112, True, 'HSwish', 1],  # block4 layer11 os=16
                  [3, 672, 112, True, 'HSwish', 1],
                  [5, 672, 160, True, 'HSwish', 2],  # block5 layer13 os=32
                  [5, 960, 160, True, 'HSwish', 1],
                  [5, 960, 160, True, 'HSwish', 1]]
    }  # yapf: disable

    def __init__(self,
                 arch='small',
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 out_indices=(0, 1, 12),
                 frozen_stages=-1,
                 reduction_factor=1,
                 norm_eval=False,
                 with_cp=False):
        super(MobileNetV3, self).__init__()
        assert arch in self.arch_settings
        assert isinstance(reduction_factor, int) and reduction_factor > 0
        assert mmcv.is_tuple_of(out_indices, int)
        # Valid layer indices: the arch blocks plus the first conv (layer0)
        # and the final conv layer, hence len(settings) + 2.
        for index in out_indices:
            if index not in range(0, len(self.arch_settings[arch]) + 2):
                raise ValueError(
                    'the item in out_indices must in '
                    f'range(0, {len(self.arch_settings[arch])+2}). '
                    f'But received {index}')

        if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2):
            raise ValueError('frozen_stages must be in range(-1, '
                             f'{len(self.arch_settings[arch])+2}). '
                             f'But received {frozen_stages}')
        self.arch = arch
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.reduction_factor = reduction_factor
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # List of layer attribute names, in forward order.
        self.layers = self._make_layer()

    def _make_layer(self):
        """Build all layers and register them as submodules.

        Returns:
            list[str]: Attribute names of the layers in forward order.
        """
        layers = []

        # build the first layer (layer0)
        in_channels = 16
        layer = ConvModule(
            in_channels=3,
            out_channels=in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=dict(type='Conv2dAdaptivePadding'),
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='HSwish'))
        self.add_module('layer0', layer)
        layers.append('layer0')

        layer_setting = self.arch_settings[self.arch]
        for i, params in enumerate(layer_setting):
            (kernel_size, mid_channels, out_channels, with_se, act,
             stride) = params

            # Late layers are optionally thinned by reduction_factor.
            if self.arch == 'large' and i >= 12 or self.arch == 'small' and \
                    i >= 8:
                mid_channels = mid_channels // self.reduction_factor
                out_channels = out_channels // self.reduction_factor

            if with_se:
                se_cfg = dict(
                    channels=mid_channels,
                    ratio=4,
                    act_cfg=(dict(type='ReLU'),
                             dict(type='HSigmoid', bias=3.0, divisor=6.0)))
            else:
                se_cfg = None

            layer = InvertedResidual(
                in_channels=in_channels,
                out_channels=out_channels,
                mid_channels=mid_channels,
                kernel_size=kernel_size,
                stride=stride,
                se_cfg=se_cfg,
                with_expand_conv=(in_channels != mid_channels),
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=dict(type=act),
                with_cp=self.with_cp)
            in_channels = out_channels
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, layer)
            layers.append(layer_name)

        # build the last layer
        # block5 layer12 os=32 for small model
        # block6 layer16 os=32 for large model
        layer = ConvModule(
            in_channels=in_channels,
            out_channels=576 if self.arch == 'small' else 960,
            kernel_size=1,
            stride=1,
            dilation=4,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=dict(type='HSwish'))
        layer_name = 'layer{}'.format(len(layer_setting) + 1)
        self.add_module(layer_name, layer)
        layers.append(layer_name)

        # next, convert backbone MobileNetV3 to a semantic segmentation version
        # Replace the last two stride-2 depthwise convs with stride 1 and
        # dilate the later layers (dilation 2, then 4) so the output stride
        # stays at 8 while the receptive field is preserved.
        if self.arch == 'small':
            self.layer4.depthwise_conv.conv.stride = (1, 1)
            self.layer9.depthwise_conv.conv.stride = (1, 1)
            for i in range(4, len(layers)):
                layer = getattr(self, layers[i])
                if isinstance(layer, InvertedResidual):
                    modified_module = layer.depthwise_conv.conv
                else:
                    modified_module = layer.conv

                if i < 9:
                    modified_module.dilation = (2, 2)
                    pad = 2
                else:
                    modified_module.dilation = (4, 4)
                    pad = 4

                if not isinstance(modified_module, Conv2dAdaptivePadding):
                    # Adjust padding
                    pad *= (modified_module.kernel_size[0] - 1) // 2
                    modified_module.padding = (pad, pad)
        else:
            self.layer7.depthwise_conv.conv.stride = (1, 1)
            self.layer13.depthwise_conv.conv.stride = (1, 1)
            for i in range(7, len(layers)):
                layer = getattr(self, layers[i])
                if isinstance(layer, InvertedResidual):
                    modified_module = layer.depthwise_conv.conv
                else:
                    modified_module = layer.conv

                if i < 13:
                    modified_module.dilation = (2, 2)
                    pad = 2
                else:
                    modified_module.dilation = (4, 4)
                    pad = 4

                if not isinstance(modified_module, Conv2dAdaptivePadding):
                    # Adjust padding
                    pad *= (modified_module.kernel_size[0] - 1) // 2
                    modified_module.padding = (pad, pad)

        return layers

    def init_weights(self, pretrained=None):
        """Initialize weights, from a checkpoint when ``pretrained`` is a
        path, otherwise Kaiming/constant defaults.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor ``None``.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run all layers, collecting outputs at ``out_indices``.

        Returns:
            list[Tensor]: The selected intermediate feature maps.
        """
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return outs

    def _freeze_stages(self):
        # Freeze layer0..layer{frozen_stages}; no-op when frozen_stages
        # is -1 (range(0) is empty).
        for i in range(self.frozen_stages + 1):
            layer = getattr(self, f'layer{i}')
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Set train/eval mode, re-applying stage freezing and optionally
        keeping BatchNorm layers in eval mode (``norm_eval``)."""
        super(MobileNetV3, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnest.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
import torch.utils.checkpoint as cp
|
| 7 |
+
from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer
|
| 8 |
+
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils import ResLayer
|
| 11 |
+
from .resnet import Bottleneck as _Bottleneck
|
| 12 |
+
from .resnet import ResNetV1d
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class RSoftmax(nn.Module):
    """Radix Softmax used inside ``SplitAttentionConv2d``.

    Normalizes attention logits across the radix dimension when
    ``radix > 1``; otherwise falls back to a plain sigmoid gate.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        if self.radix <= 1:
            # Single split: gate each channel independently.
            return torch.sigmoid(x)
        n = x.size(0)
        # Expose the radix axis, softmax across it, then flatten back
        # to (batch, radix * groups * channels_per_group).
        logits = x.view(n, self.groups, self.radix, -1).transpose(1, 2)
        probs = F.softmax(logits, dim=1)
        return probs.reshape(n, -1)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class SplitAttentionConv2d(nn.Module):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Same as nn.Conv2d.
        channels (int): Output channels of the attended result
            (``out_channels`` in nn.Conv2d terms).
        kernel_size (int | tuple[int]): Same as nn.Conv2d.
        stride (int | tuple[int]): Same as nn.Conv2d.
        padding (int | tuple[int]): Same as nn.Conv2d.
        dilation (int | tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels.
            Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        dcn (dict): Config dict for DCN. Default: None.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 radix=2,
                 reduction_factor=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None):
        super(SplitAttentionConv2d, self).__init__()
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            # NOTE: pop() mutates the caller's dcn dict (same convention
            # as the other backbones in this package).
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # One grouped conv produces all `radix` splits at once.
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        self.norm0_name, norm0 = build_norm_layer(
            norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # Two 1x1 convs implement the channel-attention MLP.
        self.fc1 = build_conv_layer(
            None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(
            None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)

        # Removed a redundant `batch, rchannel = x.shape[:2]` that was
        # immediately overwritten and never used.
        batch = x.size(0)
        if self.radix > 1:
            # Separate the radix splits and aggregate them for the gate.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        # Global context -> per-channel attention logits.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        gap = self.norm1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            # Weighted sum of the splits with the radix-softmaxed gates.
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Replaces the parent's conv2 with a ``SplitAttentionConv2d`` and
    (optionally) moves the stride into a 3x3 AvgPool layer.

    Args:
        inplane (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        width_per_group (int): Width per group of conv2. 64x4d indicates
            ``groups=64, width_per_group=4`` and 32x8d indicates
            ``groups=32, width_per_group=8``.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        """Bottleneck block for ResNeSt."""
        # Parent sets self.planes/inplanes, conv1_stride/conv2_stride,
        # norm_cfg/conv_cfg, dcn, plugin bookkeeping, relu, etc.
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # ResNeXt-style width scaling for grouped convolution; plain
        # groups == 1 keeps the standard width.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Only use the AvgPool shortcut when conv2 would actually stride.
        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        # Rebuild conv1 with the (possibly widened) channel count,
        # overriding the module created by the parent constructor.
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # conv2 becomes a Split-Attention convolution; if the stride was
        # delegated to the AvgPool below, conv2 itself runs stride 1.
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # SplitAttentionConv2d carries its own norm, so remove the norm2
        # module the parent registered to keep the state_dict clean.
        delattr(self, self.norm2_name)

        if self.avg_down_stride:
            # Strided 3x3 average pooling applied after conv2.
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):
        """Forward function (supports gradient checkpointing via
        ``self.with_cp``, inherited from the parent block)."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            # conv2 (SplitAttentionConv2d) normalizes and activates
            # internally, hence no norm2/relu here.
            out = self.conv2(out)

            if self.avg_down_stride:
                out = self.avd_layer(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone.

    A ResNetV1d variant whose bottleneck blocks use Split-Attention
    convolutions (see ``SplitAttentionConv2d``).

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1
        base_width (int): Base width of Bottleneck. Default: 4
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for ResNet.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3))
    }

    def __init__(self,
                 groups=1,
                 base_width=4,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        # Record the ResNeSt-specific options *before* invoking the
        # parent constructor: it builds the stages and calls
        # ``make_res_layer`` below, which reads these attributes.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super().__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        stage_args = dict(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            radix=self.radix,
            reduction_factor=self.reduction_factor,
            avg_down_stride=self.avg_down_stride)
        return ResLayer(**stage_args, **kwargs)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnet.py
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
import torch.utils.checkpoint as cp
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
|
| 4 |
+
constant_init, kaiming_init)
|
| 5 |
+
from annotator.uniformer.mmcv.runner import load_checkpoint
|
| 6 |
+
from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm
|
| 7 |
+
|
| 8 |
+
from annotator.uniformer.mmseg.utils import get_root_logger
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from ..utils import ResLayer
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BasicBlock(nn.Module):
    """Basic residual block for ResNet: two 3x3 convolutions plus an
    identity shortcut, with optional gradient checkpointing."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'

        # Norm layers are named dynamically (e.g. "bn1") by the builder.
        self.norm1_name, first_norm = build_norm_layer(
            norm_cfg, planes, postfix=1)
        self.norm2_name, second_norm = build_norm_layer(
            norm_cfg, planes, postfix=2)

        # First 3x3 conv carries the stride and dilation.
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, first_norm)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, second_norm)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _residual_branch(inp):
            # Shortcut path: identity, or the projection module when the
            # shapes differ.
            shortcut = inp if self.downsample is None else self.downsample(inp)
            y = self.relu(self.norm1(self.conv1(inp)))
            y = self.norm2(self.conv2(y))
            return y + shortcut

        if self.with_cp and x.requires_grad:
            # Recompute the branch on backward to save activation memory.
            out = cp.checkpoint(_residual_branch, x)
        else:
            out = _residual_branch(x)

        return self.relu(out)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class Bottleneck(nn.Module):
    """Bottleneck block for ResNet.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
    "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of this block.
        planes (int): Middle channels; output channels are
            ``planes * expansion``.
        stride (int): Stride of the stride-two layer. Default: 1.
        dilation (int): Dilation of the 3x3 conv. Default: 1.
        downsample (nn.Module | None): Module applied to the identity
            branch when its shape differs from the residual branch.
        style (str): ``'pytorch'`` or ``'caffe'`` (see above).
        with_cp (bool): Use gradient checkpointing to save memory.
        conv_cfg (dict | None): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers.
        dcn (dict | None): Config dict for deformable conv on conv2.
        plugins (list[dict] | None): Plugins inserted after
            conv1/conv2/conv3.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        # Place the stride on conv2 (pytorch style) or conv1 (caffe style).
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            # NOTE: pop() mutates the caller's dcn dict.
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            # Copy so popping 'postfix' does not mutate the caller's cfg.
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Forward function for plugins.

        Plugins are applied sequentially; each one consumes the previous
        plugin's output.
        """
        out = x
        for name in plugin_names:
            # BUGFIX: previously every plugin was fed the original `x`,
            # so with more than one plugin per position only the last
            # plugin's output survived.
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory: recompute activations on backward.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@BACKBONES.register_module()
|
| 308 |
+
class ResNet(nn.Module):
|
| 309 |
+
"""ResNet backbone.
|
| 310 |
+
|
| 311 |
+
Args:
|
| 312 |
+
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
|
| 313 |
+
in_channels (int): Number of input image channels. Default" 3.
|
| 314 |
+
stem_channels (int): Number of stem channels. Default: 64.
|
| 315 |
+
base_channels (int): Number of base channels of res layer. Default: 64.
|
| 316 |
+
num_stages (int): Resnet stages, normally 4.
|
| 317 |
+
strides (Sequence[int]): Strides of the first block of each stage.
|
| 318 |
+
dilations (Sequence[int]): Dilation of each stage.
|
| 319 |
+
out_indices (Sequence[int]): Output from which stages.
|
| 320 |
+
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
|
| 321 |
+
layer is the 3x3 conv layer, otherwise the stride-two layer is
|
| 322 |
+
the first 1x1 conv layer.
|
| 323 |
+
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
|
| 324 |
+
avg_down (bool): Use AvgPool instead of stride conv when
|
| 325 |
+
downsampling in the bottleneck.
|
| 326 |
+
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
|
| 327 |
+
-1 means not freezing any parameters.
|
| 328 |
+
norm_cfg (dict): Dictionary to construct and config norm layer.
|
| 329 |
+
norm_eval (bool): Whether to set norm layers to eval mode, namely,
|
| 330 |
+
freeze running stats (mean and var). Note: Effect on Batch Norm
|
| 331 |
+
and its variants only.
|
| 332 |
+
plugins (list[dict]): List of plugins for stages, each dict contains:
|
| 333 |
+
|
| 334 |
+
- cfg (dict, required): Cfg dict to build plugin.
|
| 335 |
+
|
| 336 |
+
- position (str, required): Position inside block to insert plugin,
|
| 337 |
+
options: 'after_conv1', 'after_conv2', 'after_conv3'.
|
| 338 |
+
|
| 339 |
+
- stages (tuple[bool], optional): Stages to apply plugin, length
|
| 340 |
+
should be same as 'num_stages'
|
| 341 |
+
multi_grid (Sequence[int]|None): Multi grid dilation rates of last
|
| 342 |
+
stage. Default: None
|
| 343 |
+
contract_dilation (bool): Whether contract first dilation of each layer
|
| 344 |
+
Default: False
|
| 345 |
+
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
|
| 346 |
+
memory while slowing down the training speed.
|
| 347 |
+
zero_init_residual (bool): Whether to use zero init for last norm layer
|
| 348 |
+
in resblocks to let them behave as identity.
|
| 349 |
+
|
| 350 |
+
Example:
|
| 351 |
+
>>> from annotator.uniformer.mmseg.models import ResNet
|
| 352 |
+
>>> import torch
|
| 353 |
+
>>> self = ResNet(depth=18)
|
| 354 |
+
>>> self.eval()
|
| 355 |
+
>>> inputs = torch.rand(1, 3, 32, 32)
|
| 356 |
+
>>> level_outputs = self.forward(inputs)
|
| 357 |
+
>>> for level_out in level_outputs:
|
| 358 |
+
... print(tuple(level_out.shape))
|
| 359 |
+
(1, 64, 8, 8)
|
| 360 |
+
(1, 128, 4, 4)
|
| 361 |
+
(1, 256, 2, 2)
|
| 362 |
+
(1, 512, 1, 1)
|
| 363 |
+
"""
|
| 364 |
+
|
| 365 |
+
arch_settings = {
|
| 366 |
+
18: (BasicBlock, (2, 2, 2, 2)),
|
| 367 |
+
34: (BasicBlock, (3, 4, 6, 3)),
|
| 368 |
+
50: (Bottleneck, (3, 4, 6, 3)),
|
| 369 |
+
101: (Bottleneck, (3, 4, 23, 3)),
|
| 370 |
+
152: (Bottleneck, (3, 8, 36, 3))
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
def __init__(self,
|
| 374 |
+
depth,
|
| 375 |
+
in_channels=3,
|
| 376 |
+
stem_channels=64,
|
| 377 |
+
base_channels=64,
|
| 378 |
+
num_stages=4,
|
| 379 |
+
strides=(1, 2, 2, 2),
|
| 380 |
+
dilations=(1, 1, 1, 1),
|
| 381 |
+
out_indices=(0, 1, 2, 3),
|
| 382 |
+
style='pytorch',
|
| 383 |
+
deep_stem=False,
|
| 384 |
+
avg_down=False,
|
| 385 |
+
frozen_stages=-1,
|
| 386 |
+
conv_cfg=None,
|
| 387 |
+
norm_cfg=dict(type='BN', requires_grad=True),
|
| 388 |
+
norm_eval=False,
|
| 389 |
+
dcn=None,
|
| 390 |
+
stage_with_dcn=(False, False, False, False),
|
| 391 |
+
plugins=None,
|
| 392 |
+
multi_grid=None,
|
| 393 |
+
contract_dilation=False,
|
| 394 |
+
with_cp=False,
|
| 395 |
+
zero_init_residual=True):
|
| 396 |
+
super(ResNet, self).__init__()
|
| 397 |
+
if depth not in self.arch_settings:
|
| 398 |
+
raise KeyError(f'invalid depth {depth} for resnet')
|
| 399 |
+
self.depth = depth
|
| 400 |
+
self.stem_channels = stem_channels
|
| 401 |
+
self.base_channels = base_channels
|
| 402 |
+
self.num_stages = num_stages
|
| 403 |
+
assert num_stages >= 1 and num_stages <= 4
|
| 404 |
+
self.strides = strides
|
| 405 |
+
self.dilations = dilations
|
| 406 |
+
assert len(strides) == len(dilations) == num_stages
|
| 407 |
+
self.out_indices = out_indices
|
| 408 |
+
assert max(out_indices) < num_stages
|
| 409 |
+
self.style = style
|
| 410 |
+
self.deep_stem = deep_stem
|
| 411 |
+
self.avg_down = avg_down
|
| 412 |
+
self.frozen_stages = frozen_stages
|
| 413 |
+
self.conv_cfg = conv_cfg
|
| 414 |
+
self.norm_cfg = norm_cfg
|
| 415 |
+
self.with_cp = with_cp
|
| 416 |
+
self.norm_eval = norm_eval
|
| 417 |
+
self.dcn = dcn
|
| 418 |
+
self.stage_with_dcn = stage_with_dcn
|
| 419 |
+
if dcn is not None:
|
| 420 |
+
assert len(stage_with_dcn) == num_stages
|
| 421 |
+
self.plugins = plugins
|
| 422 |
+
self.multi_grid = multi_grid
|
| 423 |
+
self.contract_dilation = contract_dilation
|
| 424 |
+
self.zero_init_residual = zero_init_residual
|
| 425 |
+
self.block, stage_blocks = self.arch_settings[depth]
|
| 426 |
+
self.stage_blocks = stage_blocks[:num_stages]
|
| 427 |
+
self.inplanes = stem_channels
|
| 428 |
+
|
| 429 |
+
self._make_stem_layer(in_channels, stem_channels)
|
| 430 |
+
|
| 431 |
+
self.res_layers = []
|
| 432 |
+
for i, num_blocks in enumerate(self.stage_blocks):
|
| 433 |
+
stride = strides[i]
|
| 434 |
+
dilation = dilations[i]
|
| 435 |
+
dcn = self.dcn if self.stage_with_dcn[i] else None
|
| 436 |
+
if plugins is not None:
|
| 437 |
+
stage_plugins = self.make_stage_plugins(plugins, i)
|
| 438 |
+
else:
|
| 439 |
+
stage_plugins = None
|
| 440 |
+
# multi grid is applied to last layer only
|
| 441 |
+
stage_multi_grid = multi_grid if i == len(
|
| 442 |
+
self.stage_blocks) - 1 else None
|
| 443 |
+
planes = base_channels * 2**i
|
| 444 |
+
res_layer = self.make_res_layer(
|
| 445 |
+
block=self.block,
|
| 446 |
+
inplanes=self.inplanes,
|
| 447 |
+
planes=planes,
|
| 448 |
+
num_blocks=num_blocks,
|
| 449 |
+
stride=stride,
|
| 450 |
+
dilation=dilation,
|
| 451 |
+
style=self.style,
|
| 452 |
+
avg_down=self.avg_down,
|
| 453 |
+
with_cp=with_cp,
|
| 454 |
+
conv_cfg=conv_cfg,
|
| 455 |
+
norm_cfg=norm_cfg,
|
| 456 |
+
dcn=dcn,
|
| 457 |
+
plugins=stage_plugins,
|
| 458 |
+
multi_grid=stage_multi_grid,
|
| 459 |
+
contract_dilation=contract_dilation)
|
| 460 |
+
self.inplanes = planes * self.block.expansion
|
| 461 |
+
layer_name = f'layer{i+1}'
|
| 462 |
+
self.add_module(layer_name, res_layer)
|
| 463 |
+
self.res_layers.append(layer_name)
|
| 464 |
+
|
| 465 |
+
self._freeze_stages()
|
| 466 |
+
|
| 467 |
+
self.feat_dim = self.block.expansion * base_channels * 2**(
|
| 468 |
+
len(self.stage_blocks) - 1)
|
| 469 |
+
|
| 470 |
+
def make_stage_plugins(self, plugins, stage_idx):
    """Select the plugin cfgs that apply to stage ``stage_idx``.

    Plugins such as 'context_block', 'empirical_attention_block' or
    'nonlocal_block' can be inserted after conv1/conv2/conv3 of a
    Bottleneck. Each plugin cfg may carry a ``stages`` tuple of booleans
    (one per stage); when ``stages`` is missing the plugin applies to
    every stage. The returned cfgs have ``stages`` stripped, and the
    input cfgs are left unmodified (each selected cfg is a copy).

    An example of plugins format could be:
    >>> plugins=[
    ...     dict(cfg=dict(type='xxx', arg1='xxx'),
    ...          stages=(False, True, True, True),
    ...          position='after_conv2'),
    ...     dict(cfg=dict(type='yyy'),
    ...          stages=(True, True, True, True),
    ...          position='after_conv3')
    ... ]
    >>> self = ResNet(depth=18)
    >>> stage_plugins = self.make_stage_plugins(plugins, 0)
    >>> assert len(stage_plugins) == 1

    Args:
        plugins (list[dict]): List of plugin cfgs to build. A postfix is
            required if multiple plugins of the same type are inserted.
        stage_idx (int): Index of the stage to build plugins for.

    Returns:
        list[dict]: Plugin cfgs for the requested stage.
    """
    selected = []
    for plugin_cfg in plugins:
        # Work on a copy so popping 'stages' never mutates the caller's cfg.
        plugin_cfg = plugin_cfg.copy()
        stages = plugin_cfg.pop('stages', None)
        assert stages is None or len(stages) == self.num_stages
        # Skip plugins explicitly disabled for this stage.
        if stages is not None and not stages[stage_idx]:
            continue
        selected.append(plugin_cfg)
    return selected
|
| 522 |
+
|
| 523 |
+
def make_res_layer(self, **kwargs):
    """Pack all blocks in a stage into a ``ResLayer``.

    All keyword arguments (block, inplanes, planes, num_blocks, stride,
    dilation, ...) are forwarded verbatim to ``ResLayer``; subclasses
    (e.g. ResNeXt) override this to inject extra arguments.
    """
    return ResLayer(**kwargs)
|
| 526 |
+
|
| 527 |
+
@property
def norm1(self):
    """nn.Module: the normalization layer named "norm1".

    Resolved dynamically via ``self.norm1_name`` because the attribute
    name depends on the norm type chosen by ``build_norm_layer``.
    """
    return getattr(self, self.norm1_name)
|
| 531 |
+
|
| 532 |
+
def _make_stem_layer(self, in_channels, stem_channels):
    """Build the stem of the network.

    With ``deep_stem`` the stem is three stacked 3x3 convs (the first
    with stride 2), each followed by a norm layer and ReLU; otherwise it
    is a single 7x7 stride-2 conv plus norm and ReLU. Both variants end
    with a shared 3x3 stride-2 max pooling layer.
    """
    if self.deep_stem:
        mid_channels = stem_channels // 2
        # (in, out, stride) for the three stacked 3x3 convs.
        conv_specs = [
            (in_channels, mid_channels, 2),
            (mid_channels, mid_channels, 1),
            (mid_channels, stem_channels, 1),
        ]
        stem_layers = []
        for cin, cout, stride in conv_specs:
            stem_layers.append(
                build_conv_layer(
                    self.conv_cfg,
                    cin,
                    cout,
                    kernel_size=3,
                    stride=stride,
                    padding=1,
                    bias=False))
            stem_layers.append(build_norm_layer(self.norm_cfg, cout)[1])
            stem_layers.append(nn.ReLU(inplace=True))
        self.stem = nn.Sequential(*stem_layers)
    else:
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            stem_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, stem_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
|
| 580 |
+
|
| 581 |
+
def _freeze_stages(self):
|
| 582 |
+
"""Freeze stages param and norm stats."""
|
| 583 |
+
if self.frozen_stages >= 0:
|
| 584 |
+
if self.deep_stem:
|
| 585 |
+
self.stem.eval()
|
| 586 |
+
for param in self.stem.parameters():
|
| 587 |
+
param.requires_grad = False
|
| 588 |
+
else:
|
| 589 |
+
self.norm1.eval()
|
| 590 |
+
for m in [self.conv1, self.norm1]:
|
| 591 |
+
for param in m.parameters():
|
| 592 |
+
param.requires_grad = False
|
| 593 |
+
|
| 594 |
+
for i in range(1, self.frozen_stages + 1):
|
| 595 |
+
m = getattr(self, f'layer{i}')
|
| 596 |
+
m.eval()
|
| 597 |
+
for param in m.parameters():
|
| 598 |
+
param.requires_grad = False
|
| 599 |
+
|
| 600 |
+
def init_weights(self, pretrained=None):
    """Initialize the weights in the backbone.

    Args:
        pretrained (str, optional): Path to pre-trained weights. If
            ``None``, weights are initialized from scratch (Kaiming for
            convs, constant 1 for norm layers). Defaults to None.

    Raises:
        TypeError: If ``pretrained`` is neither a str nor None.
    """
    if isinstance(pretrained, str):
        load_checkpoint(
            self, pretrained, strict=False, logger=get_root_logger())
        return
    if pretrained is not None:
        raise TypeError('pretrained must be a str or None')

    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            kaiming_init(module)
        elif isinstance(module, (_BatchNorm, nn.GroupNorm)):
            constant_init(module, 1)

    if self.dcn is not None:
        # Start DCN offsets at zero so the initial sampling grid matches
        # a regular convolution.
        for module in self.modules():
            if isinstance(module, Bottleneck) and hasattr(
                    module, 'conv2_offset'):
                constant_init(module.conv2_offset, 0)

    if self.zero_init_residual:
        # Zero the last norm of each residual block so every block starts
        # out as an identity mapping.
        for module in self.modules():
            if isinstance(module, Bottleneck):
                constant_init(module.norm3, 0)
            elif isinstance(module, BasicBlock):
                constant_init(module.norm2, 0)
|
| 631 |
+
|
| 632 |
+
def forward(self, x):
    """Forward pass: stem, max pooling, then each residual stage,
    collecting the outputs of the stages listed in ``out_indices``.

    Returns:
        tuple: Feature maps from the selected stages, in stage order.
    """
    if self.deep_stem:
        x = self.stem(x)
    else:
        x = self.relu(self.norm1(self.conv1(x)))
    x = self.maxpool(x)
    feats = []
    for stage_idx, layer_name in enumerate(self.res_layers):
        x = getattr(self, layer_name)(x)
        if stage_idx in self.out_indices:
            feats.append(x)
    return tuple(feats)
|
| 648 |
+
|
| 649 |
+
def train(self, mode=True):
    """Switch the model to training mode while keeping frozen stages and
    (optionally) all normalization layers in eval mode."""
    super(ResNet, self).train(mode)
    self._freeze_stages()
    if not (mode and self.norm_eval):
        return
    for module in self.modules():
        # eval() only affects running-stat layers such as BatchNorm.
        if isinstance(module, _BatchNorm):
            module.eval()
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
@BACKBONES.register_module()
class ResNetV1c(ResNet):
    """ResNetV1c variant described in [1]_.

    Differs from the default ResNet (ResNetV1b) only in the stem: the
    single 7x7 convolution is replaced by three stacked 3x3 convolutions
    (``deep_stem=True``), without average-pool downsampling.

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super(ResNetV1c, self).__init__(deep_stem=True, avg_down=False, **kwargs)
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d variant described in [1]_.

    Like ResNetV1c, the 7x7 stem conv is replaced by three 3x3 convs
    (``deep_stem=True``). Additionally, each downsampling shortcut uses a
    2x2 stride-2 average pool before its 1x1 conv (``avg_down=True``), so
    the conv itself keeps stride 1.

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super(ResNetV1d, self).__init__(deep_stem=True, avg_down=True, **kwargs)
|
Text2Video-Zero-main/annotator/uniformer/mmseg/models/backbones/resnext.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer
|
| 4 |
+
|
| 5 |
+
from ..builder import BACKBONES
|
| 6 |
+
from ..utils import ResLayer
|
| 7 |
+
from .resnet import Bottleneck as _Bottleneck
|
| 8 |
+
from .resnet import ResNet
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeXt.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
    "caffe", the stride-two layer is the first 1x1 conv layer.

    Extends the ResNet Bottleneck by widening the middle 3x3 convolution
    and running it with ``groups`` grouped convolutions (cardinality).
    """

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        # The parent constructor sets strides, dilation, dcn flags, etc.
        # and builds conv1/conv2/conv3 at ResNet widths; those layers are
        # rebuilt below with the grouped ResNeXt width.
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            # Plain ResNet width when there is no cardinality.
            width = self.planes
        else:
            # ResNeXt width: planes * (base_width / base_channels) * groups,
            # floored (e.g. the classic 32x4d configuration).
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Rebuild the three norm layers for the new widths; names carry a
        # postfix so they can be registered as attributes.
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        # 1x1 reduce conv.
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # NOTE(review): pops from the shared dcn cfg dict; the parent
            # __init__ already popped this key, so the default applies here.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            # Regular grouped 3x3 conv.
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            # Deformable grouped 3x3 conv built from the dcn cfg.
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        # 1x1 expand conv back to planes * expansion channels.
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Normally 3.
        num_stages (int): Resnet stages, normally 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1
            means not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch
            Norm and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.

    Example:
        >>> from annotator.uniformer.mmseg.models import ResNeXt
        >>> import torch
        >>> self = ResNeXt(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    # ResNeXt only defines the deep, Bottleneck-based configurations.
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Cardinality settings must be stored before calling the parent
        # constructor: ResNet.__init__ invokes make_res_layer, which
        # reads self.groups and self.base_width.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``, injecting the
        ResNeXt cardinality arguments."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|