# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union

import mmcv
import numpy as np
from mmengine.dist import master_only
from mmengine.fileio.io import isdir, isfile, join_path, list_dir_or_file
from mmengine.visualization import Visualizer

from mmaction.registry import VISBACKENDS, VISUALIZERS
from mmaction.structures import ActionDataSample


def _get_adaptive_scale(img_shape: Tuple[int, int],
                        min_scale: float = 0.3,
                        max_scale: float = 3.0) -> float:
    """Get adaptive scale according to frame shape.

    The target scale depends on the short edge length of the frame. If the
    short edge length equals 224, the output is 1.0, and the output scales
    linearly with the short edge length.

    You can also specify the minimum scale and the maximum scale to limit the
    linear scale.

    Args:
        img_shape (Tuple[int, int]): The shape of the canvas frame.
        min_scale (float): The minimum scale. Defaults to 0.3.
        max_scale (float): The maximum scale. Defaults to 3.0.

    Returns:
        float: The adaptive scale.
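
    Examples:
        >>> # A brief sketch of the behaviour: the scale is the short-edge
        >>> # length divided by 224, clamped to [min_scale, max_scale].
        >>> _get_adaptive_scale((224, 224))
        1.0
        >>> _get_adaptive_scale((64, 64))  # 64 / 224 < 0.3, clamped up
        0.3
        >>> _get_adaptive_scale((448, 640))  # short edge is 448
        2.0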

    """
    short_edge_length = min(img_shape)
    scale = short_edge_length / 224.
    return min(max(scale, min_scale), max_scale)


@VISUALIZERS.register_module()
class ActionVisualizer(Visualizer):
    """Universal Visualizer for classification task.



    Args:

        name (str): Name of the instance. Defaults to 'visualizer'.

        vis_backends (list, optional): Visual backend config list.

            Defaults to None.

        save_dir (str, optional): Save file dir for all storage backends.

            If it is None, the backend storage will not save any data.

        fig_save_cfg (dict): Keyword parameters of figure for saving.

            Defaults to empty dict.

        fig_show_cfg (dict): Keyword parameters of figure for showing.

            Defaults to empty dict.



    Examples:

        >>> import torch

        >>> import decord

        >>> from pathlib import Path

        >>> from mmaction.structures import ActionDataSample, ActionVisualizer

        >>> from mmengine.structures import LabelData

        >>> # Example frame

        >>> video = decord.VideoReader('./demo/demo.mp4')

        >>> video = video.get_batch(range(32)).asnumpy()

        >>> # Example annotation

        >>> data_sample = ActionDataSample()

        >>> data_sample.gt_label = LabelData(item=torch.tensor([2]))

        >>> # Setup the visualizer

        >>> vis = ActionVisualizer(

        ...     save_dir="./outputs",

        ...     vis_backends=[dict(type='LocalVisBackend')])

        >>> # Set classes names

        >>> vis.dataset_meta = {'classes': ['running', 'standing', 'sitting']}

        >>> # Save the visualization result by the specified storage backends.

        >>> vis.add_datasample('demo', video)

        >>> assert Path('outputs/vis_data/demo/frames_0/1.png').exists()

        >>> assert Path('outputs/vis_data/demo/frames_0/2.png').exists()

        >>> # Save another visualization result with the same name.

        >>> vis.add_datasample('demo', video, step=1)

        >>> assert Path('outputs/vis_data/demo/frames_1/2.png').exists()

    """

    def __init__(
        self,
        name='visualizer',
        vis_backends: Optional[List[Dict]] = None,
        save_dir: Optional[str] = None,
        fig_save_cfg=dict(frameon=False),
        fig_show_cfg=dict(frameon=False)
    ) -> None:
        super().__init__(
            name=name,
            image=None,
            vis_backends=vis_backends,
            save_dir=save_dir,
            fig_save_cfg=fig_save_cfg,
            fig_show_cfg=fig_show_cfg)

    def _load_video(self,
                    video: Union[np.ndarray, Sequence[np.ndarray], str],
                    target_resolution: Optional[Tuple[int]] = None):
        """Load a video from multiple sources and convert it to the target
        resolution.

        Args:
            video (np.ndarray, str): The video to draw.
            target_resolution (Tuple[int], optional): Set to
                (desired_width, desired_height) to have resized frames. If
                either dimension is -1, the frames are resized by keeping
                the existing aspect ratio. Defaults to None.
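
        Examples:
            >>> # Sketch of the aspect-ratio behaviour: with a -1 width, a
            >>> # 480x640 (h x w) frame resized to height 256 keeps its
            >>> # aspect ratio and gets width int(256 / 480 * 640) == 341.
            >>> vis = ActionVisualizer()
            >>> frames = [np.zeros((480, 640, 3), dtype=np.uint8)] * 2
            >>> frames = vis._load_video(frames, target_resolution=(-1, 256))
            >>> frames[0].shape
            (256, 341, 3)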

        """
        if isinstance(video, (np.ndarray, list)):
            frames = video
        elif isinstance(video, str):
            # video file path
            if isfile(video):
                try:
                    import decord
                except ImportError:
                    raise ImportError(
                        'Please install decord to load video file.')
                video = decord.VideoReader(video)
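                # decord yields RGB frames; reverse the channel order to BGR
                # to match the ``mmcv.imread`` output in the branch below.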
                frames = [x.asnumpy()[..., ::-1] for x in video]
            # rawframes folder path
            elif isdir(video):
                frame_list = sorted(list_dir_or_file(video, list_dir=False))
                frames = [mmcv.imread(join_path(video, x)) for x in frame_list]
            else:
                raise FileNotFoundError(
                    f'{video} is neither a video file nor a rawframes folder')
        else:
            raise TypeError(f'type of video {type(video)} not supported')

        if target_resolution is not None:
            w, h = target_resolution
            frame_h, frame_w, _ = frames[0].shape
            if w == -1:
                w = int(h / frame_h * frame_w)
            if h == -1:
                h = int(w / frame_w * frame_h)
            frames = [mmcv.imresize(f, (w, h)) for f in frames]

        return frames

    @master_only
    def add_datasample(self,
                       name: str,
                       video: Union[np.ndarray, Sequence[np.ndarray], str],
                       data_sample: Optional[ActionDataSample] = None,
                       draw_gt: bool = True,
                       draw_pred: bool = True,
                       draw_score: bool = True,
                       rescale_factor: Optional[float] = None,
                       show_frames: bool = False,
                       text_cfg: dict = dict(),
                       wait_time: float = 0.1,
                       out_path: Optional[str] = None,
                       out_type: str = 'img',
                       target_resolution: Optional[Tuple[int]] = None,
                       step: int = 0,
                       fps: int = 4) -> np.ndarray:
        """Draw datasample and save to all backends.



        - If ``out_path`` is specified, all storage backends are ignored

          and save the videos to the ``out_path``.

        - If ``show_frames`` is True, plot the frames in a window sequentially,

          please confirm you are able to access the graphical interface.



        Args:

            name (str): The frame identifier.

            video (np.ndarray, str): The video to draw. supports decoded

                np.ndarray, video file path, rawframes folder path.

            data_sample (:obj:`ActionDataSample`, optional): The annotation of

                the frame. Defaults to None.

            draw_gt (bool): Whether to draw ground truth labels.

                Defaults to True.

            draw_pred (bool): Whether to draw prediction labels.

                Defaults to True.

            draw_score (bool): Whether to draw the prediction scores

                of prediction categories. Defaults to True.

            rescale_factor (float, optional): Rescale the frame by the rescale

                factor before visualization. Defaults to None.

            show_frames (bool): Whether to display the frames of the video.

                Defaults to False.

            text_cfg (dict): Extra text setting, which accepts

                arguments of :attr:`mmengine.Visualizer.draw_texts`.

                Defaults to an empty dict.

            wait_time (float): Delay in seconds. 0 is the special

                value that means "forever". Defaults to 0.1.

            out_path (str, optional): Extra folder to save the visualization

                result. If specified, the visualizer will only save the result

                frame to the out_path and ignore its storage backends.

                Defaults to None.

            out_type (str): Output format type, choose from 'img', 'gif',

                'video'. Defaults to ``'img'``.

            target_resolution (Tuple[int], optional): Set to

                (desired_width desired_height) to have resized frames. If

                either dimension is None, the frames are resized by keeping

                the existing aspect ratio. Defaults to None.

            step (int): Global step value to record. Defaults to 0.

            fps (int): Frames per second for saving video. Defaults to 4.
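
        Examples:
            >>> # A hedged usage sketch; the paths and class names here are
            >>> # assumptions. Draw a prediction onto a video file and save
            >>> # it as a gif to ``out_path``, bypassing storage backends.
            >>> import torch
            >>> from mmengine.structures import LabelData
            >>> vis = ActionVisualizer()
            >>> vis.dataset_meta = {'classes': ['running', 'standing']}
            >>> data_sample = ActionDataSample()
            >>> data_sample.pred_labels = LabelData(
            ...     item=torch.tensor([0]),
            ...     score=torch.tensor([0.9, 0.1]))
            >>> vis.add_datasample('demo', 'demo/demo.mp4', data_sample,
            ...                    out_path='outputs/demo.gif',
            ...                    out_type='gif')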

        """
        classes = None
        video = self._load_video(video, target_resolution)
        total_frames = len(video)

        if self.dataset_meta is not None:
            classes = self.dataset_meta.get('classes', None)

        if data_sample is None:
            data_sample = ActionDataSample()

        resulted_video = []
        for frame_idx, frame in enumerate(video):
            if rescale_factor is not None:
                frame = mmcv.imrescale(frame, rescale_factor)

            texts = [
                'Frame %d of total %d frames' % (frame_idx + 1, total_frames)
            ]
            self.set_image(frame)

            if draw_gt and 'gt_label' in data_sample:
                gt_labels = data_sample.gt_label
                idx = gt_labels.tolist()
                class_labels = [''] * len(idx)
                if classes is not None:
                    class_labels = [f' ({classes[i]})' for i in idx]
                labels = [
                    str(idx[i]) + class_labels[i] for i in range(len(idx))
                ]
                prefix = 'Ground truth: '
                texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels))

            if draw_pred and 'pred_labels' in data_sample:
                pred_labels = data_sample.pred_labels
                idx = pred_labels.item.tolist()
                score_labels = [''] * len(idx)
                class_labels = [''] * len(idx)
                if draw_score and 'score' in pred_labels:
                    score_labels = [
                        f', {pred_labels.score[i].item():.2f}' for i in idx
                    ]

                if classes is not None:
                    class_labels = [f' ({classes[i]})' for i in idx]

                labels = [
                    str(idx[i]) + score_labels[i] + class_labels[i]
                    for i in range(len(idx))
                ]
                prefix = 'Prediction: '
                texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels))

            img_scale = _get_adaptive_scale(frame.shape[:2])
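            # Scale the text anchor and font size with the frame's short
            # edge so the overlay stays readable at any resolution; any
            # user-provided ``text_cfg`` entries override these defaults.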
            _text_cfg = {
                'positions':
                np.array([(img_scale * 5, ) * 2]).astype(np.int32),
                'font_sizes': int(img_scale * 7),
                'font_families': 'monospace',
                'colors': 'white',
                'bboxes': dict(facecolor='black', alpha=0.5, boxstyle='Round'),
            }
            _text_cfg.update(text_cfg)
            self.draw_texts('\n'.join(texts), **_text_cfg)
            drawn_img = self.get_image()
            resulted_video.append(drawn_img)

        if show_frames:
            frame_wait_time = 1. / fps
            for frame_idx, drawn_img in enumerate(resulted_video):
                frame_name = 'frame %d of %s' % (frame_idx + 1, name)
                # Wait the inter-frame interval between frames, and the
                # user-specified ``wait_time`` after the last frame.
                if frame_idx < len(resulted_video) - 1:
                    frame_wait = frame_wait_time
                else:
                    frame_wait = wait_time
                self.show(
                    drawn_img[:, :, ::-1],
                    win_name=frame_name,
                    wait_time=frame_wait)

        resulted_video = np.array(resulted_video)
        if out_path is not None:
            save_dir, save_name = osp.split(out_path)
            vis_backend_cfg = dict(type='LocalVisBackend', save_dir=save_dir)
            tmp_local_vis_backend = VISBACKENDS.build(vis_backend_cfg)
            tmp_local_vis_backend.add_video(
                save_name,
                resulted_video,
                step=step,
                fps=fps,
                out_type=out_type)
        else:
            self.add_video(
                name, resulted_video, step=step, fps=fps, out_type=out_type)
        return resulted_video

    @master_only
    def add_video(
        self,
        name: str,
        image: np.ndarray,
        step: int = 0,
        fps: int = 4,
        out_type: str = 'img',
    ) -> None:
        """Record the image.



        Args:

            name (str): The image identifier.

            image (np.ndarray, optional): The image to be saved. The format

                should be RGB. Default to None.

            step (int): Global step value to record. Default to 0.

            fps (int): Frames per second for saving video. Defaults to 4.

            out_type (str): Output format type, choose from 'img', 'gif',

                'video'. Defaults to ``'img'``.
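
        Examples:
            >>> # Hedged sketch: save an 8-frame random video as a gif via
            >>> # a registered ``LocalVisBackend``.
            >>> vis = ActionVisualizer(
            ...     save_dir='./outputs',
            ...     vis_backends=[dict(type='LocalVisBackend')])
            >>> frames = np.random.randint(
            ...     0, 256, size=(8, 64, 64, 3), dtype=np.uint8)
            >>> vis.add_video('random_demo', frames, out_type='gif')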

        """
        for vis_backend in self._vis_backends.values():
            vis_backend.add_video(
                name, image, step=step, fps=fps,
                out_type=out_type)  # type: ignore