isLinXu committed on
Commit
0d58b8d
·
1 Parent(s): 1a28c00
Files changed (2) hide show
  1. mmaction2_app.py +525 -0
  2. requirements.txt +20 -0
mmaction2_app.py ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import sys

# Bootstrap the OpenMMLab stack at startup (the hosting environment has no
# build step for these packages, so the app installs its own dependencies).
# Fix over the original: invoke pip through the *current* interpreter
# (`sys.executable -m pip`) so packages land in the right environment, and
# warn on a non-zero exit status instead of silently ignoring failures.
for _pip_args in ('-U openmim', 'mmengine', 'mmcv', 'mmdet', 'mmpose',
                  'mmaction2'):
    if os.system(f'{sys.executable} -m pip install {_pip_args}') != 0:
        print(f'Warning: "pip install {_pip_args}" failed', file=sys.stderr)

import argparse
import fnmatch
import os.path
import os.path as osp
from operator import itemgetter
from typing import Optional, Tuple

import torch
from mmengine import Config, DictAction

from mmaction.apis import inference_recognizer, init_recognizer
from mmaction.visualization import ActionVisualizer
from mim import download

import warnings
warnings.filterwarnings("ignore")

import gradio as gr
28
+
29
# All MMAction2 model configs the user can pick from in the UI Dropdown.
# Each entry is a config name understood by `mim download --package mmaction2`
# (see download_cfg_checkpoint_model_name below); mim fetches the matching
# .py config and .pth checkpoint. The list spans detection (AVA/multisports),
# recognition (Kinetics/Something-Something/UCF/HMDB/MiT) and skeleton-based
# (NTU/GYM) models.
mmaction2_models_list = [
    'slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb',
    'slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava22-rgb',
    'slowonly-lfb-nl_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb',
    'slowonly-lfb-max_kinetics400-pretrained-r50_8xb12-4x16x1-20e_ava21-rgb',
    'slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb',
    'slowfast_kinetics400-pretrained-r50-context_8xb16-4x16x1-20e_ava21-rgb',
    'slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb',
    'slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb',
    'slowfast_kinetics400-pretrained-r50-temporal-max_8xb6-8x8x1-cosine-10e_ava22-rgb',
    'slowfast_r50-k400-pre-temporal-max-focal-alpha3-gamma1_8xb6-8x8x1-cosine-10e_ava22-rgb',
    'slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb',
    'slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb',
    'slowonly_kinetics700-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb',
    'slowonly_kinetics400-pretrained-r50-nl_8xb16-4x16x1-20e_ava21-rgb',
    'slowonly_kinetics400-pretrained-r50-nl_8xb16-8x8x1-20e_ava21-rgb',
    'slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb',
    'slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-8e_multisports-rgb',
    'vit-base-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb',
    'vit-large-p16_videomae-k400-pre_8xb8-16x4x1-20e-adamw_ava-kinetics-rgb',
    'c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb',
    'c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb',
    'c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb',
    'c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb',
    'c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb',
    'ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb',
    'ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb',
    'ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb',
    'ipcsn_r152_32x2x1-180e_kinetics400-rgb',
    'ircsn_r152_32x2x1-180e_kinetics400-rgb',
    'ipcsn_ig65m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb',
    'ipcsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb',
    'ircsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb',
    'i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb',
    'mvit-small-p244_32xb16-16x4x1-200e_kinetics400-rgb_infer',
    'mvit-small-p244_32xb16-16x4x1-200e_kinetics400-rgb',
    'mvit-base-p244_32x3x1_kinetics400-rgb', 'mvit-large-p244_40x3x1_kinetics400-rgb',
    'mvit-small-p244_k400-pre_16xb16-u16-100e_sthv2-rgb_infer',
    'mvit-small-p244_k400-pre_16xb16-u16-100e_sthv2-rgb',
    'mvit-base-p244_u32_sthv2-rgb', 'mvit-large-p244_u40_sthv2-rgb',
    'mvit-small-p244_k400-maskfeat-pre_8xb32-16x4x1-100e_kinetics400-rgb',
    'slowonly_r50_8xb16-8x8x1-256e_imagenet-kinetics400-rgb',
    'r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb',
    'r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb',
    'slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb',
    'slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb',
    'slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb',
    'slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb',
    'slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb',
    'slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb',
    'slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb',
    'slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb',
    'slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb',
    'slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb',
    'slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb',
    'slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb',
    'slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb',
    'slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb',
    'slowonly_imagenet-pretrained-r50_32xb8-8x8x1-steplr-150e_kinetics710-rgb',
    'swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb',
    'swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb',
    'swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb',
    'swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb',
    'swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb',
    'swin-small-p244-w877_in1k-pre_32xb4-amp-32x2x1-30e_kinetics710-rgb',
    'tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb',
    'tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb',
    'tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb',
    'timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb',
    'timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb',
    'timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb',
    'tin_imagenet-pretrained-r50_8xb6-1x1x8-40e_sthv1-rgb',
    'tin_imagenet-pretrained-r50_8xb6-1x1x8-40e_sthv2-rgb',
    'tin_kinetics400-pretrained-tsm-r50_1x1x8-50e_kinetics400-rgb',
    'tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb',
    'tpn-slowonly_imagenet-pretrained-r50_8xb8-8x8x1-150e_kinetics400-rgb',
    'tpn-tsm_imagenet-pretrained-r50_8xb8-1x1x8-150e_sthv1-rgb',
    'trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb',
    'trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb',
    'tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb',
    'tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb',
    'tsn_imagenet-pretrained-swin-transformer_32xb8-1x1x8-50e_kinetics400-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb',
    'tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb',
    'uniformer-small_imagenet1k-pre_16x4x1_kinetics400-rgb',
    'uniformer-base_imagenet1k-pre_16x4x1_kinetics400-rgb',
    'uniformer-base_imagenet1k-pre_32x4x1_kinetics400-rgb',
    'uniformerv2-base-p16-res224_clip_8xb32-u8_kinetics400-rgb',
    'uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics400-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics400-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics400-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics400-rgb',
    'uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics400-rgb',
    'uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics600-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics600-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics600-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics600-rgb',
    'uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics600-rgb',
    'uniformerv2-base-p16-res224_clip-pre_8xb32-u8_kinetics700-rgb',
    'uniformerv2-base-p16-res224_clip-kinetics710-pre_8xb32-u8_kinetics700-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u8_kinetics700-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u16_kinetics700-rgb',
    'uniformerv2-large-p14-res224_clip-kinetics710-pre_u32_kinetics700-rgb',
    'uniformerv2-large-p14-res336_clip-kinetics710-pre_u32_kinetics700-rgb',
    'uniformerv2-base-p16-res224_clip-pre_u8_kinetics710-rgb',
    'uniformerv2-large-p14-res224_clip-pre_u8_kinetics710-rgb',
    'uniformerv2-large-p14-res336_clip-pre_u8_kinetics710-rgb',
    'uniformerv2-base-p16-res224_clip-kinetics710-kinetics-k400-pre_16xb32-u8_mitv1-rgb',
    'uniformerv2-large-p16-res224_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb',
    'uniformerv2-large-p16-res336_clip-kinetics710-kinetics-k400-pre_u8_mitv1-rgb',
    'vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400',
    'vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400',
    'vit-small-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400',
    'vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400',
    'x3d_s_13x6x1_facebook-kinetics400-rgb',
    'x3d_m_16x5x1_facebook-kinetics400-rgb',
    'tsn_r18_8xb320-64x1x1-100e_kinetics400-audio-feature',
    'bmn_2xb8-400x100-9e_activitynet-feature',
    'bsn_400x100_1xb16_20e_activitynet_feature (cuhk_mean_100)',
    'clip4clip_vit-base-p32-res224-clip-pre_8xb16-u12-5e_msrvtt-9k-rgb',
    '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d',
    '2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d',
    '2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d',
    '2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d',
    '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d',
    '2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d',
    '2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d',
    '2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d',
    'slowonly_r50_8xb16-u48-240e_gym-keypoint',
    'slowonly_r50_8xb16-u48-240e_gym-limb', 'slowonly_r50_8xb16-u48-240e_ntu60-xsub-keypoint',
    'slowonly_r50_8xb16-u48-240e_ntu60-xsub-limb',
    'slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_hmdb51-split1-keypoint',
    'slowonly_kinetics400-pretrained-r50_8xb16-u48-120e_ucf101-split1-keypoint',
    'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d',
    'stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d',
    'stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d',
    'stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d',
    'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d',
    'stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d',
    'stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d',
    'stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d',
    'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d',
    'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d',
    'stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d'
]

# Label-map text files selectable in the UI; each maps class indices to
# action names for one dataset. The files themselves are fetched at startup
# by download_label_map_txt() into the working directory under these names.
labelmap_list = [
    'kinetics_label_map_k400.txt', 'kinetics_label_map_k600.txt', 'kinetics_label_map_k700.txt',
    'kinetics_label_map_k710.txt', 'diving48_label_map.txt', 'gym_label_map.txt',
    'hmdb51_label_map.txt', 'jester_label_map.txt', 'mit_label_map.txt',
    'mmit_label_map.txt', 'multisports_label_map.txt', 'skeleton_label_map_gym99.txt',
    'skeleton_label_map_ntu60.txt', 'sthv1_label_map.txt', 'sthv2_label_map.txt', 'ucf101_label_map.txt',
]
216
+
217
+
218
+
219
def parse_args():
    """Build and parse the command-line arguments for the demo.

    Returns:
        argparse.Namespace: Parsed arguments. Note that mmaction_inference()
        overwrites config/checkpoint/video/device/label/out_filename on the
        returned namespace with UI-supplied values, so the CLI values mostly
        serve as defaults for fps / font / resolution options.
    """
    parser = argparse.ArgumentParser(description='MMAction2 demo')
    parser.add_argument('--config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file/url')
    parser.add_argument('--video', help='video file/url or rawframes directory')
    parser.add_argument('--label', help='label file')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        # DictAction (mmengine) parses key=value pairs into a dict that is
        # later merged into the loaded Config.
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. For example, '
        "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    parser.add_argument(
        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    parser.add_argument(
        '--fps',
        default=30,
        type=int,
        help='specify fps value of the output video when using rawframes to '
        'generate file')
    parser.add_argument(
        '--font-scale',
        default=None,
        type=float,
        help='font scale of the text in output video')
    parser.add_argument(
        '--font-color',
        default='white',
        help='font color of the text in output video')
    parser.add_argument(
        '--target-resolution',
        nargs=2,
        default=None,
        type=int,
        help='Target resolution (w, h) for resizing the frames when using a '
        'video as input. If either dimension is set to -1, the frames are '
        'resized by keeping the existing aspect ratio')
    parser.add_argument('--out-filename', default=None, help='output filename')
    args = parser.parse_args()
    return args
260
+
261
+
262
def get_output(
        video_path: str,
        out_filename: str,
        data_sample: str,
        labels: list,
        fps: int = 30,
        font_scale: Optional[float] = None,
        font_color: str = 'white',
        target_resolution: Optional[Tuple[int]] = None,
) -> None:
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information of some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path.
        out_filename (str): Output filename for the generated file.
        data_sample (str): Predicted label of the generated file.
        labels (list): Label list of current dataset.
        fps (int): Number of picture frames to read per second. Defaults to 30.
        font_scale (float): Font scale of the text. Defaults to None.
        font_color (str): Font color of the text. Defaults to ``white``.
        target_resolution (Tuple[int], optional): Set to
            (desired_width desired_height) to have resized frames. If
            either dimension is None, the frames are resized by keeping
            the existing aspect ratio. Defaults to None.
    """

    # Remote videos are not supported by the visualizer path below.
    if video_path.startswith(('http://', 'https://')):
        raise NotImplementedError

    # init visualizer — the output container is picked from the requested
    # extension: '.gif' produces a gif, anything else a video file.
    out_type = 'gif' if osp.splitext(out_filename)[1] == '.gif' else 'video'
    visualizer = ActionVisualizer()
    visualizer.dataset_meta = dict(classes=labels)

    # Text styling passed through to the visualizer; font size is only
    # overridden when the caller supplied one.
    text_cfg = {'colors': font_color}
    if font_scale is not None:
        text_cfg.update({'font_sizes': font_scale})

    # Render the prediction onto the video and write it under demo/.
    visualizer.add_datasample(
        out_filename,
        video_path,
        data_sample,
        draw_pred=True,
        draw_gt=False,
        text_cfg=text_cfg,
        fps=fps,
        out_type=out_type,
        out_path=osp.join('demo', out_filename),
        target_resolution=target_resolution)
315
+
316
+
317
def clear_folder(folder_path):
    """Remove every entry (file, symlink, or subdirectory) inside *folder_path*.

    The folder itself is preserved. A failure on one entry is reported and
    skipped so the sweep continues over the remaining entries.
    """
    import shutil
    for entry in os.listdir(folder_path):
        target = os.path.join(folder_path, entry)
        try:
            # Symlinks are unlinked (never followed); only real
            # directories are removed recursively.
            if os.path.isfile(target) or os.path.islink(target):
                os.unlink(target)
            elif os.path.isdir(target):
                shutil.rmtree(target)
        except Exception as err:
            print(f"Failed to delete {target}. Reason: {err}")
    print(f"Clear {folder_path} successfully.")
329
+
330
+
331
def download_cfg_checkpoint_model_name(model_name):
    """Fetch the config (.py) and checkpoint (.pth) for *model_name* via mim.

    The ./checkpoint directory is emptied first so that the subsequent
    glob for exactly one *.py / *.pth file in mmaction_inference() finds
    only the freshly downloaded pair. The directory must already exist
    (the caller creates it before invoking this function).
    """
    clear_folder("./checkpoint")
    download(package='mmaction2',
             configs=[model_name],
             dest_root='./checkpoint')
336
+
337
+
338
def download_test_video():
    """Download the sample videos used by the Gradio demo and its examples."""
    # (url, destination filename) pairs — all saved into the working dir.
    sample_videos = (
        ('https://user-images.githubusercontent.com/59380685/267197615-0e372587-9f42-428a-8f3b-e4e6f17e8b1a.mp4',
         'demo.mp4'),
        ('https://user-images.githubusercontent.com/59380685/267197620-56ee9562-ba3a-4ac4-977a-6df1cd693c39.mp4',
         'zelda.mp4'),
        ('https://user-images.githubusercontent.com/59380685/267197784-b8bff32a-6655-4777-a3f4-49070d480a76.mp4',
         'test_video_structuralize.mp4'),
        ('https://user-images.githubusercontent.com/59380685/267197798-9f88e0b9-1889-494a-a886-2e1e9ed43327.mp4',
         'shaowei.mp4'),
        ('https://user-images.githubusercontent.com/59380685/267197804-953056d5-1351-4c5c-8459-f4e8f6815836.mp4',
         'demo_skeleton.mp4'),
        ('https://user-images.githubusercontent.com/59380685/267197812-b4be4451-b694-4717-b8cf-545e36e506c1.mp4',
         'cxk.mp4'),
    )
    for url, dst in sample_videos:
        torch.hub.download_url_to_file(url, dst)
358
+
359
+
360
def download_label_map_txt():
    """Download every label-map text file selectable in the demo UI.

    Each file maps class indices to human-readable action names for one
    dataset, and is saved into the working directory under the exact name
    listed in ``labelmap_list``.

    Bug fixed vs. the original: ``skeleton_label_map_ntu60.txt`` was being
    downloaded into ``mmit_label_map.txt`` (clobbering the MiT label map),
    and the multisports map was fetched twice. Each URL is now downloaded
    exactly once, to its matching destination filename.
    """
    # destination filename -> source URL (GitHub issue attachments)
    label_map_urls = {
        'ucf101_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579936/ucf101_label_map.txt',
        'gym_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579940/gym_label_map.txt',
        'diving48_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579943/diving48_label_map.txt',
        'hmdb51_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579947/hmdb51_label_map.txt',
        'jester_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579949/jester_label_map.txt',
        'kinetics_label_map_k400.txt':
            'https://github.com/isLinXu/issues/files/12579951/kinetics_label_map_k400.txt',
        'kinetics_label_map_k600.txt':
            'https://github.com/isLinXu/issues/files/12579952/kinetics_label_map_k600.txt',
        'kinetics_label_map_k700.txt':
            'https://github.com/isLinXu/issues/files/12579953/kinetics_label_map_k700.txt',
        'kinetics_label_map_k710.txt':
            'https://github.com/isLinXu/issues/files/12579954/kinetics_label_map_k710.txt',
        'mit_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579955/mit_label_map.txt',
        'mmit_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579957/mmit_label_map.txt',
        'multisports_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579960/multisports_label_map.txt',
        'skeleton_label_map_ntu60.txt':
            'https://github.com/isLinXu/issues/files/12579961/skeleton_label_map_ntu60.txt',
        'skeleton_label_map_gym99.txt':
            'https://github.com/isLinXu/issues/files/12579962/skeleton_label_map_gym99.txt',
        'sthv1_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579965/sthv1_label_map.txt',
        'sthv2_label_map.txt':
            'https://github.com/isLinXu/issues/files/12579967/sthv2_label_map.txt',
    }
    for filename, url in label_map_urls.items():
        torch.hub.download_url_to_file(url, filename)
415
+
416
+
417
def mmaction_inference(video, mmaction2_models, device, label, out_filename):
    """Gradio callback: run action recognition on *video* with the selected model.

    Downloads the chosen model's config/checkpoint via mim into ./checkpoint,
    runs inference, prints the top-5 labels, optionally renders an annotated
    output video under demo/, and returns the path of the video to display.

    Args:
        video (str): Path of the uploaded/selected input video.
        mmaction2_models (str): Config name chosen from mmaction2_models_list.
        device (str): 'cpu' or 'cuda:0'.
        label (str): Filename of the label-map text file (one label per line).
        out_filename (str): Desired output filename, or None to skip rendering.

    Returns:
        str: Path to the rendered output video (demo/<out_filename>) when it
        was produced, otherwise the basename of the input video.
    """
    # Start from the CLI defaults, then overwrite with the UI selections.
    args = parse_args()
    path = "./checkpoint"
    if not os.path.exists(path):
        os.makedirs(path)
    # The download clears ./checkpoint first, so exactly one config (*.py)
    # and one checkpoint (*.pth) are expected below; [0] would raise
    # IndexError if the download produced neither.
    download_cfg_checkpoint_model_name(mmaction2_models)
    config = [f for f in os.listdir(path) if fnmatch.fnmatch(f, "*.py")][0]
    config = path + "/" + config

    checkpoint = [f for f in os.listdir(path) if fnmatch.fnmatch(f, "*.pth")][0]
    checkpoint = path + "/" + checkpoint

    # args setting
    args.config = config
    args.checkpoint = checkpoint
    args.video = video
    args.device = device
    args.label = label
    args.out_filename = out_filename

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # Build the recognizer from a config file and checkpoint file/url
    model = init_recognizer(cfg, args.checkpoint, device=args.device)
    pred_result = inference_recognizer(model, args.video)

    # NOTE(review): assumes pred_result.pred_scores.item is the per-class
    # score tensor of this mmaction2 version — verify against the installed
    # mmaction2 API.
    pred_scores = pred_result.pred_scores.item.tolist()
    # Pair each class index with its score, then rank by score descending.
    score_tuples = tuple(zip(range(len(pred_scores)), pred_scores))
    score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)
    top5_label = score_sorted[:5]

    # Map class indices to human-readable names via the label-map file.
    labels = open(args.label).readlines()
    labels = [x.strip() for x in labels]
    results = [(labels[k[0]], k[1]) for k in top5_label]

    print('The top-5 labels with corresponding scores are:')
    for result in results:
        print(f'{result[0]}: ', result[1])

    if args.out_filename is not None:

        # Validate --target-resolution: a -1 dimension means "keep aspect
        # ratio", in which case the other dimension must be positive.
        if args.target_resolution is not None:
            if args.target_resolution[0] == -1:
                assert isinstance(args.target_resolution[1], int)
                assert args.target_resolution[1] > 0
            if args.target_resolution[1] == -1:
                assert isinstance(args.target_resolution[0], int)
                assert args.target_resolution[0] > 0
            args.target_resolution = tuple(args.target_resolution)

        # Render the annotated output video/gif under demo/.
        get_output(
            args.video,
            args.out_filename,
            pred_result,
            labels,
            fps=args.fps,
            font_scale=args.font_scale,
            font_color=args.font_color,
            target_resolution=args.target_resolution)
        save_dir_path = "demo/" + args.out_filename
        if os.path.exists(save_dir_path):
            print(f'File saved as {save_dir_path}')
            return save_dir_path
        else:
            # Rendering did not produce the file; fall back to the input name.
            base_name = os.path.basename(args.video)
            print(f'File saved as {base_name}')
            return base_name
486
+
487
+
488
if __name__ == '__main__':
    # Fetch sample videos and label-map files before building the UI so the
    # examples below resolve to local files.
    # NOTE(review): the 'baoguo.mp4' example is never downloaded by
    # download_test_video() — confirm it exists or drop that example.
    print("Downloading test video and model...")
    download_test_video()
    print("Downloading label map txt...")
    download_label_map_txt()

    # UI widgets. gr.inputs.* is the legacy Gradio 3.x component namespace
    # (matches the gradio~=3.42 pin in requirements.txt).
    input_video = gr.Video(type='mp4', label="Original video")
    mmaction2_models = gr.inputs.Dropdown(label="MMAction2 models", choices=[x for x in mmaction2_models_list],
                                          default='tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb')
    device = gr.inputs.Radio(label="Device", choices=["cpu", "cuda:0"], default="cpu")
    # label = gr.inputs.Textbox(label="Label file", default="label_map/kinetics/label_map_k400.txt")
    label = gr.inputs.Dropdown(label="Label file", choices=[x for x in labelmap_list], default='kinetics_label_map_k400.txt')
    out_filename = gr.inputs.Textbox(label="Output filename", default="demo_dst.mp4")
    output_video = gr.Video(label="Output video")

    # Pre-filled example rows: (video, model config, device, label map, output name).
    examples = [['zelda.mp4', 'tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb', "cpu",
                 'kinetics_label_map_k400.txt', "demo_dst.mp4"],
                ['shaowei.mp4', 'slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb', "cpu",
                 'kinetics_label_map_k400.txt', "demo_dst.mp4"],
                ['baoguo.mp4', 'slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb', "cpu",
                 'kinetics_label_map_k400.txt', "demo_dst.mp4"],
                ['cxk.mp4', 'slowfast-acrn_kinetics400-pretrained-r50_8xb8-8x8x1-cosine-10e_ava21-rgb', "cpu",
                 'kinetics_label_map_k400.txt', "demo_dst.mp4"]
                ]

    title = "MMAction2 web demo"
    description = "<div align='center'><img src='https://raw.githubusercontent.com/open-mmlab/mmaction2/main/resources/mmaction2_logo.png' width='450''/><div>" \
                  "<p style='text-align: center'><a href='https://github.com/open-mmlab/mmaction2'>MMAction2</a> MMAction2 是一款基于 PyTorch 开发的行为识别开源工具包,是 open-mmlab 项目的一个子项目。" \
                  "OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark.</p>"
    article = "<p style='text-align: center'><a href='https://github.com/open-mmlab/mmaction2'>MMAction2</a></p>" \
              "<p style='text-align: center'><a href='https://github.com/isLinXu'>gradio build by gatilin</a></a></p>"

    # gradio demo
    iface = gr.Interface(fn=mmaction_inference,
                         inputs=[input_video, mmaction2_models, device, label, out_filename],
                         outputs=output_video, examples=examples,
                         title=title, description=description, article=article)
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ wget~=3.2
2
+ opencv-python~=4.6.0.66
3
+ numpy~=1.23.0
4
+ torch~=1.13.1
5
+ torchvision~=0.14.1
6
+ pillow~=9.4.0
7
+ gradio~=3.42.0
8
+ ultralytics~=8.0.169
9
+ pyyaml~=6.0
10
+ wandb~=0.13.11
11
+ tqdm~=4.65.0
12
+ matplotlib~=3.7.1
13
+ pandas~=2.0.0
14
+ seaborn~=0.12.2
15
+ requests~=2.31.0
16
+ psutil~=5.9.4
17
+ thop~=0.1.1-2209072238
18
+ timm~=0.9.2
19
+ super-gradients~=3.2.0
20
+ openmim