maverickrzw commited on
Commit
2402804
·
1 Parent(s): 98a47fe
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. mmdetection +0 -1
  2. mmdetection/.circleci/config.yml +34 -0
  3. mmdetection/.circleci/docker/Dockerfile +11 -0
  4. mmdetection/.circleci/test.yml +204 -0
  5. mmdetection/.dev_scripts/batch_test_list.py +545 -0
  6. mmdetection/.dev_scripts/batch_train_list.txt +80 -0
  7. mmdetection/.dev_scripts/benchmark_filter.py +167 -0
  8. mmdetection/.dev_scripts/benchmark_full_models.txt +96 -0
  9. mmdetection/.dev_scripts/benchmark_inference_fps.py +171 -0
  10. mmdetection/.dev_scripts/benchmark_options.py +16 -0
  11. mmdetection/.dev_scripts/benchmark_test.py +115 -0
  12. mmdetection/.dev_scripts/benchmark_test_image.py +134 -0
  13. mmdetection/.dev_scripts/benchmark_train.py +178 -0
  14. mmdetection/.dev_scripts/benchmark_train_models.txt +20 -0
  15. mmdetection/.dev_scripts/benchmark_valid_flops.py +295 -0
  16. mmdetection/.dev_scripts/check_links.py +157 -0
  17. mmdetection/.dev_scripts/convert_test_benchmark_script.py +114 -0
  18. mmdetection/.dev_scripts/convert_train_benchmark_script.py +104 -0
  19. mmdetection/.dev_scripts/covignore.cfg +5 -0
  20. mmdetection/.dev_scripts/diff_coverage_test.sh +40 -0
  21. mmdetection/.dev_scripts/download_checkpoints.py +83 -0
  22. mmdetection/.dev_scripts/gather_models.py +308 -0
  23. mmdetection/.dev_scripts/gather_test_benchmark_metric.py +96 -0
  24. mmdetection/.dev_scripts/gather_train_benchmark_metric.py +151 -0
  25. mmdetection/.dev_scripts/linter.sh +3 -0
  26. mmdetection/.dev_scripts/test_benchmark.sh +157 -0
  27. mmdetection/.dev_scripts/test_init_backbone.py +178 -0
  28. mmdetection/.dev_scripts/train_benchmark.sh +164 -0
  29. mmdetection/.gitignore +123 -0
  30. mmdetection/.owners.yml +14 -0
  31. mmdetection/.pre-commit-config-zh-cn.yaml +61 -0
  32. mmdetection/.pre-commit-config.yaml +50 -0
  33. mmdetection/.readthedocs.yml +14 -0
  34. mmdetection/CITATION.cff +8 -0
  35. mmdetection/LICENSE +203 -0
  36. mmdetection/MANIFEST.in +7 -0
  37. mmdetection/README.md +442 -0
  38. mmdetection/README_zh-CN.md +461 -0
  39. mmdetection/configs/_base_/datasets/ade20k_instance.py +53 -0
  40. mmdetection/configs/_base_/datasets/ade20k_panoptic.py +38 -0
  41. mmdetection/configs/_base_/datasets/ade20k_semantic.py +48 -0
  42. mmdetection/configs/_base_/datasets/cityscapes_detection.py +84 -0
  43. mmdetection/configs/_base_/datasets/cityscapes_instance.py +113 -0
  44. mmdetection/configs/_base_/datasets/coco_caption.py +60 -0
  45. mmdetection/configs/_base_/datasets/coco_detection.py +95 -0
  46. mmdetection/configs/_base_/datasets/coco_instance.py +98 -0
  47. mmdetection/configs/_base_/datasets/coco_instance_semantic.py +78 -0
  48. mmdetection/configs/_base_/datasets/coco_panoptic.py +94 -0
  49. mmdetection/configs/_base_/datasets/coco_semantic.py +78 -0
  50. mmdetection/configs/_base_/datasets/ct_detection.py +96 -0
mmdetection DELETED
@@ -1 +0,0 @@
1
- Subproject commit bce85dc84dcae85bab87c171481acff7b3df25ad
 
 
mmdetection/.circleci/config.yml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2.1
2
+
3
+ # this allows you to use CircleCI's dynamic configuration feature
4
+ setup: true
5
+
6
+ # the path-filtering orb is required to continue a pipeline based on
7
+ # the path of an updated fileset
8
+ orbs:
9
+ path-filtering: circleci/path-filtering@0.1.2
10
+
11
+ workflows:
12
+ # the always-run workflow is always triggered, regardless of the pipeline parameters.
13
+ always-run:
14
+ jobs:
15
+ # the path-filtering/filter job determines which pipeline
16
+ # parameters to update.
17
+ - path-filtering/filter:
18
+ name: check-updated-files
19
+ # 3-column, whitespace-delimited mapping. One mapping per
20
+ # line:
21
+ # <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
22
+ mapping: |
23
+ mmdet/.* lint_only false
24
+ requirements/.* lint_only false
25
+ tests/.* lint_only false
26
+ tools/.* lint_only false
27
+ configs/.* lint_only false
28
+ .circleci/.* lint_only false
29
+ base-revision: dev-3.x
30
+ # this is the path of the configuration we should trigger once
31
+ # path filtering and pipeline parameter value updates are
32
+ # complete. In this case, we are using the parent dynamic
33
+ # configuration itself.
34
+ config-path: .circleci/test.yml
mmdetection/.circleci/docker/Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG PYTORCH="1.8.1"
2
+ ARG CUDA="10.2"
3
+ ARG CUDNN="7"
4
+
5
+ FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
6
+
7
+ # To fix GPG key error when running apt-get update
8
+ RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
9
+ RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
10
+
11
+ RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
mmdetection/.circleci/test.yml ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2.1
2
+
3
+ # the default pipeline parameters, which will be updated according to
4
+ # the results of the path-filtering orb
5
+ parameters:
6
+ lint_only:
7
+ type: boolean
8
+ default: true
9
+
10
+ jobs:
11
+ lint:
12
+ docker:
13
+ - image: cimg/python:3.7.4
14
+ steps:
15
+ - checkout
16
+ - run:
17
+ name: Install pre-commit hook
18
+ command: |
19
+ pip install pre-commit
20
+ pre-commit install
21
+ - run:
22
+ name: Linting
23
+ command: pre-commit run --all-files
24
+ - run:
25
+ name: Check docstring coverage
26
+ command: |
27
+ pip install interrogate
28
+ interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 85 mmdet
29
+
30
+ build_cpu:
31
+ parameters:
32
+ # The python version must match available image tags in
33
+ # https://circleci.com/developer/images/image/cimg/python
34
+ python:
35
+ type: string
36
+ torch:
37
+ type: string
38
+ torchvision:
39
+ type: string
40
+ docker:
41
+ - image: cimg/python:<< parameters.python >>
42
+ resource_class: large
43
+ steps:
44
+ - checkout
45
+ - run:
46
+ name: Install Libraries
47
+ command: |
48
+ sudo apt-get update
49
+ sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
50
+ - run:
51
+ name: Configure Python & pip
52
+ command: |
53
+ pip install --upgrade pip
54
+ pip install wheel
55
+ - run:
56
+ name: Install PyTorch
57
+ command: |
58
+ python -V
59
+ python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
60
+ - when:
61
+ condition:
62
+ equal: ["3.9.0", << parameters.python >>]
63
+ steps:
64
+ - run: pip install "protobuf <= 3.20.1" && sudo apt-get update && sudo apt-get -y install libprotobuf-dev protobuf-compiler cmake
65
+ - run: pip install dsdl
66
+ - run:
67
+ name: Install mmdet dependencies
68
+ # numpy may be downgraded after building pycocotools, which causes `ImportError: numpy.core.multiarray failed to import`
69
+ # force reinstall pycocotools to ensure pycocotools being built under the currenct numpy
70
+ command: |
71
+ python -m pip install git+ssh://git@github.com/open-mmlab/mmengine.git@main
72
+ pip install -U openmim
73
+ mim install 'mmcv >= 2.0.0rc4'
74
+ pip install -r requirements/tests.txt -r requirements/optional.txt
75
+ pip install --force-reinstall pycocotools
76
+ pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
77
+ pip install -r requirements/tracking.txt
78
+ pip install git+https://github.com/cocodataset/panopticapi.git
79
+ pip install git+https://github.com/JonathonLuiten/TrackEval.git
80
+ - run:
81
+ name: Build and install
82
+ command: |
83
+ pip install -e .
84
+ - run:
85
+ name: Run unittests
86
+ command: |
87
+ python -m coverage run --branch --source mmdet -m pytest tests/
88
+ python -m coverage xml
89
+ python -m coverage report -m
90
+
91
+ build_cuda:
92
+ parameters:
93
+ torch:
94
+ type: string
95
+ cuda:
96
+ type: enum
97
+ enum: ["11.1", "11.7"]
98
+ cudnn:
99
+ type: integer
100
+ default: 8
101
+ machine:
102
+ image: ubuntu-2004-cuda-11.4:202110-01
103
+ # docker_layer_caching: true
104
+ resource_class: gpu.nvidia.small
105
+ steps:
106
+ - checkout
107
+ - run:
108
+ # CLoning repos in VM since Docker doesn't have access to the private key
109
+ name: Clone Repos
110
+ command: |
111
+ git clone -b main --depth 1 ssh://git@github.com/open-mmlab/mmengine.git /home/circleci/mmengine
112
+ - run:
113
+ name: Build Docker image
114
+ command: |
115
+ docker build .circleci/docker -t mmdetection:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
116
+ docker run --gpus all -t -d -v /home/circleci/project:/mmdetection -v /home/circleci/mmengine:/mmengine -w /mmdetection --name mmdetection mmdetection:gpu
117
+ docker exec mmdetection apt-get install -y git
118
+ - run:
119
+ name: Install mmdet dependencies
120
+ command: |
121
+ docker exec mmdetection pip install -e /mmengine
122
+ docker exec mmdetection pip install -U openmim
123
+ docker exec mmdetection mim install 'mmcv >= 2.0.0rc4'
124
+ docker exec mmdetection pip install -r requirements/tests.txt -r requirements/optional.txt
125
+ docker exec mmdetection pip install pycocotools
126
+ docker exec mmdetection pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
127
+ docker exec mmdetection pip install -r requirements/tracking.txt
128
+ docker exec mmdetection pip install git+https://github.com/cocodataset/panopticapi.git
129
+ docker exec mmdetection pip install git+https://github.com/JonathonLuiten/TrackEval.git
130
+ docker exec mmdetection python -c 'import mmcv; print(mmcv.__version__)'
131
+ - run:
132
+ name: Build and install
133
+ command: |
134
+ docker exec mmdetection pip install -e .
135
+ - run:
136
+ name: Run unittests
137
+ command: |
138
+ docker exec mmdetection python -m pytest tests/
139
+
140
+ workflows:
141
+ pr_stage_lint:
142
+ when: << pipeline.parameters.lint_only >>
143
+ jobs:
144
+ - lint:
145
+ name: lint
146
+ filters:
147
+ branches:
148
+ ignore:
149
+ - dev-3.x
150
+ pr_stage_test:
151
+ when:
152
+ not: << pipeline.parameters.lint_only >>
153
+ jobs:
154
+ - lint:
155
+ name: lint
156
+ filters:
157
+ branches:
158
+ ignore:
159
+ - dev-3.x
160
+ - build_cpu:
161
+ name: minimum_version_cpu
162
+ torch: 1.8.0
163
+ torchvision: 0.9.0
164
+ python: 3.7.16
165
+ requires:
166
+ - lint
167
+ - build_cpu:
168
+ name: maximum_version_cpu
169
+ torch: 2.0.0
170
+ torchvision: 0.15.1
171
+ python: 3.9.0
172
+ requires:
173
+ - minimum_version_cpu
174
+ - hold:
175
+ type: approval
176
+ requires:
177
+ - maximum_version_cpu
178
+ - build_cuda:
179
+ name: mainstream_version_gpu
180
+ torch: 1.8.1
181
+ # Use double quotation mark to explicitly specify its type
182
+ # as string instead of number
183
+ cuda: "11.1"
184
+ requires:
185
+ - hold
186
+ - build_cuda:
187
+ name: maximum_version_gpu
188
+ torch: 2.0.0
189
+ cuda: "11.7"
190
+ cudnn: 8
191
+ requires:
192
+ - hold
193
+ merge_stage_test:
194
+ when:
195
+ not: << pipeline.parameters.lint_only >>
196
+ jobs:
197
+ - build_cuda:
198
+ name: minimum_version_gpu
199
+ torch: 1.8.0
200
+ cuda: "11.1"
201
+ filters:
202
+ branches:
203
+ only:
204
+ - dev-3.x
mmdetection/.dev_scripts/batch_test_list.py ADDED
@@ -0,0 +1,545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+
3
+ # missing wider_face/timm_example/strong_baselines/simple_copy_paste/
4
+ # selfsup_pretrain/seesaw_loss/pascal_voc/openimages/lvis/ld/lad/cityscapes/deepfashion
5
+
6
+ # yapf: disable
7
+ atss = dict(
8
+ config='configs/atss/atss_r50_fpn_1x_coco.py',
9
+ checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
10
+ url='https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', # noqa
11
+ eval='bbox',
12
+ metric=dict(bbox_mAP=39.4),
13
+ )
14
+ autoassign = dict(
15
+ config='configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py',
16
+ checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
17
+ url='https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', # noqa
18
+ eval='bbox',
19
+ metric=dict(bbox_mAP=40.4),
20
+ )
21
+ carafe = dict(
22
+ config='configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py',
23
+ checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
24
+ url='https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
25
+ eval='bbox',
26
+ metric=dict(bbox_mAP=38.6),
27
+ )
28
+ cascade_rcnn = [
29
+ dict(
30
+ config='configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
31
+ checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
32
+ eval='bbox',
33
+ url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', # noqa
34
+ metric=dict(bbox_mAP=40.3),
35
+ ),
36
+ dict(
37
+ config='configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
38
+ checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
39
+ url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', # noqa
40
+ eval=['bbox', 'segm'],
41
+ metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
42
+ ),
43
+ ]
44
+ cascade_rpn = dict(
45
+ config='configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py', # noqa
46
+ checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
47
+ url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', # noqa
48
+ eval='bbox',
49
+ metric=dict(bbox_mAP=40.4),
50
+ )
51
+ centernet = dict(
52
+ config='configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py',
53
+ checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
54
+ url='https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
55
+ eval='bbox',
56
+ metric=dict(bbox_mAP=29.5),
57
+ )
58
+ centripetalnet = dict(
59
+ config='configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py', # noqa
60
+ checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
61
+ url='https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
62
+ eval='bbox',
63
+ metric=dict(bbox_mAP=44.7),
64
+ )
65
+ convnext = dict(
66
+ config='configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py', # noqa
67
+ checkpoint='cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
68
+ url='https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
69
+ eval=['bbox', 'segm'],
70
+ metric=dict(bbox_mAP=51.8, segm_mAP=44.8),
71
+ )
72
+ cornernet = dict(
73
+ config='configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py',
74
+ checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
75
+ url='https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
76
+ eval='bbox',
77
+ metric=dict(bbox_mAP=41.2),
78
+ )
79
+ dcn = dict(
80
+ config='configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py',
81
+ checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
82
+ url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', # noqa
83
+ eval='bbox',
84
+ metric=dict(bbox_mAP=41.3),
85
+ )
86
+ dcnv2 = dict(
87
+ config='configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py',
88
+ checkpoint='faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth',
89
+ url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth', # noqa
90
+ eval='bbox',
91
+ metric=dict(bbox_mAP=38.7),
92
+ )
93
+ ddod = dict(
94
+ config='configs/ddod/ddod_r50_fpn_1x_coco.py',
95
+ checkpoint='ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth',
96
+ url='https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth', # noqa
97
+ eval='bbox',
98
+ metric=dict(bbox_mAP=41.7),
99
+ )
100
+ deformable_detr = dict(
101
+ config='configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py',
102
+ checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
103
+ url='https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
104
+ eval='bbox',
105
+ metric=dict(bbox_mAP=44.5),
106
+ )
107
+ detectors = dict(
108
+ config='configs/detectors/detectors_htc-r50_1x_coco.py',
109
+ checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
110
+ url='https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth', # noqa
111
+ eval=['bbox', 'segm'],
112
+ metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
113
+ )
114
+ detr = dict(
115
+ config='configs/detr/detr_r50_8xb2-150e_coco.py',
116
+ checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
117
+ url='https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', # noqa
118
+ eval='bbox',
119
+ metric=dict(bbox_mAP=40.1),
120
+ )
121
+ double_heads = dict(
122
+ config='configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py',
123
+ checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
124
+ url='https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', # noqa
125
+ eval='bbox',
126
+ metric=dict(bbox_mAP=40.0),
127
+ )
128
+ dyhead = dict(
129
+ config='configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py',
130
+ checkpoint='atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth',
131
+ url='https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth', # noqa
132
+ eval='bbox',
133
+ metric=dict(bbox_mAP=43.3),
134
+ )
135
+ dynamic_rcnn = dict(
136
+ config='configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py',
137
+ checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
138
+ url='https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth', # noqa
139
+ eval='bbox',
140
+ metric=dict(bbox_mAP=38.9),
141
+ )
142
+ efficientnet = dict(
143
+ config='configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py',
144
+ checkpoint='retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
145
+ url='https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
146
+ eval='bbox',
147
+ metric=dict(bbox_mAP=40.5),
148
+ )
149
+ empirical_attention = dict(
150
+ config='configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py', # noqa
151
+ checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
152
+ url='https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
153
+ eval='bbox',
154
+ metric=dict(bbox_mAP=40.0),
155
+ )
156
+ faster_rcnn = dict(
157
+ config='configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
158
+ checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
159
+ url='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', # noqa
160
+ eval='bbox',
161
+ metric=dict(bbox_mAP=37.4),
162
+ )
163
+ fcos = dict(
164
+ config='configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py', # noqa
165
+ checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
166
+ url='https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
167
+ eval='bbox',
168
+ metric=dict(bbox_mAP=38.7),
169
+ )
170
+ foveabox = dict(
171
+ config='configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py',
172
+ checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
173
+ url='https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', # noqa
174
+ eval='bbox',
175
+ metric=dict(bbox_mAP=37.9),
176
+ )
177
+ fpg = dict(
178
+ config='configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py',
179
+ checkpoint='mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
180
+ url='https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
181
+ eval=['bbox', 'segm'],
182
+ metric=dict(bbox_mAP=43.0, segm_mAP=38.1),
183
+ )
184
+ free_anchor = dict(
185
+ config='configs/free_anchor/freeanchor_r50_fpn_1x_coco.py',
186
+ checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
187
+ url='https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', # noqa
188
+ eval='bbox',
189
+ metric=dict(bbox_mAP=38.7),
190
+ )
191
+ fsaf = dict(
192
+ config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
193
+ checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
194
+ url='https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth', # noqa
195
+ eval='bbox',
196
+ metric=dict(bbox_mAP=37.4),
197
+ )
198
+ gcnet = dict(
199
+ config='configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py', # noqa
200
+ checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
201
+ url='https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
202
+ eval=['bbox', 'segm'],
203
+ metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
204
+ )
205
+ gfl = dict(
206
+ config='configs/gfl/gfl_r50_fpn_1x_coco.py',
207
+ checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
208
+ url='https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', # noqa
209
+ eval='bbox',
210
+ metric=dict(bbox_mAP=40.2),
211
+ )
212
+ ghm = dict(
213
+ config='configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py',
214
+ checkpoint='retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth',
215
+ url='https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth', # noqa
216
+ eval='bbox',
217
+ metric=dict(bbox_mAP=37.0),
218
+ )
219
+ gn = dict(
220
+ config='configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py',
221
+ checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
222
+ url='https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', # noqa
223
+ eval=['bbox', 'segm'],
224
+ metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
225
+ )
226
+ gn_ws = dict(
227
+ config='configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py',
228
+ checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
229
+ url='https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', # noqa
230
+ eval='bbox',
231
+ metric=dict(bbox_mAP=39.7),
232
+ )
233
+ grid_rcnn = dict(
234
+ config='configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py',
235
+ checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
236
+ url='https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', # noqa
237
+ eval='bbox',
238
+ metric=dict(bbox_mAP=40.4),
239
+ )
240
+ groie = dict(
241
+ config='configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py',
242
+ checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
243
+ url='https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
244
+ eval='bbox',
245
+ metric=dict(bbox_mAP=38.3),
246
+ )
247
+ guided_anchoring = dict(
248
+ config='configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py', # noqa
249
+ checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
250
+ url='https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', # noqa
251
+ eval='bbox',
252
+ metric=dict(bbox_mAP=36.9),
253
+ )
254
+ hrnet = dict(
255
+ config='configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py',
256
+ checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
257
+ url='https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', # noqa
258
+ eval='bbox',
259
+ metric=dict(bbox_mAP=36.9),
260
+ )
261
+ htc = dict(
262
+ config='configs/htc/htc_r50_fpn_1x_coco.py',
263
+ checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
264
+ url='https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth', # noqa
265
+ eval=['bbox', 'segm'],
266
+ metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
267
+ )
268
+ instaboost = dict(
269
+ config='configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py',
270
+ checkpoint='mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth',
271
+ url='https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth', # noqa
272
+ eval=['bbox', 'segm'],
273
+ metric=dict(bbox_mAP=40.6, segm_mAP=36.6),
274
+ )
275
+ libra_rcnn = dict(
276
+ config='configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py',
277
+ checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
278
+ url='https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', # noqa
279
+ eval='bbox',
280
+ metric=dict(bbox_mAP=38.3),
281
+ )
282
+ mask2former = dict(
283
+ config='configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py',
284
+ checkpoint='mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
285
+ url='https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
286
+ eval=['bbox', 'segm', 'PQ'],
287
+ metric=dict(PQ=51.9, bbox_mAP=44.8, segm_mAP=41.9),
288
+ )
289
+ mask_rcnn = dict(
290
+ config='configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
291
+ checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
292
+ url='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', # noqa
293
+ eval=['bbox', 'segm'],
294
+ metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
295
+ )
296
+ maskformer = dict(
297
+ config='configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py',
298
+ checkpoint='maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
299
+ url='https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
300
+ eval='PQ',
301
+ metric=dict(PQ=46.9),
302
+ )
303
+ ms_rcnn = dict(
304
+ config='configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py',
305
+ checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
306
+ url='https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', # noqa
307
+ eval=['bbox', 'segm'],
308
+ metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
309
+ )
310
+ nas_fcos = dict(
311
+ config='configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py', # noqa
312
+ checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
313
+ url='https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
314
+ eval='bbox',
315
+ metric=dict(bbox_mAP=39.4),
316
+ )
317
+ nas_fpn = dict(
318
+ config='configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py',
319
+ checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
320
+ url='https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', # noqa
321
+ eval='bbox',
322
+ metric=dict(bbox_mAP=40.5),
323
+ )
324
+ paa = dict(
325
+ config='configs/paa/paa_r50_fpn_1x_coco.py',
326
+ checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
327
+ url='https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth', # noqa
328
+ eval='bbox',
329
+ metric=dict(bbox_mAP=40.4),
330
+ )
331
+ pafpn = dict(
332
+ config='configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py',
333
+ checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
334
+ url='https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
335
+ eval='bbox',
336
+ metric=dict(bbox_mAP=37.5),
337
+ )
338
+ panoptic_fpn = dict(
339
+ config='configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py',
340
+ checkpoint='panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth',
341
+ url='https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth', # noqa
342
+ eval='PQ',
343
+ metric=dict(PQ=40.2),
344
+ )
345
+ pisa = dict(
346
+ config='configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py',
347
+ checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
348
+ url='https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', # noqa
349
+ eval='bbox',
350
+ metric=dict(bbox_mAP=38.4),
351
+ )
352
+ point_rend = dict(
353
+ config='configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py',
354
+ checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
355
+ url='https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', # noqa
356
+ eval=['bbox', 'segm'],
357
+ metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
358
+ )
359
+ pvt = dict(
360
+ config='configs/pvt/retinanet_pvt-s_fpn_1x_coco.py',
361
+ checkpoint='retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth',
362
+ url='https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth', # noqa
363
+ eval='bbox',
364
+ metric=dict(bbox_mAP=40.4),
365
+ )
366
+ queryinst = dict(
367
+ config='configs/queryinst/queryinst_r50_fpn_1x_coco.py',
368
+ checkpoint='queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth',
369
+ url='https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth', # noqa
370
+ eval=['bbox', 'segm'],
371
+ metric=dict(bbox_mAP=42.0, segm_mAP=37.5),
372
+ )
373
+ regnet = dict(
374
+ config='configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py',
375
+ checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
376
+ url='https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
377
+ eval=['bbox', 'segm'],
378
+ metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
379
+ )
380
+ reppoints = dict(
381
+ config='configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py',
382
+ checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
383
+ url='https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', # noqa
384
+ eval='bbox',
385
+ metric=dict(bbox_mAP=37.0),
386
+ )
387
+ res2net = dict(
388
+ config='configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py',
389
+ checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
390
+ url='https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', # noqa
391
+ eval='bbox',
392
+ metric=dict(bbox_mAP=43.0),
393
+ )
394
+ resnest = dict(
395
+ config='configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py', # noqa
396
+ checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
397
+ url='https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
398
+ eval='bbox',
399
+ metric=dict(bbox_mAP=42.0),
400
+ )
401
+ resnet_strikes_back = dict(
402
+ config='configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py', # noqa
403
+ checkpoint='mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
404
+ url='https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
405
+ eval=['bbox', 'segm'],
406
+ metric=dict(bbox_mAP=41.2, segm_mAP=38.2),
407
+ )
408
+ retinanet = dict(
409
+ config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
410
+ checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
411
+ url='https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', # noqa
412
+ eval='bbox',
413
+ metric=dict(bbox_mAP=36.5),
414
+ )
415
+ rpn = dict(
416
+ config='configs/rpn/rpn_r50_fpn_1x_coco.py',
417
+ checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
418
+ url='https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', # noqa
419
+ eval='proposal_fast',
420
+ metric=dict(AR_1000=58.2),
421
+ )
422
+ sabl = [
423
+ dict(
424
+ config='configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py',
425
+ checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
426
+ url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', # noqa
427
+ eval='bbox',
428
+ metric=dict(bbox_mAP=37.7),
429
+ ),
430
+ dict(
431
+ config='configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py',
432
+ checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
433
+ url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', # noqa
434
+ eval='bbox',
435
+ metric=dict(bbox_mAP=39.9),
436
+ ),
437
+ ]
438
+ scnet = dict(
439
+ config='configs/scnet/scnet_r50_fpn_1x_coco.py',
440
+ checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
441
+ url='https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth', # noqa
442
+ eval='bbox',
443
+ metric=dict(bbox_mAP=43.5),
444
+ )
445
+ scratch = dict(
446
+ config='configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py',
447
+ checkpoint='scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
448
+ url='https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
449
+ eval=['bbox', 'segm'],
450
+ metric=dict(bbox_mAP=41.2, segm_mAP=37.4),
451
+ )
452
+ solo = dict(
453
+ config='configs/solo/decoupled-solo_r50_fpn_1x_coco.py',
454
+ checkpoint='decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth',
455
+ url='https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth', # noqa
456
+ eval='segm',
457
+ metric=dict(segm_mAP=33.9),
458
+ )
459
+ solov2 = dict(
460
+ config='configs/solov2/solov2_r50_fpn_1x_coco.py',
461
+ checkpoint='solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth',
462
+ url='https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth', # noqa
463
+ eval='segm',
464
+ metric=dict(segm_mAP=34.8),
465
+ )
466
+ sparse_rcnn = dict(
467
+ config='configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
468
+ checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
469
+ url='https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', # noqa
470
+ eval='bbox',
471
+ metric=dict(bbox_mAP=37.9),
472
+ )
473
+ ssd = [
474
+ dict(
475
+ config='configs/ssd/ssd300_coco.py',
476
+ checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
477
+ url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth', # noqa
478
+ eval='bbox',
479
+ metric=dict(bbox_mAP=25.5),
480
+ ),
481
+ dict(
482
+ config='configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',
483
+ checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
484
+ url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
485
+ eval='bbox',
486
+ metric=dict(bbox_mAP=21.3),
487
+ ),
488
+ ]
489
+ swin = dict(
490
+ config='configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py',
491
+ checkpoint='mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
492
+ url='https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
493
+ eval=['bbox', 'segm'],
494
+ metric=dict(bbox_mAP=42.7, segm_mAP=39.3),
495
+ )
496
+ tood = dict(
497
+ config='configs/tood/tood_r50_fpn_1x_coco.py',
498
+ checkpoint='tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth',
499
+ url='https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth', # noqa
500
+ eval='bbox',
501
+ metric=dict(bbox_mAP=42.4),
502
+ )
503
+ tridentnet = dict(
504
+ config='configs/tridentnet/tridentnet_r50-caffe_1x_coco.py',
505
+ checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
506
+ url='https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', # noqa
507
+ eval='bbox',
508
+ metric=dict(bbox_mAP=37.6),
509
+ )
510
+ vfnet = dict(
511
+ config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
512
+ checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
513
+ url='https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', # noqa
514
+ eval='bbox',
515
+ metric=dict(bbox_mAP=41.6),
516
+ )
517
+ yolact = dict(
518
+ config='configs/yolact/yolact_r50_1xb8-55e_coco.py',
519
+ checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
520
+ url='https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth', # noqa
521
+ eval=['bbox', 'segm'],
522
+ metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
523
+ )
524
+ yolo = dict(
525
+ config='configs/yolo/yolov3_d53_8xb8-320-273e_coco.py',
526
+ checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
527
+ url='https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth', # noqa
528
+ eval='bbox',
529
+ metric=dict(bbox_mAP=27.9),
530
+ )
531
+ yolof = dict(
532
+ config='configs/yolof/yolof_r50-c5_8xb8-1x_coco.py',
533
+ checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
534
+ url='https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', # noqa
535
+ eval='bbox',
536
+ metric=dict(bbox_mAP=37.5),
537
+ )
538
+ yolox = dict(
539
+ config='configs/yolox/yolox_tiny_8xb8-300e_coco.py',
540
+ checkpoint='yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
541
+ url='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
542
+ eval='bbox',
543
+ metric=dict(bbox_mAP=31.8),
544
+ )
545
+ # yapf: enable
mmdetection/.dev_scripts/batch_train_list.txt ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py
2
+ configs/atss/atss_r50_fpn_1x_coco.py
3
+ configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py
4
+ configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py
5
+ configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py
6
+ configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
7
+ configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py
8
+ configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py
9
+ configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
10
+ configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py
11
+ configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py
12
+ configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
13
+ configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
14
+ configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py
15
+ configs/ddod/ddod_r50_fpn_1x_coco.py
16
+ configs/detectors/detectors_htc-r50_1x_coco.py
17
+ configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
18
+ configs/detr/detr_r50_8xb2-150e_coco.py
19
+ configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py
20
+ configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py
21
+ configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py
22
+ configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py
23
+ configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py
24
+ configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
25
+ configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py
26
+ configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
27
+ configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py
28
+ configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py
29
+ configs/free_anchor/freeanchor_r50_fpn_1x_coco.py
30
+ configs/fsaf/fsaf_r50_fpn_1x_coco.py
31
+ configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py
32
+ configs/gfl/gfl_r50_fpn_1x_coco.py
33
+ configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py
34
+ configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py
35
+ configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py
36
+ configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py
37
+ configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py
38
+ configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py
39
+ configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py
40
+ configs/htc/htc_r50_fpn_1x_coco.py
41
+ configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py
42
+ configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
43
+ configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py
44
+ configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py
45
+ configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
46
+ configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
47
+ configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py
48
+ configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py
49
+ configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py
50
+ configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py
51
+ configs/paa/paa_r50_fpn_1x_coco.py
52
+ configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py
53
+ configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
54
+ configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py
55
+ configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py
56
+ configs/pvt/retinanet_pvt-t_fpn_1x_coco.py
57
+ configs/queryinst/queryinst_r50_fpn_1x_coco.py
58
+ configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
59
+ configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py
60
+ configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py
61
+ configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py
62
+ configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py
63
+ configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py
64
+ configs/rpn/rpn_r50_fpn_1x_coco.py
65
+ configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py
66
+ configs/scnet/scnet_r50_fpn_1x_coco.py
67
+ configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
68
+ configs/solo/solo_r50_fpn_1x_coco.py
69
+ configs/solov2/solov2_r50_fpn_1x_coco.py
70
+ configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
71
+ configs/ssd/ssd300_coco.py
72
+ configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py
73
+ configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
74
+ configs/tood/tood_r50_fpn_1x_coco.py
75
+ configs/tridentnet/tridentnet_r50-caffe_1x_coco.py
76
+ configs/vfnet/vfnet_r50_fpn_1x_coco.py
77
+ configs/yolact/yolact_r50_8xb8-55e_coco.py
78
+ configs/yolo/yolov3_d53_8xb8-320-273e_coco.py
79
+ configs/yolof/yolof_r50-c5_8xb8-1x_coco.py
80
+ configs/yolox/yolox_tiny_8xb8-300e_coco.py
mmdetection/.dev_scripts/benchmark_filter.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import os
4
+ import os.path as osp
5
+
6
+
7
+ def parse_args():
8
+ parser = argparse.ArgumentParser(description='Filter configs to train')
9
+ parser.add_argument(
10
+ '--basic-arch',
11
+ action='store_true',
12
+ help='to train models in basic arch')
13
+ parser.add_argument(
14
+ '--datasets', action='store_true', help='to train models in dataset')
15
+ parser.add_argument(
16
+ '--data-pipeline',
17
+ action='store_true',
18
+ help='to train models related to data pipeline, e.g. augmentations')
19
+ parser.add_argument(
20
+ '--nn-module',
21
+ action='store_true',
22
+ help='to train models related to neural network modules')
23
+ parser.add_argument(
24
+ '--model-options',
25
+ nargs='+',
26
+ help='custom options to special model benchmark')
27
+ parser.add_argument(
28
+ '--out',
29
+ type=str,
30
+ default='batch_train_list.txt',
31
+ help='output path of gathered metrics to be stored')
32
+ args = parser.parse_args()
33
+ return args
34
+
35
+
36
+ basic_arch_root = [
37
+ 'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
38
+ 'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
39
+ 'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
40
+ 'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
41
+ 'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
42
+ 'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
43
+ 'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
44
+ ]
45
+
46
+ datasets_root = [
47
+ 'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
48
+ ]
49
+
50
+ data_pipeline_root = ['albu_example', 'instaboost']
51
+
52
+ nn_module_root = [
53
+ 'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
54
+ 'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
55
+ ]
56
+
57
+ benchmark_pool = [
58
+ 'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
59
+ 'configs/atss/atss_r50_fpn_1x_coco.py',
60
+ 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
61
+ 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
62
+ 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
63
+ 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
64
+ 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
65
+ 'configs/centripetalnet/'
66
+ 'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
67
+ 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
68
+ 'configs/cornernet/'
69
+ 'cornernet_hourglass104_mstest_8x6_210e_coco.py',
70
+ 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
71
+ 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
72
+ 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
73
+ 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
74
+ 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
75
+ 'configs/detectors/detectors_htc_r50_1x_coco.py',
76
+ 'configs/detr/detr_r50_8x2_150e_coco.py',
77
+ 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
78
+ 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
79
+ 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
80
+ 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
81
+ 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
82
+ 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
83
+ 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
84
+ 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
85
+ 'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
86
+ 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
87
+ 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
88
+ 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
89
+ 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
90
+ 'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
91
+ 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
92
+ 'configs/gfl/gfl_r50_fpn_1x_coco.py',
93
+ 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
94
+ 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
95
+ 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
96
+ 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
97
+ 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
98
+ 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
99
+ 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
100
+ 'configs/htc/htc_r50_fpn_1x_coco.py',
101
+ 'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
102
+ 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
103
+ 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
104
+ 'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
105
+ 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
106
+ 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
107
+ 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
108
+ 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
109
+ 'configs/paa/paa_r50_fpn_1x_coco.py',
110
+ 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
111
+ 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
112
+ 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
113
+ 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
114
+ 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
115
+ 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
116
+ 'configs/resnest/'
117
+ 'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
118
+ 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
119
+ 'configs/rpn/rpn_r50_fpn_1x_coco.py',
120
+ 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
121
+ 'configs/ssd/ssd300_coco.py',
122
+ 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
123
+ 'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
124
+ 'configs/yolact/yolact_r50_1x8_coco.py',
125
+ 'configs/yolo/yolov3_d53_320_273e_coco.py',
126
+ 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
127
+ 'configs/scnet/scnet_r50_fpn_1x_coco.py',
128
+ 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
129
+ ]
130
+
131
+
132
+ def main():
133
+ args = parse_args()
134
+
135
+ benchmark_type = []
136
+ if args.basic_arch:
137
+ benchmark_type += basic_arch_root
138
+ if args.datasets:
139
+ benchmark_type += datasets_root
140
+ if args.data_pipeline:
141
+ benchmark_type += data_pipeline_root
142
+ if args.nn_module:
143
+ benchmark_type += nn_module_root
144
+
145
+ special_model = args.model_options
146
+ if special_model is not None:
147
+ benchmark_type += special_model
148
+
149
+ config_dpath = 'configs/'
150
+ benchmark_configs = []
151
+ for cfg_root in benchmark_type:
152
+ cfg_dir = osp.join(config_dpath, cfg_root)
153
+ configs = os.scandir(cfg_dir)
154
+ for cfg in configs:
155
+ config_path = osp.join(cfg_dir, cfg.name)
156
+ if (config_path in benchmark_pool
157
+ and config_path not in benchmark_configs):
158
+ benchmark_configs.append(config_path)
159
+
160
+ print(f'Totally found {len(benchmark_configs)} configs to benchmark')
161
+ with open(args.out, 'w') as f:
162
+ for config in benchmark_configs:
163
+ f.write(config + '\n')
164
+
165
+
166
+ if __name__ == '__main__':
167
+ main()
mmdetection/.dev_scripts/benchmark_full_models.txt ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py
2
+ atss/atss_r50_fpn_1x_coco.py
3
+ autoassign/autoassign_r50-caffe_fpn_1x_coco.py
4
+ boxinst/boxinst_r50_fpn_ms-90k_coco.py
5
+ carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py
6
+ cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py
7
+ cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
8
+ cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py
9
+ centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
10
+ centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py
11
+ condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
12
+ conditional_detr/conditional-detr_r50_8xb2-50e_coco.py
13
+ convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
14
+ cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py
15
+ dab_detr/dab-detr_r50_8xb2-50e_coco.py
16
+ dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
17
+ dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py
18
+ ddod/ddod_r50_fpn_1x_coco.py
19
+ deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
20
+ detectors/detectors_htc-r50_1x_coco.py
21
+ detr/detr_r50_8xb2-150e_coco.py
22
+ dino/dino-4scale_r50_8xb2-12e_coco.py
23
+ double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py
24
+ dyhead/atss_r50_fpn_dyhead_1x_coco.py
25
+ dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py
26
+ efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py
27
+ empirical_attention/faster-rcnn_r50-attn0010-dcn_fpn_1x_coco.py
28
+ faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
29
+ fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
30
+ foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py
31
+ fpg/retinanet_r50_fpg_crop640_50e_coco.py
32
+ free_anchor/freeanchor_r50_fpn_1x_coco.py
33
+ fsaf/fsaf_r50_fpn_1x_coco.py
34
+ gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py
35
+ gfl/gfl_r50_fpn_1x_coco.py
36
+ glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py
37
+ ghm/retinanet_r50_fpn_ghm-1x_coco.py
38
+ gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py
39
+ gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py
40
+ grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py
41
+ groie/faste-rcnn_r50_fpn_groie_1x_coco.py
42
+ guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py
43
+ hrnet/htc_hrnetv2p-w18_20e_coco.py
44
+ htc/htc_r50_fpn_1x_coco.py
45
+ instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py
46
+ lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
47
+ ld/ld_r18-gflv1-r101_fpn_1x_coco.py
48
+ libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py
49
+ lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py
50
+ mask2former/mask2former_r50_8xb2-lsj-50e_coco.py
51
+ mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
52
+ mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
53
+ maskformer/maskformer_r50_ms-16xb1-75e_coco.py
54
+ ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py
55
+ nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py
56
+ nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py
57
+ paa/paa_r50_fpn_1x_coco.py
58
+ pafpn/faster-rcnn_r50_pafpn_1x_coco.py
59
+ panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
60
+ pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py
61
+ point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py
62
+ pvt/retinanet_pvtv2-b0_fpn_1x_coco.py
63
+ queryinst/queryinst_r50_fpn_1x_coco.py
64
+ regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py
65
+ reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py
66
+ res2net/faster-rcnn_res2net-101_fpn_2x_coco.py
67
+ resnest/mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py
68
+ resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py
69
+ retinanet/retinanet_r50_fpn_1x_coco.py
70
+ rpn/rpn_r50_fpn_1x_coco.py
71
+ rtmdet/rtmdet_s_8xb32-300e_coco.py
72
+ rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
73
+ sabl/sabl-retinanet_r50_fpn_1x_coco.py
74
+ scnet/scnet_r50_fpn_1x_coco.py
75
+ scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
76
+ seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py
77
+ simple_copy_paste/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-90k_coco.py
78
+ soft_teacher/soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py
79
+ solo/solo_r50_fpn_1x_coco.py
80
+ solov2/solov2_r50_fpn_1x_coco.py
81
+ sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
82
+ ssd/ssd300_coco.py
83
+ swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
84
+ tood/tood_r50_fpn_1x_coco.py
85
+ tridentnet/tridentnet_r50-caffe_1x_coco.py
86
+ vfnet/vfnet_r50_fpn_1x_coco.py
87
+ yolact/yolact_r50_8xb8-55e_coco.py
88
+ yolo/yolov3_d53_8xb8-320-273e_coco.py
89
+ yolof/yolof_r50-c5_8xb8-1x_coco.py
90
+ yolox/yolox_s_8xb8-300e_coco.py
91
+ deepsort/deepsort_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
92
+ mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py
93
+ masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py
94
+ ocsort/ocsort_yolox_x_8xb4-amp-80e_crowdhuman-mot17halftrain_test-mot17halfval.py
95
+ qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
96
+ strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py
mmdetection/.dev_scripts/benchmark_inference_fps.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import os
4
+ import os.path as osp
5
+
6
+ from mmengine.config import Config, DictAction
7
+ from mmengine.dist import init_dist
8
+ from mmengine.fileio import dump
9
+ from mmengine.utils import mkdir_or_exist
10
+ from terminaltables import GithubFlavoredMarkdownTable
11
+
12
+ from tools.analysis_tools.benchmark import repeat_measure_inference_speed
13
+
14
+
15
def parse_args():
    """Build and parse CLI options for the batch FPS benchmark."""
    cli = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint_root', help='Checkpoint file root path')
    cli.add_argument(
        '--round-num', type=int, default=1,
        help='round a number to a given precision in decimal digits')
    cli.add_argument(
        '--repeat-num', type=int, default=1,
        help='number of repeat times of measurement for averaging the results')
    cli.add_argument(
        '--out', type=str, help='output path of gathered fps to be stored')
    cli.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    cli.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    cli.add_argument(
        '--fuse-conv-bn', action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    cli.add_argument(
        '--cfg-options', nargs='+', action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    cli.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    # Expose the local rank so distributed init can read it from the env.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(parsed.local_rank)
    return parsed
61
+
62
+
63
def results2markdown(result_dict):
    """Print the gathered FPS results as a GitHub-flavored markdown table.

    Entries whose ``fps`` is a list (repeat-num > 1) get extra mean columns.
    """
    rows = []
    has_repeated_runs = False
    for cfg_name, record in result_dict.items():
        model = cfg_name.replace('configs/', '')
        fps = record['fps']
        ms_per_img = record['ms_times_pre_image']
        if isinstance(fps, list):
            has_repeated_runs = True
            rows.append([
                model,
                ','.join(str(v) for v in fps),
                record['mean_fps'],
                ','.join(str(v) for v in ms_per_img),
                record['mean_times_pre_image'],
            ])
        else:
            rows.append([model, fps, ms_per_img])

    if has_repeated_runs:
        header = [
            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
            'mean_times_pre_image(ms)'
        ]
    else:
        header = ['model', 'fps', 'times_pre_image(ms)']
    rows.insert(0, header)
    print(GithubFlavoredMarkdownTable(rows).table, flush=True)
94
+
95
+
96
if __name__ == '__main__':
    args = parse_args()
    # Rounding precision must be non-negative; at least one measurement run.
    assert args.round_num >= 0
    assert args.repeat_num >= 1

    # The benchmark config lists the models (config path + checkpoint) to time.
    config = Config.fromfile(args.config)

    # Inference-speed measurement only works in distributed mode here.
    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            # NOTE(review): `record_metrics` is read but never used below.
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                # Returns a single float, or a list of floats when
                # repeat_num > 1 (one FPS value per repeat).
                fps = repeat_measure_inference_speed(cfg, checkpoint,
                                                    args.max_iter,
                                                    args.log_interval,
                                                    args.fuse_conv_bn,
                                                    args.repeat_num)
                if args.repeat_num > 1:
                    # Round each run first, then average the rounded values.
                    fps_list = [round(fps_, args.round_num) for fps_ in fps]
                    times_pre_image_list = [
                        round(1000 / fps_, args.round_num) for fps_ in fps
                    ]
                    mean_fps = round(
                        sum(fps_list) / len(fps_list), args.round_num)
                    mean_times_pre_image = round(
                        sum(times_pre_image_list) / len(times_pre_image_list),
                        args.round_num)
                    print(
                        f'{cfg_path} '
                        f'Overall fps: {fps_list}[{mean_fps}] img / s, '
                        f'times per image: '
                        f'{times_pre_image_list}[{mean_times_pre_image}] '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=fps_list,
                        mean_fps=mean_fps,
                        ms_times_pre_image=times_pre_image_list,
                        mean_times_pre_image=mean_times_pre_image)
                else:
                    print(
                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                        f'times per image: {1000 / fps:.{args.round_num}f} '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=round(fps, args.round_num),
                        ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # A failed model is recorded with zeroed metrics so the
                # final table still lists every config.
                print(f'{cfg_path} error: {repr(e)}')
                if args.repeat_num > 1:
                    result_dict[cfg_path] = dict(
                        fps=[0],
                        mean_fps=0,
                        ms_times_pre_image=[0],
                        mean_times_pre_image=0)
                else:
                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

    # Optionally persist the raw numbers next to the printed table.
    if args.out:
        mkdir_or_exist(args.out)
        dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))

    results2markdown(result_dict)
mmdetection/.dev_scripts/benchmark_options.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+
3
# Shell commands that install the optional third-party dependencies needed
# by some benchmarked configs (albumentations, InstaBoost, panoptic API,
# timm, mmpretrain, LVIS API, multimodal/tracking extras, TrackEval).
third_part_libs = [
    'pip install -r ../requirements/albu.txt',
    'pip install instaboostfast',
    'pip install git+https://github.com/cocodataset/panopticapi.git',
    'pip install timm',
    'pip install mmpretrain',
    'pip install git+https://github.com/lvis-dataset/lvis-api.git',
    'pip install -r ../requirements/multimodal.txt',
    'pip install -r ../requirements/tracking.txt',
    'pip install git+https://github.com/JonathonLuiten/TrackEval.git',
]

# Tolerated deviation between a benchmark result and its reference metric.
# Configs not listed in `model_floating_ranges` use the default value.
default_floating_range = 0.5
# Per-config overrides of the floating range.
model_floating_ranges = {'atss/atss_r50_fpn_1x_coco.py': 0.3}
mmdetection/.dev_scripts/benchmark_test.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+ import os
4
+ import os.path as osp
5
+ from argparse import ArgumentParser
6
+
7
+ from mmengine.config import Config, DictAction
8
+ from mmengine.logging import MMLogger
9
+ from mmengine.registry import RUNNERS
10
+ from mmengine.runner import Runner
11
+
12
+ from mmdet.testing import replace_to_ceph
13
+ from mmdet.utils import register_all_modules, replace_cfg_vals
14
+
15
+
16
def parse_args():
    """Parse CLI options for the batch test script.

    Fix: the original called ``parser.parse_args()`` twice (once before and
    once after exporting LOCAL_RANK); the second call was redundant work and
    discarded the first result, so it has been removed.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument('--work-dir', help='the dir to save logs')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Expose the local rank so distributed init can read it from the env.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
43
+
44
+
45
# TODO: Need to refactor test.py so that it can be reused.
def fast_test_model(config_name, checkpoint, args, logger=None):
    """Run the test loop for one model config with the given checkpoint.

    Args:
        config_name (str): Path of the model config to test.
        checkpoint (str): Checkpoint path, loaded via ``cfg.load_from``.
        args: Parsed CLI namespace (launcher, work_dir, ceph, cfg_options).
        logger: Accepted but unused here; failures are caught by the caller.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])

    if args.ceph:
        # Rewrite data/checkpoint paths to the ceph storage backend.
        replace_to_ceph(cfg)

    cfg.load_from = checkpoint

    # TODO: temporary plan
    # Drop the explicit visualizer name — presumably to avoid clashes with a
    # previously registered visualizer instance across runs (TODO confirm).
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)

    runner.test()
83
+
84
+
85
# Sample test whether the inference code is correct
def main(args):
    """Test every model listed in the benchmark config, logging failures."""
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)

    benchmark_cfg = Config.fromfile(args.config)

    # Failures from individual models go into a shared error log.
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_test.log',
        log_level=logging.ERROR)

    for key in benchmark_cfg:
        infos = benchmark_cfg[key]
        infos = infos if isinstance(infos, list) else [infos]
        for info in infos:
            print('processing: ', info['config'], flush=True)
            config_name = info['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  info['checkpoint'].strip())
            try:
                fast_test_model(config_name, checkpoint, args, logger)
            except Exception as e:
                logger.error(f'{config_name} " : {repr(e)}')


if __name__ == '__main__':
    main(parse_args())
mmdetection/.dev_scripts/benchmark_test_image.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+ import os.path as osp
4
+ from argparse import ArgumentParser
5
+
6
+ import mmcv
7
+ from mmengine.config import Config
8
+ from mmengine.logging import MMLogger
9
+ from mmengine.utils import mkdir_or_exist
10
+
11
+ from mmdet.apis import inference_detector, init_detector
12
+ from mmdet.registry import VISUALIZERS
13
+ from mmdet.utils import register_all_modules
14
+
15
+
16
def parse_args():
    """Parse CLI options for the single-image inference benchmark."""
    cli = ArgumentParser()
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint_root', help='Checkpoint file root path')
    cli.add_argument('--img', default='demo/demo.jpg', help='Image file')
    cli.add_argument('--aug', action='store_true', help='aug test')
    cli.add_argument('--model-name', help='model name to inference')
    cli.add_argument('--show', action='store_true', help='show results')
    cli.add_argument('--out-dir', default=None, help='Dir to output file')
    cli.add_argument(
        '--wait-time', type=float, default=1,
        help='the interval of show (s), 0 is block')
    cli.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    cli.add_argument(
        '--palette', default='coco',
        choices=['coco', 'voc', 'citys', 'random'],
        help='Color palette used for visualization')
    cli.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    return cli.parse_args()
41
+
42
+
43
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
    """Run single-image inference for one config; optionally visualize it.

    Args:
        config_name (str): Path of the model config.
        checkpoint (str): Path of the checkpoint file.
        visualizer: Shared DetLocalVisualizer instance built by the caller.
        args: Parsed CLI namespace (img, show, out_dir, device, ...).
        logger: Accepted but unused here; errors are handled by the caller.

    Returns:
        The result object produced by ``inference_detector``.
    """
    cfg = Config.fromfile(config_name)
    if args.aug:
        # Test-time augmentation is not wired up in this script.
        raise NotImplementedError()

    model = init_detector(
        cfg, checkpoint, palette=args.palette, device=args.device)
    # The visualizer needs the model's dataset meta (classes/palette) to draw.
    visualizer.dataset_meta = model.dataset_meta

    # test a single image
    result = inference_detector(model, args.img)

    # show the results
    if args.show or args.out_dir is not None:
        # mmcv reads BGR; convert to RGB for the visualizer.
        img = mmcv.imread(args.img)
        img = mmcv.imconvert(img, 'bgr', 'rgb')
        out_file = None
        if args.out_dir is not None:
            out_dir = args.out_dir
            mkdir_or_exist(out_dir)

            # e.g. configs/foo/bar.py -> <out_dir>/bar.jpg
            out_file = osp.join(
                out_dir,
                config_name.split('/')[-1].replace('py', 'jpg'))

        visualizer.add_datasample(
            'result',
            img,
            data_sample=result,
            draw_gt=False,
            show=args.show,
            wait_time=args.wait_time,
            out_file=out_file,
            pred_score_thr=args.score_thr)

    return result
79
+
80
+
81
# Sample test whether the inference code is correct
def main(args):
    """Run image inference for one model (--model-name) or all models."""
    # register all modules in mmdet into the registries
    register_all_modules()

    # The benchmark config maps model keys to {config, checkpoint} entries.
    config = Config.fromfile(args.config)

    # init visualizer
    visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
    visualizer = VISUALIZERS.build(visualizer_cfg)

    # test single model
    if args.model_name:
        if args.model_name in config:
            model_infos = config[args.model_name]
            if not isinstance(model_infos, list):
                model_infos = [model_infos]
            # Only the first entry of the selected model is inferred.
            model_info = model_infos[0]
            config_name = model_info['config'].strip()
            print(f'processing: {config_name}', flush=True)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            # build the model from a config file and a checkpoint file
            inference_model(config_name, checkpoint, visualizer, args)
            return
        else:
            raise RuntimeError('model name input error.')

    # test all model
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_test_image.log',
        log_level=logging.ERROR)

    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'], flush=True)
            config_name = model_info['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                # build the model from a config file and a checkpoint file
                inference_model(config_name, checkpoint, visualizer, args,
                                logger)
            except Exception as e:
                # Keep going over the remaining models; record the failure.
                logger.error(f'{config_name} " : {repr(e)}')


if __name__ == '__main__':
    args = parse_args()
    main(args)
mmdetection/.dev_scripts/benchmark_train.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import logging
3
+ import os
4
+ import os.path as osp
5
+ from argparse import ArgumentParser
6
+
7
+ from mmengine.config import Config, DictAction
8
+ from mmengine.logging import MMLogger, print_log
9
+ from mmengine.registry import RUNNERS
10
+ from mmengine.runner import Runner
11
+
12
+ from mmdet.testing import replace_to_ceph
13
+ from mmdet.utils import register_all_modules, replace_cfg_vals
14
+
15
+
16
def parse_args():
    """Parse CLI options for the batch training benchmark.

    Fix: the original called ``parser.parse_args()`` twice (once before and
    once after exporting LOCAL_RANK); the second call was redundant work and
    discarded the first result, so it has been removed.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument('--save-ckpt', action='store_true')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='enable automatically scaling LR.')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='resume from the latest checkpoint in the work_dir automatically')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Expose the local rank so distributed init can read it from the env.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
56
+
57
+
58
# TODO: Need to refactor train.py so that it can be reused.
def fast_train_model(config_name, args, logger=None):
    """Run a shortened training loop for one model config.

    A ``FastStopTrainingHook`` is injected so each model only trains for a
    couple of iterations/epochs — enough to smoke-test the training code.

    Args:
        config_name (str): Path of the model config to train.
        args: Parsed CLI namespace (launcher, work_dir, amp, ceph, ...).
        logger: Accepted but unused here; failures are caught by the caller.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])

    # Configure the early-stop hook to match the checkpoint hook's schedule
    # (epoch-based vs iteration-based).
    ckpt_hook = cfg.default_hooks.checkpoint
    by_epoch = ckpt_hook.get('by_epoch', True)
    fast_stop_hook = dict(type='FastStopTrainingHook')
    fast_stop_hook['by_epoch'] = by_epoch
    if args.save_ckpt:
        # Stop late enough that at least one checkpoint gets written.
        if by_epoch:
            interval = 1
            stop_iter_or_epoch = 2
        else:
            interval = 4
            stop_iter_or_epoch = 10
        fast_stop_hook['stop_iter_or_epoch'] = stop_iter_or_epoch
        fast_stop_hook['save_ckpt'] = True
        ckpt_hook.interval = interval

    if 'custom_hooks' in cfg:
        cfg.custom_hooks.append(fast_stop_hook)
    else:
        custom_hooks = [fast_stop_hook]
        cfg.custom_hooks = custom_hooks

    # TODO: temporary plan
    # Drop the explicit visualizer name — presumably to avoid clashes with a
    # previously registered visualizer instance across runs (TODO confirm).
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name

    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type is '
                f'`OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'

    # enable automatically scaling LR
    if args.auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            raise RuntimeError('Can not find "auto_scale_lr" or '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your'
                               ' configuration file.')

    if args.ceph:
        # Rewrite data/checkpoint paths to the ceph storage backend.
        replace_to_ceph(cfg)

    cfg.resume = args.resume

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)

    runner.train()
144
+
145
+
146
# Sample test whether the train code is correct
def main(args):
    """Launch a short training run for every model in the benchmark list."""
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)

    benchmark_cfg = Config.fromfile(args.config)

    # Failures from individual models go into a shared error log.
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_train.log',
        log_level=logging.ERROR)

    for key in benchmark_cfg:
        infos = benchmark_cfg[key]
        infos = infos if isinstance(infos, list) else [infos]
        for info in infos:
            print('processing: ', info['config'], flush=True)
            config_name = info['config'].strip()
            try:
                fast_train_model(config_name, args, logger)
            except RuntimeError as e:
                # quick exit is the normal exit message
                if 'quick exit' not in repr(e):
                    logger.error(f'{config_name} " : {repr(e)}')
            except Exception as e:
                logger.error(f'{config_name} " : {repr(e)}')


if __name__ == '__main__':
    main(parse_args())
mmdetection/.dev_scripts/benchmark_train_models.txt ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ atss/atss_r50_fpn_1x_coco.py
2
+ faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
3
+ mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py
4
+ cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py
5
+ panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py
6
+ retinanet/retinanet_r50_fpn_1x_coco.py
7
+ rtmdet/rtmdet_s_8xb32-300e_coco.py
8
+ rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
9
+ deformable_detr/deformable-detr_r50_16xb2-50e_coco.py
10
+ fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
11
+ centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py
12
+ dino/dino-4scale_r50_8xb2-12e_coco.py
13
+ htc/htc_r50_fpn_1x_coco.py
14
+ mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
15
+ swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py
16
+ condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
17
+ lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py
18
+ mask2former_vis/mask2former_r50_8xb2-8e_youtubevis2021.py
19
+ masktrack_rcnn/masktrack-rcnn_mask-rcnn_r50_fpn_8xb1-12e_youtubevis2021.py
20
+ qdtrack/qdtrack_faster-rcnn_r50_fpn_8xb2-4e_mot17halftrain_test-mot17halfval.py
mmdetection/.dev_scripts/benchmark_valid_flops.py ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import re
3
+ import tempfile
4
+ from argparse import ArgumentParser
5
+ from collections import OrderedDict
6
+ from functools import partial
7
+ from pathlib import Path
8
+
9
+ import numpy as np
10
+ import pandas as pd
11
+ import torch
12
+ from mmengine import Config, DictAction
13
+ from mmengine.analysis import get_model_complexity_info
14
+ from mmengine.analysis.print_helper import _format_size
15
+ from mmengine.fileio import FileClient
16
+ from mmengine.logging import MMLogger
17
+ from mmengine.model import revert_sync_batchnorm
18
+ from mmengine.runner import Runner
19
+ from modelindex.load_model_index import load
20
+ from rich.console import Console
21
+ from rich.table import Table
22
+ from rich.text import Text
23
+ from tqdm import tqdm
24
+
25
+ from mmdet.registry import MODELS
26
+ from mmdet.utils import register_all_modules
27
+
28
+ console = Console()
29
+ MMDET_ROOT = Path(__file__).absolute().parents[1]
30
+
31
+
32
def parse_args():
    """CLI options for validating FLOPs/params of all model-index models."""
    cli = ArgumentParser(description='Valid all models in model-index.yml')
    cli.add_argument(
        '--shape', type=int, nargs='+', default=[1280, 800],
        help='input image size')
    cli.add_argument(
        '--checkpoint_root',
        help='Checkpoint file root path. If set, load checkpoint before test.')
    cli.add_argument('--img', default='demo/demo.jpg', help='Image file')
    cli.add_argument('--models', nargs='+', help='models name to inference')
    cli.add_argument(
        '--batch-size', type=int, default=1,
        help='The batch size during the inference.')
    cli.add_argument(
        '--flops', action='store_true',
        help='Get Flops and Params of models')
    cli.add_argument(
        '--flops-str', action='store_true',
        help='Output FLOPs and params counts in a string form.')
    cli.add_argument(
        '--cfg-options', nargs='+', action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    cli.add_argument(
        '--size_divisor', type=int, default=32,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    return cli.parse_args()
74
+
75
+
76
def _load_cfg_with_syncbn(config_file):
    """Load a config and, when it declares ``head_norm_cfg``, force the
    ROI heads' norm layers to SyncBN (shared by both measuring strategies)."""
    cfg = Config.fromfile(config_file)
    if hasattr(cfg, 'head_norm_cfg'):
        cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
        cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
            type='SyncBN', requires_grad=True)
        cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
            type='SyncBN', requires_grad=True)
    return cfg


def _direct_complexity(cfg, input_shape):
    """Measure model complexity on a random tensor (no dataset needed)."""
    model = MODELS.build(cfg.model)
    # Renamed from `input`, which shadowed the builtin.
    dummy_input = torch.rand(1, *input_shape)
    if torch.cuda.is_available():
        model.cuda()
        dummy_input = dummy_input.cuda()
    model = revert_sync_batchnorm(model)
    model.eval()
    return get_model_complexity_info(
        model, input_shape, (dummy_input, ), show_table=False,
        show_arch=False)


def _dataloader_complexity(cfg, input_shape):
    """Measure model complexity on a real batch from the val dataloader."""
    data_loader = Runner.build_dataloader(cfg.val_dataloader)
    data_batch = next(iter(data_loader))
    model = MODELS.build(cfg.model)
    if torch.cuda.is_available():
        model = model.cuda()
    model = revert_sync_batchnorm(model)
    model.eval()
    _forward = model.forward
    data = model.data_preprocessor(data_batch)
    del data_loader
    # Bind the data samples so the complexity hook can call model(inputs)
    # with matching metadata.
    model.forward = partial(_forward, data_samples=data['data_samples'])
    return get_model_complexity_info(
        model, input_shape, data['inputs'], show_table=False,
        show_arch=False)


def inference(config_file, checkpoint, work_dir, args, exp_name):
    """Validate one model config and (optionally) compute its FLOPs/params.

    Args:
        config_file (Path): Path of the model config.
        checkpoint (str | None): Checkpoint to load, or None.
        work_dir (str): Working directory for the run.
        args: Parsed CLI namespace (shape, flops, flops_str, ...).
        exp_name (str): Experiment name stored in the config.

    Returns:
        dict: Keys ``model`` and, when ``--flops`` is set, ``resolution``,
        ``Get Types`` ('direct' or 'dataloader'), ``flops`` and ``params``.
    """
    logger = MMLogger.get_instance(name='MMLogger')
    logger.warning('if you want test flops, please make sure torch>=1.12')
    cfg = Config.fromfile(config_file)
    cfg.work_dir = work_dir
    cfg.load_from = checkpoint
    cfg.log_level = 'WARN'
    cfg.experiment_name = exp_name
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # forward the model
    result = {'model': config_file.stem}

    if args.flops:
        if len(args.shape) == 1:
            h = w = args.shape[0]
        elif len(args.shape) == 2:
            h, w = args.shape
        else:
            raise ValueError('invalid input shape')
        divisor = args.size_divisor
        if divisor > 0:
            # Pad up to the nearest multiple of size_divisor.
            h = int(np.ceil(h / divisor)) * divisor
            w = int(np.ceil(w / divisor)) * divisor

        input_shape = (3, h, w)
        result['resolution'] = input_shape

        try:
            cfg = _load_cfg_with_syncbn(config_file)
            if args.cfg_options is not None:
                cfg.merge_from_dict(args.cfg_options)
            outputs = _direct_complexity(cfg, input_shape)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'direct'
        # Bug fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        except Exception:
            logger.warning(
                'Direct get flops failed, try to get flops with data')
            outputs = _dataloader_complexity(
                _load_cfg_with_syncbn(config_file), input_shape)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'dataloader'

        if args.flops_str:
            flops = _format_size(flops)
            params = _format_size(params)
            activations = _format_size(activations)

        result['flops'] = flops
        result['params'] = params

    return result
175
+
176
+
177
def show_summary(summary_data, args):
    """Render the validation summary as a rich table and mirror it to CSV."""
    table = Table(title='Validation Benchmark Regression Summary')
    table.add_column('Model')
    table.add_column('Validation')
    table.add_column('Resolution (c, h, w)')
    if args.flops:
        table.add_column('Flops', justify='right', width=11)
        table.add_column('Params', justify='right')

    for model_name, info in summary_data.items():
        status = info['valid']
        status_color = 'green' if status == 'PASS' else 'red'
        cells = [model_name, f'[{status_color}]{status}[/{status_color}]']
        if status == 'PASS':
            cells.append(str(info['resolution']))
            if args.flops:
                cells.append(str(info['flops']))
                cells.append(str(info['params']))
        table.add_row(*cells)

    console.print(table)
    # Strip the rich color markup before exporting to CSV.
    plain_columns = {
        col.header: [Text.from_markup(cell).plain for cell in col.cells]
        for col in table.columns
    }
    pd.DataFrame(plain_columns).to_csv('./mmdetection_flops.csv')
205
+
206
+
207
# Sample test whether the inference code is correct
def main(args):
    """Validate every model in model-index.yml (optionally with FLOPs).

    Models can be filtered via ``--models``; checkpoints are resolved under
    ``--checkpoint_root`` when given (local path or petrel ``s3://`` bucket).
    """
    register_all_modules()
    model_index_file = MMDET_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})

    logger = MMLogger(
        'validation',
        logger_name='validation',
        log_file='benchmark_test_image.log',
        log_level=logging.INFO)

    if args.models:
        # '+' is regex-special and appears in some model names; normalize it
        # on both sides before matching.
        patterns = [
            re.compile(pattern.replace('+', '_')) for pattern in args.models
        ]
        filter_models = {}
        for k, v in models.items():
            k = k.replace('+', '_')
            if any(re.match(pattern, k) for pattern in patterns):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    summary_data = {}
    tmpdir = tempfile.TemporaryDirectory()
    for model_name, model_info in tqdm(models.items()):

        if model_info.config is None:
            continue

        model_info.config = model_info.config.replace('%2B', '+')
        config = Path(model_info.config)

        # Bug fix: the original wrapped ``config.exists()`` in try/except,
        # but ``Path.exists()`` returns a bool instead of raising, so a
        # missing config was never skipped. Check the boolean explicitly,
        # relative to the repo root (which is how the config is used below).
        if not (MMDET_ROOT / config).exists():
            logger.error(f'{model_name}: {config} not found.')
            continue

        logger.info(f'Processing: {model_name}')

        http_prefix = 'https://download.openmmlab.com/mmdetection/'
        if args.checkpoint_root is not None:
            root = args.checkpoint_root
            if 's3://' in args.checkpoint_root:
                from petrel_client.common.exception import AccessDeniedError
                file_client = FileClient.infer_client(uri=root)
                checkpoint = file_client.join_path(
                    root, model_info.weights[len(http_prefix):])
                try:
                    exists = file_client.exists(checkpoint)
                except AccessDeniedError:
                    exists = False
            else:
                checkpoint = Path(root) / model_info.weights[len(http_prefix):]
                exists = checkpoint.exists()
            if exists:
                checkpoint = str(checkpoint)
            else:
                print(f'WARNING: {model_name}: {checkpoint} not found.')
                checkpoint = None
        else:
            checkpoint = None

        try:
            # build the model from a config file and a checkpoint file
            result = inference(MMDET_ROOT / config, checkpoint, tmpdir.name,
                               args, model_name)
            result['valid'] = 'PASS'
        except Exception:
            import traceback
            logger.error(f'"{config}" :\n{traceback.format_exc()}')
            result = {'valid': 'FAIL'}

        summary_data[model_name] = result

    tmpdir.cleanup()
    show_summary(summary_data, args)


if __name__ == '__main__':
    args = parse_args()
    main(args)
mmdetection/.dev_scripts/check_links.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified from:
2
+ # https://github.com/allenai/allennlp/blob/main/scripts/check_links.py
3
+
4
+ import argparse
5
+ import logging
6
+ import os
7
+ import pathlib
8
+ import re
9
+ import sys
10
+ from multiprocessing.dummy import Pool
11
+ from typing import NamedTuple, Optional, Tuple
12
+
13
+ import requests
14
+ from mmengine.logging import MMLogger
15
+
16
+
17
def parse_args():
    """Parse command-line options for the markdown link checker."""
    parser = argparse.ArgumentParser(
        description='Goes through all the inline-links '
        'in markdown files and reports the breakages')
    parser.add_argument(
        '--num-threads',
        type=int,
        default=100,
        help='Number of processes to confirm the link')
    parser.add_argument('--https-proxy', type=str, help='https proxy')
    parser.add_argument(
        '--out',
        type=str,
        default='link_reports.txt',
        help='output path of reports')
    # Parse and hand back the namespace in one step.
    return parser.parse_args()
34
+
35
+
36
+ OK_STATUS_CODES = (
37
+ 200,
38
+ 401, # the resource exists but may require some sort of login.
39
+ 403, # ^ same
40
+ 405, # HEAD method not allowed.
41
+ # the resource exists, but our default 'Accept-' header may not
42
+ # match what the server can provide.
43
+ 406,
44
+ )
45
+
46
+
47
class MatchTuple(NamedTuple):
    """One inline markdown link: source file, display text and target."""

    source: str
    name: str
    link: str
51
+
52
+
53
def check_link(
        match_tuple: MatchTuple,
        http_session: requests.Session,
        logger: Optional[logging.Logger] = None
) -> Tuple[MatchTuple, bool, Optional[str]]:
    """Check a single markdown link.

    Links starting with ``http`` are verified over the network; everything
    else is treated as a repository-relative path. The per-link verdict is
    printed when no logger is given, otherwise logged.

    BUG FIX: the ``logger`` parameter was annotated as ``logging`` (the
    module itself); the correct type is ``Optional[logging.Logger]``.

    Returns:
        Tuple of the original match, whether it is reachable, and an
        optional failure reason (URL links only).
    """
    reason: Optional[str] = None
    if match_tuple.link.startswith('http'):
        result_ok, reason = check_url(match_tuple, http_session)
    else:
        result_ok = check_path(match_tuple)
    msg = f" {'✓' if result_ok else '✗'} {match_tuple.link}"
    if logger is None:
        print(msg)
    else:
        logger.info(msg)
    return match_tuple, result_ok, reason
67
+
68
+
69
def check_url(match_tuple: MatchTuple,
              http_session: requests.Session) -> Tuple[bool, str]:
    """Check if a URL is reachable.

    Uses a HEAD request with redirects; several "exists but refuses HEAD"
    status codes are accepted via ``OK_STATUS_CODES``.

    Returns:
        Tuple of (reachable, human-readable reason).
    """
    try:
        result = http_session.head(
            match_tuple.link, timeout=5, allow_redirects=True)
        return (
            result.ok or result.status_code in OK_STATUS_CODES,
            f'status code = {result.status_code}',
        )
    except (requests.ConnectionError, requests.Timeout):
        return False, 'connection error'
    except requests.RequestException as e:
        # BUG FIX: malformed URLs, redirect loops, etc. previously raised
        # out of the worker and aborted the whole pool; report them as
        # broken links instead.
        return False, f'request error: {type(e).__name__}'
81
+
82
+
83
def check_path(match_tuple: 'MatchTuple') -> bool:
    """Check if a file in this repository exists."""
    # Drop any '#anchor' fragment before resolving the path.
    target = match_tuple.link.split('#')[0]
    base_dir = os.path.dirname(str(match_tuple.source))
    return os.path.exists(os.path.join(base_dir, target))
89
+
90
+
91
def main():
    """Collect every inline markdown link in the repo and report breakages.

    Scans ``**/*.md`` under the project root, checks each unique link
    concurrently, logs the results to ``--out``, and exits with status 1
    if any link is unreachable.
    """
    args = parse_args()

    # setup logger
    logger = MMLogger.get_instance(name='mmdet', log_file=args.out)

    # setup https_proxy
    if args.https_proxy:
        os.environ['https_proxy'] = args.https_proxy

    # setup http_session
    http_session = requests.Session()
    for resource_prefix in ('http://', 'https://'):
        http_session.mount(
            resource_prefix,
            requests.adapters.HTTPAdapter(
                max_retries=5,
                pool_connections=20,
                pool_maxsize=args.num_threads),
        )

    logger.info('Finding all markdown files in the current directory...')

    project_root = (pathlib.Path(__file__).parent / '..').resolve()
    markdown_files = project_root.glob('**/*.md')

    # Collect unique (source, name, link) triples, skipping localhost links.
    all_matches = set()
    url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)')
    for markdown_file in markdown_files:
        with open(markdown_file) as handle:
            for line in handle:
                for name, link in url_regex.findall(line):
                    if 'localhost' not in link:
                        all_matches.add(
                            MatchTuple(
                                source=str(markdown_file),
                                name=name,
                                link=link))

    # BUG FIX: the counter holds the number of unique links, but the old
    # message claimed it was the number of markdown files.
    logger.info(f' {len(all_matches)} links found')
    logger.info('Checking to make sure we can retrieve each link...')

    with Pool(processes=args.num_threads) as pool:
        results = pool.starmap(check_link, [(match, http_session, logger)
                                            for match in list(all_matches)])

    # collect unreachable results
    unreachable_results = [(match_tuple, reason)
                           for match_tuple, success, reason in results
                           if not success]

    if unreachable_results:
        logger.info('================================================')
        logger.info(f'Unreachable links ({len(unreachable_results)}):')
        for match_tuple, reason in unreachable_results:
            logger.info(' > Source: ' + match_tuple.source)
            logger.info(' Name: ' + match_tuple.name)
            logger.info(' Link: ' + match_tuple.link)
            if reason is not None:
                logger.info(' Reason: ' + reason)
        sys.exit(1)
    logger.info('No Unreachable link found.')
154
+
155
+
156
+ if __name__ == '__main__':
157
+ main()
mmdetection/.dev_scripts/convert_test_benchmark_script.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import os
4
+ import os.path as osp
5
+
6
+ from mmengine import Config
7
+
8
+
9
def parse_args():
    """Parse command-line options for generating the benchmark test script."""
    parser = argparse.ArgumentParser(
        description='Convert benchmark model list to script')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--port', type=int, default=29666, help='dist port')
    parser.add_argument(
        '--run', action='store_true', help='run script directly')
    parser.add_argument(
        '--out', type=str, help='path to save model benchmark script')
    return parser.parse_args()
21
+
22
+
23
def process_model_info(model_info, work_dir):
    """Normalize one model entry into the fields used by the test script.

    NOTE(review): the ``work_dir`` argument is kept for interface
    compatibility but is not used — the generated path is always rooted at
    the ``$WORK_DIR`` shell variable.
    """
    config = model_info['config'].strip()
    fname = osp.splitext(osp.basename(config))[0]
    return dict(
        config=config,
        job_name=fname,
        work_dir='$WORK_DIR/' + fname,
        checkpoint=model_info['checkpoint'].strip())
34
+
35
+
36
def create_test_bash_info(commands, model_test_dict, port, script_name,
                          partition):
    """Append the echo + slurm test command for one model to ``commands``."""
    config = model_test_dict['config']
    job_name = model_test_dict['job_name']
    checkpoint = model_test_dict['checkpoint']
    work_dir = model_test_dict['work_dir']

    # Echo the config first so the generated script is self-describing.
    commands.append(f' \necho \'{config}\' &')
    commands.append('\n')

    # Assemble the slurm_test.sh invocation; each fragment keeps its own
    # trailing space, matching the generated-script format.
    parts = [
        f'GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK {script_name} ',
        f'{partition} ',
        f'{job_name} ',
        f'{config} ',
        f'$CHECKPOINT_DIR/{checkpoint} ',
        f'--work-dir {work_dir} ',
        f'--cfg-option env_cfg.dist_cfg.port={port} ',
        ' &',
    ]
    commands.append(''.join(parts))
60
+
61
+
62
def main():
    """Generate (and optionally run) a batch slurm test script."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run/ the '
         'script) with the argument "--out" or "--run"')

    # Shell-variable preamble of the generated script.
    commands = []
    work_dir = 'WORK_DIR=$3 '
    for preamble in ('PARTITION=$1 ', 'CHECKPOINT_DIR=$2 ', work_dir,
                     'CPUS_PER_TASK=${4:-2} '):
        commands.append(preamble)
        commands.append('\n')

    script_name = osp.join('tools', 'slurm_test.sh')
    port = args.port

    cfg = Config.fromfile(args.config)

    # Each top-level key may hold one model info dict or a list of them;
    # every model gets its own dist port.
    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'])
            model_test_dict = process_model_info(model_info, work_dir)
            create_test_bash_info(commands, model_test_dict, port,
                                  script_name, '$PARTITION')
            port += 1

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)
111
+
112
+
113
+ if __name__ == '__main__':
114
+ main()
mmdetection/.dev_scripts/convert_train_benchmark_script.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import os
4
+ import os.path as osp
5
+
6
+
7
def parse_args():
    """Parse command-line options for generating the benchmark train script."""
    parser = argparse.ArgumentParser(
        description='Convert benchmark model json to script')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--run', action='store_true', help='run script directly')
    parser.add_argument(
        '--out', type=str, help='path to save model benchmark script')
    return parser.parse_args()
19
+
20
+
21
def determine_gpus(cfg_name):
    """Infer ``(num_gpus, gpus_per_node)`` from the config file name.

    Naming conventions: '16x' configs train on 16 GPUs across two nodes,
    '4xb4' on a single 4-GPU node, LAD configs on 2 GPUs; everything else
    defaults to one 8-GPU node. Checks are ordered, first match wins.
    """
    if '16x' in cfg_name:
        return 16, 8
    if '4xb4' in cfg_name:
        return 4, 4
    if 'lad' in cfg_name:
        return 2, 2
    return 8, 8
35
+
36
+
37
def main():
    """Generate (and optionally run) a batch slurm training script."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run/ the '
         'script) with the argument "--out" or "--run"')

    train_script_name = osp.join('./tools', 'slurm_train.sh')

    # Shell-variable preamble of the generated script.
    commands = ['PARTITION=$1 ', '\n', 'WORK_DIR=$2 ', '\n',
                'CPUS_PER_TASK=${3:-4} ', '\n', '\n']

    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, cfg in enumerate(model_cfgs):
            cfg = cfg.strip()
            if not cfg:
                continue
            # print cfg name
            commands.append(f'echo \'{cfg}\' &')
            commands.append('\n')

            fname = osp.splitext(osp.basename(cfg))[0]
            out_fname = '$WORK_DIR/' + fname

            gpus, gpus_pre_node = determine_gpus(cfg)
            commands.append(
                f'GPUS={gpus} GPUS_PER_NODE={gpus_pre_node} '
                f'CPUS_PER_TASK=$CPUS_PRE_TASK {train_script_name} '
                f'$PARTITION {fname} {cfg} {out_fname} '
                '--cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &')

            if i < len(model_cfgs):
                commands.append('\n')

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)
101
+
102
+
103
+ if __name__ == '__main__':
104
+ main()
mmdetection/.dev_scripts/covignore.cfg ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Each line should be the relative path to the root directory
2
+ # of this repo. Support regular expression as well.
3
+ # For example:
4
+
5
+ .*/__init__.py
mmdetection/.dev_scripts/diff_coverage_test.sh ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ readarray -t IGNORED_FILES < $( dirname "$0" )/covignore.cfg
4
+ REUSE_COVERAGE_REPORT=${REUSE_COVERAGE_REPORT:-0}
5
+ REPO=${1:-"origin"}
6
+ BRANCH=${2:-"refactor_dev"}
7
+
8
+ git fetch $REPO $BRANCH
9
+
10
+ PY_FILES=""
11
+ for FILE_NAME in $(git diff --name-only ${REPO}/${BRANCH}); do
12
+ # Only test python files in mmdet/ existing in current branch, and not ignored in covignore.cfg
13
+ if [ ${FILE_NAME: -3} == ".py" ] && [ ${FILE_NAME:0:6} == "mmdet/" ] && [ -f "$FILE_NAME" ]; then
14
+ IGNORED=false
15
+ for IGNORED_FILE_NAME in "${IGNORED_FILES[@]}"; do
16
+ # Skip blank lines
17
+ if [ -z "$IGNORED_FILE_NAME" ]; then
18
+ continue
19
+ fi
20
+ if [ "${IGNORED_FILE_NAME::1}" != "#" ] && [[ "$FILE_NAME" =~ $IGNORED_FILE_NAME ]]; then
21
+ echo "Ignoring $FILE_NAME"
22
+ IGNORED=true
23
+ break
24
+ fi
25
+ done
26
+ if [ "$IGNORED" = false ]; then
27
+ PY_FILES="$PY_FILES $FILE_NAME"
28
+ fi
29
+ fi
30
+ done
31
+
32
+ # Only test the coverage when PY_FILES are not empty, otherwise they will test the entire project
33
+ if [ ! -z "${PY_FILES}" ]
34
+ then
35
+ if [ "$REUSE_COVERAGE_REPORT" == "0" ]; then
36
+ coverage run --branch --source mmdet -m pytest tests/
37
+ fi
38
+ coverage report --fail-under 80 -m $PY_FILES
39
+ interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 95 $PY_FILES
40
+ fi
mmdetection/.dev_scripts/download_checkpoints.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+
3
+ import argparse
4
+ import math
5
+ import os
6
+ import os.path as osp
7
+ from multiprocessing import Pool
8
+
9
+ import torch
10
+ from mmengine.config import Config
11
+ from mmengine.utils import mkdir_or_exist
12
+
13
+
14
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    """Download ``url`` to ``out_file`` via torch.hub, with a curl fallback.

    Any result smaller than ``min_bytes`` (default 1 MB) is treated as a
    failed download and removed.
    """
    # math.pow(1024, 2) is mean 1 MB
    assert_msg = f"Downloaded url '{url}' does not exist " \
                 f'or size is < min_bytes={min_bytes}'
    try:
        print(f'Downloading {url} to {out_file}...')
        torch.hub.download_url_to_file(url, str(out_file), progress=progress)
        assert osp.exists(
            out_file) and osp.getsize(out_file) > min_bytes, assert_msg
    except Exception as e:
        if osp.exists(out_file):
            os.remove(out_file)
        print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
        # curl download, retry and resume on fail
        # NOTE(review): the URL is interpolated into a shell string; these
        # URLs come from trusted benchmark configs, but a shell=False
        # invocation would be safer — confirm before reuse elsewhere.
        os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -")
    finally:
        if osp.exists(out_file) and osp.getsize(out_file) < min_bytes:
            os.remove(out_file)  # remove partial downloads

        if not osp.exists(out_file):
            print(f'ERROR: {assert_msg}\n')
        print('=========================================\n')
36
+
37
+
38
def parse_args():
    """Parse command-line options for the checkpoint downloader."""
    parser = argparse.ArgumentParser(description='Download checkpoints')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'out', type=str, help='output dir of checkpoints to be stored')
    parser.add_argument(
        '--nproc', type=int, default=16, help='num of Processes')
    parser.add_argument(
        '--intranet',
        action='store_true',
        help='switch to internal network url')
    return parser.parse_args()
51
+
52
+
53
+ if __name__ == '__main__':
54
+ args = parse_args()
55
+ mkdir_or_exist(args.out)
56
+
57
+ cfg = Config.fromfile(args.config)
58
+
59
+ checkpoint_url_list = []
60
+ checkpoint_out_list = []
61
+
62
+ for model in cfg:
63
+ model_infos = cfg[model]
64
+ if not isinstance(model_infos, list):
65
+ model_infos = [model_infos]
66
+ for model_info in model_infos:
67
+ checkpoint = model_info['checkpoint']
68
+ out_file = osp.join(args.out, checkpoint)
69
+ if not osp.exists(out_file):
70
+
71
+ url = model_info['url']
72
+ if args.intranet is True:
73
+ url = url.replace('.com', '.sensetime.com')
74
+ url = url.replace('https', 'http')
75
+
76
+ checkpoint_url_list.append(url)
77
+ checkpoint_out_list.append(out_file)
78
+
79
+ if len(checkpoint_url_list) > 0:
80
+ pool = Pool(min(os.cpu_count(), args.nproc))
81
+ pool.starmap(download, zip(checkpoint_url_list, checkpoint_out_list))
82
+ else:
83
+ print('No files to download!')
mmdetection/.dev_scripts/gather_models.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import glob
4
+ import os
5
+ import os.path as osp
6
+ import shutil
7
+ import subprocess
8
+ import time
9
+ from collections import OrderedDict
10
+
11
+ import torch
12
+ import yaml
13
+ from mmengine.config import Config
14
+ from mmengine.fileio import dump
15
+ from mmengine.utils import mkdir_or_exist, scandir
16
+
17
+
18
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """``yaml.dump`` variant that keeps ``OrderedDict`` insertion order."""

    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, mapping):
        # Represent OrderedDict as a plain YAML mapping, in item order.
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, mapping.items())

    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)
29
+
30
+
31
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state and all EMA / data-preprocessor entries,
    saves the slimmed checkpoint, then renames it to
    ``<out_file stem>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the trained checkpoint.
        out_file (str): Target path; the published file gets a hash suffix.

    Returns:
        str: Path of the final, hash-suffixed checkpoint file.
    """
    import hashlib  # local import: keeps the module's dependency list unchanged

    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    checkpoint.pop('optimizer', None)
    checkpoint.pop('ema_state_dict', None)

    # remove ema / data-preprocessor weights from the state dict
    for key in list(checkpoint['state_dict']):
        if key.startswith(('ema_', 'data_preprocessor')):
            checkpoint['state_dict'].pop(key)

    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # BUG FIX: the old string comparison ``torch.__version__ >= '1.6'`` is
    # wrong for versions such as '1.10' ('1.10' < '1.6' lexicographically);
    # compare parsed (major, minor) integers instead.
    version = tuple(
        int(part) for part in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)

    # BUG FIX: ``str.rstrip('.pth')`` strips *characters* ('.', 'p', 't',
    # 'h'), not the suffix, mangling names that end in those letters.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    # Hash in chunks so large checkpoints are not read into memory at once.
    hasher = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            hasher.update(chunk)
    final_file = f'{stem}-{hasher.hexdigest()[:8]}.pth'
    # BUG FIX: ``subprocess.Popen(['mv', ...])`` returned before the rename
    # finished, so callers could observe a missing file; rename synchronously.
    os.replace(out_file, final_file)
    return final_file
56
+
57
+
58
def is_by_epoch(config):
    """Whether ``config`` (relative to ./configs) uses an epoch-based loop."""
    train_cfg = Config.fromfile('./configs/' + config).train_cfg
    return train_cfg.type == 'EpochBasedTrainLoop'
61
+
62
+
63
def get_final_epoch_or_iter(config):
    """Return the configured training length (max epochs or max iters)."""
    train_cfg = Config.fromfile('./configs/' + config).train_cfg
    if train_cfg.type == 'EpochBasedTrainLoop':
        return train_cfg.max_epochs
    return train_cfg.max_iters
69
+
70
+
71
def get_best_epoch_or_iter(exp_dir):
    """Locate the ``best_*.pth`` checkpoint in ``exp_dir``.

    Returns the checkpoint file name and the epoch/iteration number parsed
    from its trailing ``_<N>.pth`` component. With several matches, the
    lexicographically last one is used.
    """
    candidates = sorted(glob.glob(osp.join(exp_dir, 'best_*.pth')))
    best_path = candidates[-1]
    file_name = best_path.split('/')[-1]
    number = file_name.rsplit('_', 1)[-1].split('.')[0]
    return file_name, int(number)
78
+
79
+
80
def get_real_epoch_or_iter(config):
    """Return the real training length recorded in the config (epochs for
    epoch-based loops, iterations otherwise)."""
    train_cfg = Config.fromfile('./configs/' + config).train_cfg
    if train_cfg.type == 'EpochBasedTrainLoop':
        return train_cfg.max_epochs
    return train_cfg.max_iters
87
+
88
+
89
def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut='coco/bbox_mAP',
                      by_epoch=True):
    """Read the last metric value from a training log file.

    Only the final line of the log is inspected; ``epoch_or_iter`` and
    ``by_epoch`` are accepted for interface compatibility but unused.
    The value is returned as the raw string parsed from the line.
    """
    with open(log_json_path) as f:
        last_line = f.readlines()[-1]
    # Take the text after the first ': ' of the first comma-separated field.
    last_metric = last_line.split(',')[0].split(': ')[-1].strip()
    return {results_lut: last_metric}
99
+
100
+
101
def get_dataset_name(config):
    """Map the config's ``dataset_type`` to a human-readable dataset name."""
    # If there are more dataset, add here.
    name_map = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
        'OpenImagesDataset': 'OpenImagesDataset',
        'OpenImagesChallengeDataset': 'OpenImagesChallengeDataset',
        'Objects365V1Dataset': 'Objects365 v1',
        'Objects365V2Dataset': 'Objects365 v2',
    }
    cfg = Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]
118
+
119
+
120
def find_last_dir(model_dir):
    """Return the name of the newest timestamped (``%Y%m%d_%H%M%S``)
    subdirectory of ``model_dir``."""
    stamped = []
    for entry in os.scandir(model_dir):
        if osp.isdir(entry):
            timestamp = time.mktime(
                time.strptime(entry.name, '%Y%m%d_%H%M%S'))
            stamped.append([timestamp, entry.name])
    return max(stamped, key=lambda item: item[0])[1]
128
+
129
+
130
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos into paperswithcode metafile entries.

    Args:
        model_infos (list[dict]): Each dict needs ``config``, ``results``
            and ``model_path`` keys (plus ``epochs`` when epoch-based).

    Returns:
        dict: Mapping from config folder name to a list of metafile entries.
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        meta_data = OrderedDict()
        if 'epochs' in model:
            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
        else:
            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results

        # BUG FIX: ``rstrip('.py')`` strips the characters '.', 'p' and 'y'
        # from the end, corrupting config names such as ``..._copy.py``;
        # remove the literal '.py' suffix instead.
        config_stem = model['config']
        if config_stem.endswith('.py'):
            config_stem = config_stem[:-len('.py')]
        link_string = 'https://download.openmmlab.com/mmdetection/v3.0/'
        link_string += '{}/{}'.format(config_stem,
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        pwc_files.setdefault(cfg_folder_name, []).append(pwc_model_info)
    return pwc_files
185
+
186
+
187
def parse_args():
    """Parse command-line options for gathering benchmarked models."""
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        default='work_dirs',
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out',
        type=str,
        default='gather',
        help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    return parser.parse_args()
206
+
207
+
208
def main():
    """Gather finished benchmark checkpoints, slim them and emit metafiles.

    Walks ``./configs`` for configs that have a work dir under ``args.root``,
    picks the final (or best) checkpoint of each, strips training state,
    copies logs/configs next to it under ``args.out``, and writes
    ``model_info.json`` plus paperswithcode metafile YAMLs.
    """
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        by_epoch = is_by_epoch(used_config)
        # check whether the exps is finished
        if args.best is True:
            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
        else:
            final_epoch_or_iter = get_final_epoch_or_iter(used_config)
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        latest_exp_name = find_last_dir(exp_dir)
        latest_exp_json = osp.join(exp_dir, latest_exp_name, 'vis_data',
                                   latest_exp_name + '.json')

        model_performance = get_final_results(
            latest_exp_json, final_epoch_or_iter, by_epoch=by_epoch)

        if model_performance is None:
            continue

        model_info = dict(
            config=used_config,
            results=model_performance,
            final_model=final_model,
            latest_exp_json=latest_exp_json,
            latest_exp_name=latest_exp_name)
        model_info['epochs' if by_epoch else 'iterations'] =\
            final_epoch_or_iter
        model_infos.append(model_info)

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # BUG FIX: ``rstrip('.py')`` strips the characters '.', 'p' and 'y'
        # from the end (corrupting names such as ``..._copy.py``); remove
        # the literal '.py' suffix instead.
        config_stem = model['config']
        if config_stem.endswith('.py'):
            config_stem = config_stem[:-len('.py')]
        model_publish_dir = osp.join(models_out, config_stem)
        mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]

        model_name += '_' + model['latest_exp_name']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(model['latest_exp_json'],
                    osp.join(model_publish_dir, f'{model_name}.log.json'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
305
+
306
+
307
+ if __name__ == '__main__':
308
+ main()
mmdetection/.dev_scripts/gather_test_benchmark_metric.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import glob
4
+ import os.path as osp
5
+
6
+ from mmengine.config import Config
7
+ from mmengine.fileio import dump, load
8
+ from mmengine.utils import mkdir_or_exist
9
+
10
+
11
def parse_args():
    """Parse command-line options for gathering test benchmark metrics."""
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--show-all', action='store_true', help='show all model metrics')
    return parser.parse_args()
28
+
29
+
30
+ if __name__ == '__main__':
31
+ args = parse_args()
32
+
33
+ root_path = args.root
34
+ metrics_out = args.out
35
+ result_dict = {}
36
+
37
+ cfg = Config.fromfile(args.config)
38
+
39
+ for model_key in cfg:
40
+ model_infos = cfg[model_key]
41
+ if not isinstance(model_infos, list):
42
+ model_infos = [model_infos]
43
+ for model_info in model_infos:
44
+ record_metrics = model_info['metric']
45
+ config = model_info['config'].strip()
46
+ fname, _ = osp.splitext(osp.basename(config))
47
+ metric_json_dir = osp.join(root_path, fname)
48
+ if osp.exists(metric_json_dir):
49
+ json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
50
+ if len(json_list) > 0:
51
+ log_json_path = list(sorted(json_list))[-1]
52
+
53
+ metric = load(log_json_path)
54
+ if config in metric.get('config', {}):
55
+
56
+ new_metrics = dict()
57
+ for record_metric_key in record_metrics:
58
+ record_metric_key_bk = record_metric_key
59
+ old_metric = record_metrics[record_metric_key]
60
+ if record_metric_key == 'AR_1000':
61
+ record_metric_key = 'AR@1000'
62
+ if record_metric_key not in metric['metric']:
63
+ raise KeyError(
64
+ 'record_metric_key not exist, please '
65
+ 'check your config')
66
+ new_metric = round(
67
+ metric['metric'][record_metric_key] * 100, 1)
68
+ new_metrics[record_metric_key_bk] = new_metric
69
+
70
+ if args.show_all:
71
+ result_dict[config] = dict(
72
+ before=record_metrics, after=new_metrics)
73
+ else:
74
+ for record_metric_key in record_metrics:
75
+ old_metric = record_metrics[record_metric_key]
76
+ new_metric = new_metrics[record_metric_key]
77
+ if old_metric != new_metric:
78
+ result_dict[config] = dict(
79
+ before=record_metrics,
80
+ after=new_metrics)
81
+ break
82
+ else:
83
+ print(f'{config} not included in: {log_json_path}')
84
+ else:
85
+ print(f'{config} not exist file: {metric_json_dir}')
86
+ else:
87
+ print(f'{config} not exist dir: {metric_json_dir}')
88
+
89
+ if metrics_out:
90
+ mkdir_or_exist(metrics_out)
91
+ dump(result_dict, osp.join(metrics_out, 'batch_test_metric_info.json'))
92
+ if not args.not_show:
93
+ print('===================================')
94
+ for config_name, metrics in result_dict.items():
95
+ print(config_name, metrics)
96
+ print('===================================')
mmdetection/.dev_scripts/gather_train_benchmark_metric.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import argparse
3
+ import glob
4
+ import os.path as osp
5
+
6
+ from gather_models import get_final_results
7
+ from mmengine.config import Config
8
+ from mmengine.fileio import dump
9
+ from mmengine.utils import mkdir_or_exist
10
+
11
+ try:
12
+ import xlrd
13
+ except ImportError:
14
+ xlrd = None
15
+ try:
16
+ import xlutils
17
+ from xlutils.copy import copy
18
+ except ImportError:
19
+ xlutils = None
20
+
21
+
22
def parse_args():
    """Parse command-line arguments for gathering benchmarked train metrics.

    Returns:
        argparse.Namespace: Parsed arguments with fields ``root``,
        ``txt_path``, ``out``, ``not_show``, ``excel`` and ``ncol``.
    """
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    # Required positional inputs: where the benchmark results live and the
    # config list produced by benchmark_filter.
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    # --excel and --ncol must be supplied together (validated by the caller).
    parser.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument(
        '--ncol', type=int, help='Number of column to be modified or appended')

    args = parser.parse_args()
    return args
42
+
43
+
44
if __name__ == '__main__':
    args = parse_args()

    # Optional excel bookkeeping: open the workbook up front and index the
    # existing rows by config path so metrics can be updated in place.
    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed,'
                'Please use “pip install xlrd==1.2.0” to install')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed,'
                'Please use “pip install xlutils==2.0.0” to install')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
        # Rows 0-2 are assumed to be header rows; data starts at row 3.
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out

    result_dict = {}
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            # Each config's work dir is named after the config file stem.
            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1 read config
                cfg = Config.fromfile(config)
                total_epochs = cfg.runner.max_epochs
                final_results = cfg.evaluation.metric
                if not isinstance(final_results, list):
                    final_results = [final_results]
                final_results_out = []
                for key in final_results:
                    if 'proposal_fast' in key:
                        final_results_out.append('AR@1000')  # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')

                # 2 determine whether total_epochs ckpt exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)):
                    # Pick the most recent training log (sorted by name).
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3 read metric
                    model_performance = get_final_results(
                        log_json_path, total_epochs, final_results_out)
                    if model_performance is None:
                        print(f'log file error: {log_json_path}')
                        continue
                    # Convert fractional metrics to percentage points.
                    for performance in model_performance:
                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                            metric = round(
                                model_performance[performance] * 100, 1)
                            model_performance[performance] = metric
                    result_dict[config] = model_performance

                    # update and append excel content
                    if args.excel:
                        if 'AR@1000' in model_performance:
                            metrics = f'{model_performance["AR@1000"]}' \
                                      f'(AR@1000)'
                        elif 'segm_mAP' in model_performance:
                            metrics = f'{model_performance["bbox_mAP"]}/' \
                                      f'{model_performance["segm_mAP"]}'
                        else:
                            metrics = f'{model_performance["bbox_mAP"]}'

                        # Update the existing row if present, otherwise
                        # append a new row at the end of the sheet.
                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, metrics)
                        else:
                            table.write(total_nrows, 0, config)
                            table.write(total_nrows, args.ncol, metrics)
                            total_nrows += 1

                else:
                    print(f'{config} not exist: {ckpt_path}')
            else:
                print(f'not exist: {config}')

    # 4 save or print results
    if metrics_out:
        mkdir_or_exist(metrics_out)
        dump(result_dict, osp.join(metrics_out, 'model_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
    if args.excel:
        # Save next to the input workbook as "<name>_o<ext>". Fixed: the
        # original used a literal "(unknown)" and never used `filename`.
        filename, sufflx = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{sufflx}')
        print(f'>>> Output {filename}_o{sufflx}')
mmdetection/.dev_scripts/linter.sh ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Run the project's formatters and lint checks over the main source trees.
# yapf: auto-format in place (recursive); isort: sort imports recursively;
# flake8: style/lint check over the whole repository.
yapf -r -i mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .
mmdetection/.dev_scripts/test_benchmark.sh ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PARTITION=$1
2
+ CHECKPOINT_DIR=$2
3
+ WORK_DIR=$3
4
+ CPUS_PER_TASK=${4:-2}
5
+
6
+ echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
7
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir $WORK_DIR/atss_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29666 &
8
+ echo 'configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py' &
9
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION autoassign_r50-caffe_fpn_1x_coco configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir $WORK_DIR/autoassign_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29667 &
10
+ echo 'configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py' &
11
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn-carafe_1x_coco configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn-carafe_1x_coco --cfg-option env_cfg.dist_cfg.port=29668 &
12
+ echo 'configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' &
13
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir $WORK_DIR/cascade-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29669 &
14
+ echo 'configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' &
15
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-mask-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir $WORK_DIR/cascade-mask-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29670 &
16
+ echo 'configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py' &
17
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir $WORK_DIR/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29671 &
18
+ echo 'configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py' &
19
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION centernet_r18-dcnv2_8xb16-crop512-140e_coco configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir $WORK_DIR/centernet_r18-dcnv2_8xb16-crop512-140e_coco --cfg-option env_cfg.dist_cfg.port=29672 &
20
+ echo 'configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py' &
21
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir $WORK_DIR/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco --cfg-option env_cfg.dist_cfg.port=29673 &
22
+ echo 'configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' &
23
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth --work-dir $WORK_DIR/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco --cfg-option env_cfg.dist_cfg.port=29674 &
24
+ echo 'configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py' &
25
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION cornernet_hourglass104_8xb6-210e-mstest_coco configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir $WORK_DIR/cornernet_hourglass104_8xb6-210e-mstest_coco --cfg-option env_cfg.dist_cfg.port=29675 &
26
+ echo 'configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py' &
27
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir $WORK_DIR/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29676 &
28
+ echo 'configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py' &
29
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_mdpool_1x_coco configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_mdpool_1x_coco --cfg-option env_cfg.dist_cfg.port=29677 &
30
+ echo 'configs/ddod/ddod_r50_fpn_1x_coco.py' &
31
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ddod_r50_fpn_1x_coco configs/ddod/ddod_r50_fpn_1x_coco.py $CHECKPOINT_DIR/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth --work-dir $WORK_DIR/ddod_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29678 &
32
+ echo 'configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py' &
33
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION deformable-detr_r50_16xb2-50e_coco configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir $WORK_DIR/deformable-detr_r50_16xb2-50e_coco --cfg-option env_cfg.dist_cfg.port=29679 &
34
+ echo 'configs/detectors/detectors_htc-r50_1x_coco.py' &
35
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION detectors_htc-r50_1x_coco configs/detectors/detectors_htc-r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir $WORK_DIR/detectors_htc-r50_1x_coco --cfg-option env_cfg.dist_cfg.port=29680 &
36
+ echo 'configs/detr/detr_r50_8xb2-150e_coco.py' &
37
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION detr_r50_8xb2-150e_coco configs/detr/detr_r50_8xb2-150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir $WORK_DIR/detr_r50_8xb2-150e_coco --cfg-option env_cfg.dist_cfg.port=29681 &
38
+ echo 'configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py' &
39
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION dh-faster-rcnn_r50_fpn_1x_coco configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir $WORK_DIR/dh-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29682 &
40
+ echo 'configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py' &
41
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION atss_r50_fpn_dyhead_1x_coco configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth --work-dir $WORK_DIR/atss_r50_fpn_dyhead_1x_coco --cfg-option env_cfg.dist_cfg.port=29683 &
42
+ echo 'configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py' &
43
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION dynamic-rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir $WORK_DIR/dynamic-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29684 &
44
+ echo 'configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py' &
45
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_effb3_fpn_8xb4-crop896-1x_coco configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py $CHECKPOINT_DIR/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth --work-dir $WORK_DIR/retinanet_effb3_fpn_8xb4-crop896-1x_coco --cfg-option env_cfg.dist_cfg.port=29685 &
46
+ echo 'configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py' &
47
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50-attn1111_fpn_1x_coco configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir $WORK_DIR/faster-rcnn_r50-attn1111_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29686 &
48
+ echo 'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' &
49
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29687 &
50
+ echo 'configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py' &
51
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir $WORK_DIR/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco --cfg-option env_cfg.dist_cfg.port=29688 &
52
+ echo 'configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py' &
53
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fovea_r50_fpn_gn-head-align_4xb4-2x_coco configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir $WORK_DIR/fovea_r50_fpn_gn-head-align_4xb4-2x_coco --cfg-option env_cfg.dist_cfg.port=29689 &
54
+ echo 'configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py' &
55
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpg_crop640-50e_coco configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpg_crop640-50e_coco --cfg-option env_cfg.dist_cfg.port=29690 &
56
+ echo 'configs/free_anchor/freeanchor_r50_fpn_1x_coco.py' &
57
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION freeanchor_r50_fpn_1x_coco configs/free_anchor/freeanchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir $WORK_DIR/freeanchor_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29691 &
58
+ echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
59
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir $WORK_DIR/fsaf_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29692 &
60
+ echo 'configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py' &
61
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir $WORK_DIR/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29693 &
62
+ echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
63
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir $WORK_DIR/gfl_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29694 &
64
+ echo 'configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py' &
65
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_fpn_ghm-1x_coco configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py $CHECKPOINT_DIR/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth --work-dir $WORK_DIR/retinanet_r50_fpn_ghm-1x_coco --cfg-option env_cfg.dist_cfg.port=29695 &
66
+ echo 'configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py' &
67
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_gn-all_2x_coco --cfg-option env_cfg.dist_cfg.port=29696 &
68
+ echo 'configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' &
69
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_gn-ws-all_1x_coco configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_gn-ws-all_1x_coco --cfg-option env_cfg.dist_cfg.port=29697 &
70
+ echo 'configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py' &
71
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION grid-rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir $WORK_DIR/grid-rcnn_r50_fpn_gn-head_2x_coco --cfg-option env_cfg.dist_cfg.port=29698 &
72
+ echo 'configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py' &
73
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faste-rcnn_r50_fpn_groie_1x_coco configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir $WORK_DIR/faste-rcnn_r50_fpn_groie_1x_coco --cfg-option env_cfg.dist_cfg.port=29699 &
74
+ echo 'configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py' &
75
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ga-retinanet_r50-caffe_fpn_1x_coco configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir $WORK_DIR/ga-retinanet_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29700 &
76
+ echo 'configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py' &
77
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_hrnetv2p-w18-1x_coco configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir $WORK_DIR/faster-rcnn_hrnetv2p-w18-1x_coco --cfg-option env_cfg.dist_cfg.port=29701 &
78
+ echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
79
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir $WORK_DIR/htc_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29702 &
80
+ echo 'configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py' &
81
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_instaboost-4x_coco configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_instaboost-4x_coco --cfg-option env_cfg.dist_cfg.port=29703 &
82
+ echo 'configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py' &
83
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION libra-faster-rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir $WORK_DIR/libra-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29704 &
84
+ echo 'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py' &
85
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask2former_r50_8xb2-lsj-50e_coco-panoptic configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py $CHECKPOINT_DIR/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth --work-dir $WORK_DIR/mask2former_r50_8xb2-lsj-50e_coco-panoptic --cfg-option env_cfg.dist_cfg.port=29705 &
86
+ echo 'configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' &
87
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir $WORK_DIR/mask-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29706 &
88
+ echo 'configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py' &
89
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION maskformer_r50_ms-16xb1-75e_coco configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py $CHECKPOINT_DIR/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth --work-dir $WORK_DIR/maskformer_r50_ms-16xb1-75e_coco --cfg-option env_cfg.dist_cfg.port=29707 &
90
+ echo 'configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py' &
91
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ms-rcnn_r50-caffe_fpn_1x_coco configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir $WORK_DIR/ms-rcnn_r50-caffe_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29708 &
92
+ echo 'configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py' &
93
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir $WORK_DIR/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco --cfg-option env_cfg.dist_cfg.port=29709 &
94
+ echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py' &
95
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640-50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir $WORK_DIR/retinanet_r50_nasfpn_crop640-50e_coco --cfg-option env_cfg.dist_cfg.port=29710 &
96
+ echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
97
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir $WORK_DIR/paa_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29711 &
98
+ echo 'configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py' &
99
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_pafpn_1x_coco configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir $WORK_DIR/faster-rcnn_r50_pafpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29712 &
100
+ echo 'configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py' &
101
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION panoptic-fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth --work-dir $WORK_DIR/panoptic-fpn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29713 &
102
+ echo 'configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py' &
103
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_r50_fpn_pisa_1x_coco configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir $WORK_DIR/faster-rcnn_r50_fpn_pisa_1x_coco --cfg-option env_cfg.dist_cfg.port=29714 &
104
+ echo 'configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py' &
105
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION point-rend_r50-caffe_fpn_ms-1x_coco configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir $WORK_DIR/point-rend_r50-caffe_fpn_ms-1x_coco --cfg-option env_cfg.dist_cfg.port=29715 &
106
+ echo 'configs/pvt/retinanet_pvt-s_fpn_1x_coco.py' &
107
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_pvt-s_fpn_1x_coco configs/pvt/retinanet_pvt-s_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth --work-dir $WORK_DIR/retinanet_pvt-s_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29716 &
108
+ echo 'configs/queryinst/queryinst_r50_fpn_1x_coco.py' &
109
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION queryinst_r50_fpn_1x_coco configs/queryinst/queryinst_r50_fpn_1x_coco.py $CHECKPOINT_DIR/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth --work-dir $WORK_DIR/queryinst_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29717 &
110
+ echo 'configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
111
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir $WORK_DIR/mask-rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29718 &
112
+ echo 'configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py' &
113
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION reppoints-moment_r50_fpn_1x_coco configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir $WORK_DIR/reppoints-moment_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29719 &
114
+ echo 'configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py' &
115
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_res2net-101_fpn_2x_coco configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir $WORK_DIR/faster-rcnn_res2net-101_fpn_2x_coco --cfg-option env_cfg.dist_cfg.port=29720 &
116
+ echo 'configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' &
117
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir $WORK_DIR/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco --cfg-option env_cfg.dist_cfg.port=29721 &
118
+ echo 'configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py' &
119
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-rsb-pre_fpn_1x_coco configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth --work-dir $WORK_DIR/mask-rcnn_r50-rsb-pre_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29722 &
120
+ echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
121
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir $WORK_DIR/retinanet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29723 &
122
+ echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
123
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir $WORK_DIR/rpn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29724 &
124
+ echo 'configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py' &
125
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sabl-retinanet_r50_fpn_1x_coco configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir $WORK_DIR/sabl-retinanet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29725 &
126
+ echo 'configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py' &
127
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sabl-faster-rcnn_r50_fpn_1x_coco configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir $WORK_DIR/sabl-faster-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29726 &
128
+ echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
129
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir $WORK_DIR/scnet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29727 &
130
+ echo 'configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py' &
131
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_r50-scratch_fpn_gn-all_6x_coco configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py $CHECKPOINT_DIR/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth --work-dir $WORK_DIR/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco --cfg-option env_cfg.dist_cfg.port=29728 &
132
+ echo 'configs/solo/decoupled-solo_r50_fpn_1x_coco.py' &
133
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION decoupled-solo_r50_fpn_1x_coco configs/solo/decoupled-solo_r50_fpn_1x_coco.py $CHECKPOINT_DIR/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth --work-dir $WORK_DIR/decoupled-solo_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29729 &
134
+ echo 'configs/solov2/solov2_r50_fpn_1x_coco.py' &
135
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION solov2_r50_fpn_1x_coco configs/solov2/solov2_r50_fpn_1x_coco.py $CHECKPOINT_DIR/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth --work-dir $WORK_DIR/solov2_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29730 &
136
+ echo 'configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py' &
137
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION sparse-rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir $WORK_DIR/sparse-rcnn_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29731 &
138
+ echo 'configs/ssd/ssd300_coco.py' &
139
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir $WORK_DIR/ssd300_coco --cfg-option env_cfg.dist_cfg.port=29732 &
140
+ echo 'configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py' &
141
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2-scratch_8xb24-600e_coco configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir $WORK_DIR/ssdlite_mobilenetv2-scratch_8xb24-600e_coco --cfg-option env_cfg.dist_cfg.port=29733 &
142
+ echo 'configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py' &
143
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION mask-rcnn_swin-t-p4-w7_fpn_1x_coco configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth --work-dir $WORK_DIR/mask-rcnn_swin-t-p4-w7_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29734 &
144
+ echo 'configs/tood/tood_r50_fpn_1x_coco.py' &
145
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION tood_r50_fpn_1x_coco configs/tood/tood_r50_fpn_1x_coco.py $CHECKPOINT_DIR/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth --work-dir $WORK_DIR/tood_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29735 &
146
+ echo 'configs/tridentnet/tridentnet_r50-caffe_1x_coco.py' &
147
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION tridentnet_r50-caffe_1x_coco configs/tridentnet/tridentnet_r50-caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir $WORK_DIR/tridentnet_r50-caffe_1x_coco --cfg-option env_cfg.dist_cfg.port=29736 &
148
+ echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
149
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir $WORK_DIR/vfnet_r50_fpn_1x_coco --cfg-option env_cfg.dist_cfg.port=29737 &
150
+ echo 'configs/yolact/yolact_r50_1xb8-55e_coco.py' &
151
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolact_r50_1xb8-55e_coco configs/yolact/yolact_r50_1xb8-55e_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir $WORK_DIR/yolact_r50_1xb8-55e_coco --cfg-option env_cfg.dist_cfg.port=29738 &
152
+ echo 'configs/yolo/yolov3_d53_8xb8-320-273e_coco.py' &
153
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolov3_d53_8xb8-320-273e_coco configs/yolo/yolov3_d53_8xb8-320-273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir $WORK_DIR/yolov3_d53_8xb8-320-273e_coco --cfg-option env_cfg.dist_cfg.port=29739 &
154
+ echo 'configs/yolof/yolof_r50-c5_8xb8-1x_coco.py' &
155
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolof_r50-c5_8xb8-1x_coco configs/yolof/yolof_r50-c5_8xb8-1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir $WORK_DIR/yolof_r50-c5_8xb8-1x_coco --cfg-option env_cfg.dist_cfg.port=29740 &
156
+ echo 'configs/yolox/yolox_tiny_8xb8-300e_coco.py' &
157
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION yolox_tiny_8xb8-300e_coco configs/yolox/yolox_tiny_8xb8-300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth --work-dir $WORK_DIR/yolox_tiny_8xb8-300e_coco --cfg-option env_cfg.dist_cfg.port=29741 &
mmdetection/.dev_scripts/test_init_backbone.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ """Check out backbone whether successfully load pretrained checkpoint."""
3
+ import copy
4
+ import os
5
+ from os.path import dirname, exists, join
6
+
7
+ import pytest
8
+ from mmengine.config import Config
9
+ from mmengine.runner import CheckpointLoader
10
+ from mmengine.utils import ProgressBar
11
+
12
+ from mmdet.registry import MODELS
13
+
14
+
15
+ def _get_config_directory():
16
+ """Find the predefined detector config directory."""
17
+ try:
18
+ # Assume we are running in the source mmdetection repo
19
+ repo_dpath = dirname(dirname(__file__))
20
+ except NameError:
21
+ # For IPython development when this __file__ is not defined
22
+ import mmdet
23
+ repo_dpath = dirname(dirname(mmdet.__file__))
24
+ config_dpath = join(repo_dpath, 'configs')
25
+ if not exists(config_dpath):
26
+ raise Exception('Cannot find config path')
27
+ return config_dpath
28
+
29
+
30
def _get_config_module(fname):
    """Load a configuration file as an mmengine ``Config`` object.

    Args:
        fname (str): Config file name relative to the ``configs`` directory.

    Returns:
        Config: The parsed configuration.
    """
    config_fpath = join(_get_config_directory(), fname)
    return Config.fromfile(config_fpath)
36
+
37
+
38
def _get_detector_cfg(fname):
    """Grab the ``model`` section needed to create a detector.

    The result is deep copied so tests can safely mutate parameters
    without influencing each other.

    Args:
        fname (str): Config file name relative to the ``configs`` directory.

    Returns:
        Config: A deep copy of the config's ``model`` field.
    """
    cfg = _get_config_module(fname)
    return copy.deepcopy(cfg.model)
47
+
48
+
49
def _traversed_config_file():
    """Collect every config file under ``configs`` that should be checked.

    If a config's ``backbone.init_cfg`` is None (i.e. it does not use the
    `Pretrained` init way), add its folder name to ``ignores_folder`` (when
    the whole folder is like that) or its file name to ``ignores_file``
    (for a single config).

    Returns:
        list[str]: Paths of the config files to check.
    """
    config_path = _get_config_directory()

    ignores_folder = [
        # `_base_`, `legacy_1.x` and `common` ignored by default.
        '_base_', 'legacy_1.x', 'common',
        # 'ld' needs to load a teacher model; check its teacher_config
        # path first if you want to include it.
        'ld',
        # `selfsup_pretrain` needs a model conversion step first.
        'selfsup_pretrain',
        # `backbone.init_cfg` is None in these folders.
        'centripetalnet', 'cornernet', 'cityscapes', 'scratch',
    ]
    # `backbone.init_cfg` is None in ssdlite.
    ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']

    check_cfg_names = []
    for folder_name in os.listdir(config_path):
        if folder_name in ignores_folder:
            continue
        folder = join(config_path, folder_name)
        if not os.path.isdir(folder):
            continue
        for sub_name in os.listdir(folder):
            if sub_name.endswith('py') and sub_name not in ignores_file:
                check_cfg_names.append(join(folder, sub_name))
    return check_cfg_names
88
+
89
+
90
def _check_backbone(config, print_cfg=True):
    """Check whether the backbone successfully loads its pretrained
    checkpoint via `backbone.init_cfg`.

    First, `CheckpointLoader.load_checkpoint` loads the raw checkpoint
    without building any model. Then `MODELS.build` builds the detector
    and `model.init_weights()` initializes its parameters. Finally, every
    backbone parameter whose name also appears in the checkpoint is
    asserted to be exactly equal to the corresponding checkpoint tensor.

    Args:
        config (str): Config file path.
        print_cfg (bool): Whether to print progress information.

    Returns:
        str or None: None if the backbone was initialized from a
            `Pretrained` init_cfg and all matching parameters equal the
            checkpoint; the config file path if the config has no usable
            `Pretrained` init_cfg and thus could not be checked.
    """
    if print_cfg:
        print('-' * 15 + 'loading ', config)
    cfg = Config.fromfile(config)
    init_cfg = None
    try:
        # The attribute chain raises AttributeError when `backbone` or
        # `init_cfg` is absent from the config.
        init_cfg = cfg.model.backbone.init_cfg
        init_flag = True
    except AttributeError:
        init_flag = False
    # Only configs that initialize the backbone from a pretrained
    # checkpoint can be verified.
    if init_cfg is None or init_cfg.get('type') != 'Pretrained':
        init_flag = False
    if init_flag:
        # Load the raw checkpoint (weights only, no model construction).
        checkpoint = CheckpointLoader.load_checkpoint(init_cfg.checkpoint)
        if 'state_dict' in checkpoint:
            state_dict = checkpoint['state_dict']
        else:
            state_dict = checkpoint

        model = MODELS.build(cfg.model)
        model.init_weights()

        # Compare every backbone parameter that the checkpoint provides.
        checkpoint_layers = state_dict.keys()
        for name, value in model.backbone.state_dict().items():
            if name in checkpoint_layers:
                assert value.equal(state_dict[name])

        if print_cfg:
            print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +
                  '\n', )
        return None
    else:
        if print_cfg:
            print(config + '\n' + '-' * 10 +
                  'config file do not have init_cfg' + '-' * 10 + '\n')
        return config
146
+
147
+
148
@pytest.mark.parametrize('config', _traversed_config_file())
def test_load_pretrained(config):
    """Check whether the backbone successfully loads its pretrained model
    via `backbone.init_cfg`; one pytest case is collected per config file.

    Details please refer to `_check_backbone`.
    """
    # Failures surface through the asserts inside `_check_backbone`;
    # quiet mode since pytest reports per-config results itself.
    _check_backbone(config, print_cfg=False)
156
+
157
+
158
def _test_load_pretrained():
    """Debug helper: traverse all candidate config files and report the
    ones whose backbone could not be checked.

    Use this function instead of pytest when you need printed details.

    Returns:
        None: The problematic config paths (including those whose
            ``backbone.init_cfg`` is None) are printed at the end.
    """
    cfg_paths = _traversed_config_file()
    need_check_cfg = []

    prog_bar = ProgressBar(len(cfg_paths))
    for cfg_path in cfg_paths:
        bad_cfg = _check_backbone(cfg_path)
        if bad_cfg is not None:
            need_check_cfg.append(bad_cfg)
        prog_bar.update()
    print('These config files need to be checked again')
    print(need_check_cfg)
mmdetection/.dev_scripts/train_benchmark.sh ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Usage: train_benchmark.sh PARTITION WORK_DIR [CPUS_PER_TASK]
PARTITION=$1
WORK_DIR=$2
CPUS_PER_TASK=${3:-4}
# BUG FIX: every command below expands the misspelled $CPUS_PRE_TASK
# (PRE vs PER), which was never assigned, so slurm_train.sh received an
# empty CPUS_PER_TASK. Alias the misspelling so the third argument (or
# its default of 4) is actually honored, without editing every line.
CPUS_PRE_TASK=$CPUS_PER_TASK

5
+ echo 'configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py' &
6
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_albu_1x_coco configs/albu_example/mask-rcnn_r50_fpn_albu_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_albu_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
7
+ echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
8
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $WORK_DIR/atss_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
9
+ echo 'configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py' &
10
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION autoassign_r50-caffe_fpn_1x_coco configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py $WORK_DIR/autoassign_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
11
+ echo 'configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py' &
12
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn-carafe_1x_coco configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn-carafe_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
13
+ echo 'configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py' &
14
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py $WORK_DIR/cascade-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
15
+ echo 'configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py' &
16
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-mask-rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py $WORK_DIR/cascade-mask-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
17
+ echo 'configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py' &
18
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py $WORK_DIR/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
19
+ echo 'configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py' &
20
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centernet_r18-dcnv2_8xb16-crop512-140e_coco configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py $WORK_DIR/centernet_r18-dcnv2_8xb16-crop512-140e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
21
+ echo 'configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py' &
22
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centernet-update_r50-caffe_fpn_ms-1x_coco configs/centernet/centernet-update_r50-caffe_fpn_ms-1x_coco.py $WORK_DIR/centernet-update_r50-caffe_fpn_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
23
+ echo 'configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py' &
24
+ GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py $WORK_DIR/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
25
+ echo 'configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py' &
26
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION cornernet_hourglass104_8xb6-210e-mstest_coco configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py $WORK_DIR/cornernet_hourglass104_8xb6-210e-mstest_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
27
+ echo 'configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py' &
28
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py $WORK_DIR/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
29
+ echo 'configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py' &
30
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
31
+ echo 'configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py' &
32
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_mdpool_1x_coco configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_mdpool_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
33
+ echo 'configs/ddod/ddod_r50_fpn_1x_coco.py' &
34
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ddod_r50_fpn_1x_coco configs/ddod/ddod_r50_fpn_1x_coco.py $WORK_DIR/ddod_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
35
+ echo 'configs/detectors/detectors_htc-r50_1x_coco.py' &
36
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION detectors_htc-r50_1x_coco configs/detectors/detectors_htc-r50_1x_coco.py $WORK_DIR/detectors_htc-r50_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
37
+ echo 'configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py' &
38
+ GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION deformable-detr_r50_16xb2-50e_coco configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py $WORK_DIR/deformable-detr_r50_16xb2-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
39
+ echo 'configs/detr/detr_r50_8xb2-150e_coco.py' &
40
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION detr_r50_8xb2-150e_coco configs/detr/detr_r50_8xb2-150e_coco.py $WORK_DIR/detr_r50_8xb2-150e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
41
+ echo 'configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py' &
42
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION dh-faster-rcnn_r50_fpn_1x_coco configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/dh-faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
43
+ echo 'configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py' &
44
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION dynamic-rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py $WORK_DIR/dynamic-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
45
+ echo 'configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py' &
46
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION atss_r50_fpn_dyhead_1x_coco configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py $WORK_DIR/atss_r50_fpn_dyhead_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
47
+ echo 'configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py' &
48
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_effb3_fpn_8xb4-crop896-1x_coco configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py $WORK_DIR/retinanet_effb3_fpn_8xb4-crop896-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
49
+ echo 'configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py' &
50
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-attn1111_fpn_1x_coco configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50-attn1111_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
51
+ echo 'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py' &
52
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
53
+ echo 'configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py' &
54
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-caffe-dc5_ms-1x_coco configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py $WORK_DIR/faster-rcnn_r50-caffe-dc5_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
55
+ echo 'configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py' &
56
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py $WORK_DIR/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
57
+ echo 'configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py' &
58
+ GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fovea_r50_fpn_gn-head-align_4xb4-2x_coco configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py $WORK_DIR/fovea_r50_fpn_gn-head-align_4xb4-2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
59
+ echo 'configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py' &
60
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpg_crop640-50e_coco configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py $WORK_DIR/mask-rcnn_r50_fpg_crop640-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
61
+ echo 'configs/free_anchor/freeanchor_r50_fpn_1x_coco.py' &
62
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION freeanchor_r50_fpn_1x_coco configs/free_anchor/freeanchor_r50_fpn_1x_coco.py $WORK_DIR/freeanchor_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
63
+ echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
64
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $WORK_DIR/fsaf_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
65
+ echo 'configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py' &
66
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py $WORK_DIR/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
67
+ echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
68
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $WORK_DIR/gfl_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
69
+ echo 'configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py' &
70
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50_fpn_ghm-1x_coco configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py $WORK_DIR/retinanet_r50_fpn_ghm-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
71
+ echo 'configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py' &
72
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_gn-all_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
73
+ echo 'configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py' &
74
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_fpn_gn-ws-all_1x_coco configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py $WORK_DIR/faster-rcnn_r50_fpn_gn-ws-all_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
75
+ echo 'configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py' &
76
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION grid-rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py $WORK_DIR/grid-rcnn_r50_fpn_gn-head_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
77
+ echo 'configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py' &
78
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faste-rcnn_r50_fpn_groie_1x_coco configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py $WORK_DIR/faste-rcnn_r50_fpn_groie_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
79
+ echo 'configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py' &
80
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ga-retinanet_r50-caffe_fpn_1x_coco configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py $WORK_DIR/ga-retinanet_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
81
+ echo 'configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py' &
82
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_hrnetv2p-w18-1x_coco configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py $WORK_DIR/faster-rcnn_hrnetv2p-w18-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
83
+ echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
84
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $WORK_DIR/htc_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
85
+ echo 'configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py' &
86
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_instaboost-4x_coco configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_instaboost-4x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
87
+ echo 'configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py' &
88
+ GPUS=2 GPUS_PER_NODE=2 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION lad_r50-paa-r101_fpn_2xb8_coco_1x configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py $WORK_DIR/lad_r50-paa-r101_fpn_2xb8_coco_1x --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
89
+ echo 'configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py' &
90
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ld_r18-gflv1-r101_fpn_1x_coco configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py $WORK_DIR/ld_r18-gflv1-r101_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
91
+ echo 'configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py' &
92
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION libra-faster-rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py $WORK_DIR/libra-faster-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
93
+ echo 'configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py' &
94
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask2former_r50_8xb2-lsj-50e_coco-panoptic configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py $WORK_DIR/mask2former_r50_8xb2-lsj-50e_coco-panoptic --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
95
+ echo 'configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' &
96
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
97
+ echo 'configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py' &
98
+ GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION maskformer_r50_ms-16xb1-75e_coco configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py $WORK_DIR/maskformer_r50_ms-16xb1-75e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
99
+ echo 'configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py' &
100
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ms-rcnn_r50-caffe_fpn_1x_coco configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py $WORK_DIR/ms-rcnn_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
101
+ echo 'configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py' &
102
+ GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py $WORK_DIR/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
103
+ echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py' &
104
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50_nasfpn_crop640-50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py $WORK_DIR/retinanet_r50_nasfpn_crop640-50e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
105
+ echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
106
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $WORK_DIR/paa_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
107
+ echo 'configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py' &
108
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50_pafpn_1x_coco configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py $WORK_DIR/faster-rcnn_r50_pafpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
109
+ echo 'configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py' &
110
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION panoptic-fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py $WORK_DIR/panoptic-fpn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
111
+ echo 'configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py' &
112
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_r50_fpn_pisa_1x_coco configs/pisa/mask-rcnn_r50_fpn_pisa_1x_coco.py $WORK_DIR/mask-rcnn_r50_fpn_pisa_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
113
+ echo 'configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py' &
114
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION point-rend_r50-caffe_fpn_ms-1x_coco configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py $WORK_DIR/point-rend_r50-caffe_fpn_ms-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
115
+ echo 'configs/pvt/retinanet_pvt-t_fpn_1x_coco.py' &
116
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_pvt-t_fpn_1x_coco configs/pvt/retinanet_pvt-t_fpn_1x_coco.py $WORK_DIR/retinanet_pvt-t_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
117
+ echo 'configs/queryinst/queryinst_r50_fpn_1x_coco.py' &
118
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION queryinst_r50_fpn_1x_coco configs/queryinst/queryinst_r50_fpn_1x_coco.py $WORK_DIR/queryinst_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
119
+ echo 'configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py' &
120
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_regnetx-800MF_fpn_1x_coco configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py $WORK_DIR/retinanet_regnetx-800MF_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
121
+ echo 'configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py' &
122
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION reppoints-moment_r50_fpn_1x_coco configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py $WORK_DIR/reppoints-moment_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
123
+ echo 'configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py' &
124
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_res2net-101_fpn_2x_coco configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py $WORK_DIR/faster-rcnn_res2net-101_fpn_2x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
125
+ echo 'configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py' &
126
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py $WORK_DIR/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
127
+ echo 'configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py' &
128
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50-rsb-pre_fpn_1x_coco configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py $WORK_DIR/retinanet_r50-rsb-pre_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
129
+ echo 'configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py' &
130
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION retinanet_r50-caffe_fpn_1x_coco configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py $WORK_DIR/retinanet_r50-caffe_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
131
+ echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
132
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $WORK_DIR/rpn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
133
+ echo 'configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py' &
134
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION sabl-retinanet_r50_fpn_1x_coco configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py $WORK_DIR/sabl-retinanet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
135
+ echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
136
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $WORK_DIR/scnet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
137
+ echo 'configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py' &
138
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION faster-rcnn_r50-scratch_fpn_gn-all_6x_coco configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py $WORK_DIR/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
139
+ echo 'configs/solo/solo_r50_fpn_1x_coco.py' &
140
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION solo_r50_fpn_1x_coco configs/solo/solo_r50_fpn_1x_coco.py $WORK_DIR/solo_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
141
+ echo 'configs/solov2/solov2_r50_fpn_1x_coco.py' &
142
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION solov2_r50_fpn_1x_coco configs/solov2/solov2_r50_fpn_1x_coco.py $WORK_DIR/solov2_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
143
+ echo 'configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py' &
144
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION sparse-rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py $WORK_DIR/sparse-rcnn_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
145
+ echo 'configs/ssd/ssd300_coco.py' &
146
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $WORK_DIR/ssd300_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
147
+ echo 'configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py' &
148
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION ssdlite_mobilenetv2-scratch_8xb24-600e_coco configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py $WORK_DIR/ssdlite_mobilenetv2-scratch_8xb24-600e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
149
+ echo 'configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py' &
150
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION mask-rcnn_swin-t-p4-w7_fpn_1x_coco configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py $WORK_DIR/mask-rcnn_swin-t-p4-w7_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
151
+ echo 'configs/tood/tood_r50_fpn_1x_coco.py' &
152
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION tood_r50_fpn_1x_coco configs/tood/tood_r50_fpn_1x_coco.py $WORK_DIR/tood_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
153
+ echo 'configs/tridentnet/tridentnet_r50-caffe_1x_coco.py' &
154
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION tridentnet_r50-caffe_1x_coco configs/tridentnet/tridentnet_r50-caffe_1x_coco.py $WORK_DIR/tridentnet_r50-caffe_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
155
+ echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
156
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $WORK_DIR/vfnet_r50_fpn_1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
157
+ echo 'configs/yolact/yolact_r50_8xb8-55e_coco.py' &
158
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolact_r50_8xb8-55e_coco configs/yolact/yolact_r50_8xb8-55e_coco.py $WORK_DIR/yolact_r50_8xb8-55e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
159
+ echo 'configs/yolo/yolov3_d53_8xb8-320-273e_coco.py' &
160
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolov3_d53_8xb8-320-273e_coco configs/yolo/yolov3_d53_8xb8-320-273e_coco.py $WORK_DIR/yolov3_d53_8xb8-320-273e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
161
+ echo 'configs/yolof/yolof_r50-c5_8xb8-1x_coco.py' &
162
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolof_r50-c5_8xb8-1x_coco configs/yolof/yolof_r50-c5_8xb8-1x_coco.py $WORK_DIR/yolof_r50-c5_8xb8-1x_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
163
+ echo 'configs/yolox/yolox_tiny_8xb8-300e_coco.py' &
164
+ GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK ./tools/slurm_train.sh $PARTITION yolox_tiny_8xb8-300e_coco configs/yolox/yolox_tiny_8xb8-300e_coco.py $WORK_DIR/yolox_tiny_8xb8-300e_coco --cfg-options default_hooks.checkpoint.max_keep_ckpts=1 &
mmdetection/.gitignore ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+ MANIFEST
27
+
28
+ # PyInstaller
29
+ # Usually these files are written by a python script from a template
30
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
31
+ *.manifest
32
+ *.spec
33
+
34
+ # Installer logs
35
+ pip-log.txt
36
+ pip-delete-this-directory.txt
37
+
38
+ # Unit test / coverage reports
39
+ htmlcov/
40
+ .tox/
41
+ .coverage
42
+ .coverage.*
43
+ .cache
44
+ nosetests.xml
45
+ coverage.xml
46
+ *.cover
47
+ .hypothesis/
48
+ .pytest_cache/
49
+
50
+ # Translations
51
+ *.mo
52
+ *.pot
53
+
54
+ # Django stuff:
55
+ *.log
56
+ local_settings.py
57
+ db.sqlite3
58
+
59
+ # Flask stuff:
60
+ instance/
61
+ .webassets-cache
62
+
63
+ # Scrapy stuff:
64
+ .scrapy
65
+
66
+ # Sphinx documentation
67
+ docs/en/_build/
68
+ docs/zh_cn/_build/
69
+
70
+ # PyBuilder
71
+ target/
72
+
73
+ # Jupyter Notebook
74
+ .ipynb_checkpoints
75
+
76
+ # pyenv
77
+ .python-version
78
+
79
+ # celery beat schedule file
80
+ celerybeat-schedule
81
+
82
+ # SageMath parsed files
83
+ *.sage.py
84
+
85
+ # Environments
86
+ .env
87
+ .venv
88
+ env/
89
+ venv/
90
+ ENV/
91
+ env.bak/
92
+ venv.bak/
93
+
94
+ # Spyder project settings
95
+ .spyderproject
96
+ .spyproject
97
+
98
+ # Rope project settings
99
+ .ropeproject
100
+
101
+ # mkdocs documentation
102
+ /site
103
+
104
+ # mypy
105
+ .mypy_cache/
106
+ data/
107
+ data
108
+ .vscode
109
+ .idea
110
+ .DS_Store
111
+
112
+ # custom
113
+ *.pkl
114
+ *.pkl.json
115
+ *.log.json
116
+ docs/modelzoo_statistics.md
117
+ mmdet/.mim
118
+ work_dirs/
119
+
120
+ # Pytorch
121
+ *.pth
122
+ *.py~
123
+ *.sh~
mmdetection/.owners.yml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ assign:
2
+ strategy:
3
+ # random
4
+ daily-shift-based
5
+ schedule:
6
+ '*/1 * * * *'
7
+ assignees:
8
+ - Czm369
9
+ - hhaAndroid
10
+ - jbwang1997
11
+ - RangiLyu
12
+ - BIGWangYuDong
13
+ - chhluo
14
+ - ZwwWayne
mmdetection/.pre-commit-config-zh-cn.yaml ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ exclude: ^tests/data/
2
+ repos:
3
+ - repo: https://gitee.com/openmmlab/mirrors-flake8
4
+ rev: 5.0.4
5
+ hooks:
6
+ - id: flake8
7
+ - repo: https://gitee.com/openmmlab/mirrors-isort
8
+ rev: 5.11.5
9
+ hooks:
10
+ - id: isort
11
+ - repo: https://gitee.com/openmmlab/mirrors-yapf
12
+ rev: v0.32.0
13
+ hooks:
14
+ - id: yapf
15
+ - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
16
+ rev: v4.3.0
17
+ hooks:
18
+ - id: trailing-whitespace
19
+ - id: check-yaml
20
+ - id: end-of-file-fixer
21
+ - id: requirements-txt-fixer
22
+ - id: double-quote-string-fixer
23
+ - id: check-merge-conflict
24
+ - id: fix-encoding-pragma
25
+ args: ["--remove"]
26
+ - id: mixed-line-ending
27
+ args: ["--fix=lf"]
28
+ - repo: https://gitee.com/openmmlab/mirrors-mdformat
29
+ rev: 0.7.9
30
+ hooks:
31
+ - id: mdformat
32
+ args: ["--number"]
33
+ additional_dependencies:
34
+ - mdformat-openmmlab
35
+ - mdformat_frontmatter
36
+ - linkify-it-py
37
+ - repo: https://gitee.com/openmmlab/mirrors-codespell
38
+ rev: v2.2.1
39
+ hooks:
40
+ - id: codespell
41
+ - repo: https://gitee.com/openmmlab/mirrors-docformatter
42
+ rev: v1.3.1
43
+ hooks:
44
+ - id: docformatter
45
+ args: ["--in-place", "--wrap-descriptions", "79"]
46
+ - repo: https://gitee.com/openmmlab/mirrors-pyupgrade
47
+ rev: v3.0.0
48
+ hooks:
49
+ - id: pyupgrade
50
+ args: ["--py36-plus"]
51
+ - repo: https://gitee.com/open-mmlab/pre-commit-hooks
52
+ rev: v0.2.0
53
+ hooks:
54
+ - id: check-algo-readme
55
+ - id: check-copyright
56
+ args: ["mmdet"]
57
+ # - repo: https://gitee.com/openmmlab/mirrors-mypy
58
+ # rev: v0.812
59
+ # hooks:
60
+ # - id: mypy
61
+ # exclude: "docs"
mmdetection/.pre-commit-config.yaml ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/PyCQA/flake8
3
+ rev: 5.0.4
4
+ hooks:
5
+ - id: flake8
6
+ - repo: https://github.com/PyCQA/isort
7
+ rev: 5.11.5
8
+ hooks:
9
+ - id: isort
10
+ - repo: https://github.com/pre-commit/mirrors-yapf
11
+ rev: v0.32.0
12
+ hooks:
13
+ - id: yapf
14
+ - repo: https://github.com/pre-commit/pre-commit-hooks
15
+ rev: v4.3.0
16
+ hooks:
17
+ - id: trailing-whitespace
18
+ - id: check-yaml
19
+ - id: end-of-file-fixer
20
+ - id: requirements-txt-fixer
21
+ - id: double-quote-string-fixer
22
+ - id: check-merge-conflict
23
+ - id: fix-encoding-pragma
24
+ args: ["--remove"]
25
+ - id: mixed-line-ending
26
+ args: ["--fix=lf"]
27
+ - repo: https://github.com/codespell-project/codespell
28
+ rev: v2.2.1
29
+ hooks:
30
+ - id: codespell
31
+ - repo: https://github.com/executablebooks/mdformat
32
+ rev: 0.7.9
33
+ hooks:
34
+ - id: mdformat
35
+ args: ["--number"]
36
+ additional_dependencies:
37
+ - mdformat-openmmlab
38
+ - mdformat_frontmatter
39
+ - linkify-it-py
40
+ - repo: https://github.com/myint/docformatter
41
+ rev: v1.3.1
42
+ hooks:
43
+ - id: docformatter
44
+ args: ["--in-place", "--wrap-descriptions", "79"]
45
+ - repo: https://github.com/open-mmlab/pre-commit-hooks
46
+ rev: v0.2.0 # Use the ref you want to point at
47
+ hooks:
48
+ - id: check-algo-readme
49
+ - id: check-copyright
50
+ args: ["mmdet"] # replace the dir_to_check with your expected directory to check
mmdetection/.readthedocs.yml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 2
2
+
3
+ build:
4
+ os: ubuntu-22.04
5
+ tools:
6
+ python: "3.8"
7
+
8
+ formats:
9
+ - epub
10
+
11
+ python:
12
+ install:
13
+ - requirements: requirements/docs.txt
14
+ - requirements: requirements/readthedocs.txt
mmdetection/CITATION.cff ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ cff-version: 1.2.0
2
+ message: "If you use this software, please cite it as below."
3
+ authors:
4
+ - name: "MMDetection Contributors"
5
+ title: "OpenMMLab Detection Toolbox and Benchmark"
6
+ date-released: 2018-08-22
7
+ url: "https://github.com/open-mmlab/mmdetection"
8
+ license: Apache-2.0
mmdetection/LICENSE ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2018-2023 OpenMMLab. All rights reserved.
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright 2018-2023 OpenMMLab.
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
mmdetection/MANIFEST.in ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include requirements/*.txt
2
+ include mmdet/VERSION
3
+ include mmdet/.mim/model-index.yml
4
+ include mmdet/.mim/dataset-index.yml
5
+ include mmdet/.mim/demo/*/*
6
+ recursive-include mmdet/.mim/configs *.py *.yml
7
+ recursive-include mmdet/.mim/tools *.sh *.py
mmdetection/README.md ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <img src="resources/mmdet-logo.png" width="600"/>
3
+ <div>&nbsp;</div>
4
+ <div align="center">
5
+ <b><font size="5">OpenMMLab website</font></b>
6
+ <sup>
7
+ <a href="https://openmmlab.com">
8
+ <i><font size="4">HOT</font></i>
9
+ </a>
10
+ </sup>
11
+ &nbsp;&nbsp;&nbsp;&nbsp;
12
+ <b><font size="5">OpenMMLab platform</font></b>
13
+ <sup>
14
+ <a href="https://platform.openmmlab.com">
15
+ <i><font size="4">TRY IT OUT</font></i>
16
+ </a>
17
+ </sup>
18
+ </div>
19
+ <div>&nbsp;</div>
20
+
21
+ [![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet)
22
+ [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/)
23
+ [![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions)
24
+ [![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection)
25
+ [![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/main/LICENSE)
26
+ [![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
27
+ [![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
28
+
29
+ [📘Documentation](https://mmdetection.readthedocs.io/en/latest/) |
30
+ [🛠️Installation](https://mmdetection.readthedocs.io/en/latest/get_started.html) |
31
+ [👀Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html) |
32
+ [🆕Update News](https://mmdetection.readthedocs.io/en/latest/notes/changelog.html) |
33
+ [🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) |
34
+ [🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose)
35
+
36
+ </div>
37
+
38
+ <div align="center">
39
+
40
+ English | [简体中文](README_zh-CN.md)
41
+
42
+ </div>
43
+
44
+ <div align="center">
45
+ <a href="https://openmmlab.medium.com/" style="text-decoration:none;">
46
+ <img src="https://user-images.githubusercontent.com/25839884/219255827-67c1a27f-f8c5-46a9-811d-5e57448c61d1.png" width="3%" alt="" /></a>
47
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
48
+ <a href="https://discord.com/channels/1037617289144569886/1046608014234370059" style="text-decoration:none;">
49
+ <img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
50
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
51
+ <a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
52
+ <img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
53
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
54
+ <a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
55
+ <img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
56
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
57
+ <a href="https://space.bilibili.com/1293512903" style="text-decoration:none;">
58
+ <img src="https://user-images.githubusercontent.com/25839884/219026751-d7d14cce-a7c9-4e82-9942-8375fca65b99.png" width="3%" alt="" /></a>
59
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
60
+ <a href="https://www.zhihu.com/people/openmmlab" style="text-decoration:none;">
61
+ <img src="https://user-images.githubusercontent.com/25839884/219026120-ba71e48b-6e94-4bd4-b4e9-b7d175b5e362.png" width="3%" alt="" /></a>
62
+ </div>
63
+
64
+ <div align="center">
65
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/6c29886f-ae7a-4a55-8be4-352ee85b7d3e"/>
66
+ </div>
67
+
68
+ ## Introduction
69
+
70
+ MMDetection is an open source object detection toolbox based on PyTorch. It is
71
+ a part of the [OpenMMLab](https://openmmlab.com/) project.
72
+
73
+ The main branch works with **PyTorch 1.8+**.
74
+
75
+ <img src="https://user-images.githubusercontent.com/12907710/187674113-2074d658-f2fb-42d1-ac15-9c4a695e64d7.png"/>
76
+
77
+ <details open>
78
+ <summary>Major features</summary>
79
+
80
+ - **Modular Design**
81
+
82
+ We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.
83
+
84
+ - **Support of multiple tasks out of box**
85
+
86
+ The toolbox directly supports multiple detection tasks such as **object detection**, **instance segmentation**, **panoptic segmentation**, and **semi-supervised object detection**.
87
+
88
+ - **High efficiency**
89
+
90
+ All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
91
+
92
+ - **State of the art**
93
+
94
+ The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
95
+ The newly released [RTMDet](configs/rtmdet) also obtains new state-of-the-art results on real-time instance segmentation and rotated object detection tasks and the best parameter-accuracy trade-off on object detection.
96
+
97
+ </details>
98
+
99
+ Apart from MMDetection, we also released [MMEngine](https://github.com/open-mmlab/mmengine) for model training and [MMCV](https://github.com/open-mmlab/mmcv) for computer vision research, which are heavily depended on by this toolbox.
100
+
101
+ ## What's New
102
+
103
+ ### Highlight
104
+
105
+ We are excited to announce our latest work on real-time object recognition tasks, **RTMDet**, a family of fully convolutional single-stage detectors. RTMDet not only achieves the best parameter-accuracy trade-off on object detection from tiny to extra-large model sizes but also obtains new state-of-the-art performance on instance segmentation and rotated object detection tasks. Details can be found in the [technical report](https://arxiv.org/abs/2212.07784). Pre-trained models are [here](configs/rtmdet).
106
+
107
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
108
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
109
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
110
+
111
+ | Task | Dataset | AP | FPS(TRT FP16 BS1 3090) |
112
+ | ------------------------ | ------- | ------------------------------------ | ---------------------- |
113
+ | Object Detection | COCO | 52.8 | 322 |
114
+ | Instance Segmentation | COCO | 44.6 | 188 |
115
+ | Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 |
116
+
117
+ <div align=center>
118
+ <img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
119
+ </div>
120
+
121
+ **v3.1.0** was released on 30/6/2023:
122
+
123
+ - Supports tracking algorithms including multi-object tracking (MOT) algorithms SORT, DeepSORT, StrongSORT, OCSORT, ByteTrack, QDTrack, and video instance segmentation (VIS) algorithm MaskTrackRCNN, Mask2Former-VIS.
124
+ - Support [ViTDet](projects/ViTDet)
125
+ - Supports inference and evaluation of multimodal algorithms [GLIP](configs/glip) and [XDecoder](projects/XDecoder), and also supports datasets such as COCO semantic segmentation, COCO Caption, ADE20k general segmentation, and RefCOCO. GLIP fine-tuning will be supported in the future.
126
+ - Provides a [gradio demo](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/projects/gradio_demo/README.md) for image type tasks of MMDetection, making it easy for users to experience.
127
+
128
+ ## Installation
129
+
130
+ Please refer to [Installation](https://mmdetection.readthedocs.io/en/latest/get_started.html) for installation instructions.
131
+
132
+ ## Getting Started
133
+
134
+ Please see [Overview](https://mmdetection.readthedocs.io/en/latest/get_started.html) for the general introduction of MMDetection.
135
+
136
+ For detailed user guides and advanced guides, please refer to our [documentation](https://mmdetection.readthedocs.io/en/latest/):
137
+
138
+ - User Guides
139
+
140
+ <details>
141
+
142
+ - [Train & Test](https://mmdetection.readthedocs.io/en/latest/user_guides/index.html#train-test)
143
+ - [Learn about Configs](https://mmdetection.readthedocs.io/en/latest/user_guides/config.html)
144
+ - [Inference with existing models](https://mmdetection.readthedocs.io/en/latest/user_guides/inference.html)
145
+ - [Dataset Prepare](https://mmdetection.readthedocs.io/en/latest/user_guides/dataset_prepare.html)
146
+ - [Test existing models on standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/test.html)
147
+ - [Train predefined models on standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/train.html)
148
+ - [Train with customized datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/train.html#train-with-customized-datasets)
149
+ - [Train with customized models and standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/new_model.html)
150
+ - [Finetuning Models](https://mmdetection.readthedocs.io/en/latest/user_guides/finetune.html)
151
+ - [Test Results Submission](https://mmdetection.readthedocs.io/en/latest/user_guides/test_results_submission.html)
152
+ - [Weight initialization](https://mmdetection.readthedocs.io/en/latest/user_guides/init_cfg.html)
153
+ - [Use a single stage detector as RPN](https://mmdetection.readthedocs.io/en/latest/user_guides/single_stage_as_rpn.html)
154
+ - [Semi-supervised Object Detection](https://mmdetection.readthedocs.io/en/latest/user_guides/semi_det.html)
155
+ - [Useful Tools](https://mmdetection.readthedocs.io/en/latest/user_guides/index.html#useful-tools)
156
+
157
+ </details>
158
+
159
+ - Advanced Guides
160
+
161
+ <details>
162
+
163
+ - [Basic Concepts](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#basic-concepts)
164
+ - [Component Customization](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#component-customization)
165
+ - [How to](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#how-to)
166
+
167
+ </details>
168
+
169
+ We also provide object detection colab tutorial [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_Tutorial.ipynb) and instance segmentation colab tutorial [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_InstanceSeg_Tutorial.ipynb).
170
+
171
+ To migrate from MMDetection 2.x, please refer to [migration](https://mmdetection.readthedocs.io/en/latest/migration.html).
172
+
173
+ ## Overview of Benchmark and Model Zoo
174
+
175
+ Results and models are available in the [model zoo](docs/en/model_zoo.md).
176
+
177
+ <div align="center">
178
+ <b>Architectures</b>
179
+ </div>
180
+ <table align="center">
181
+ <tbody>
182
+ <tr align="center" valign="bottom">
183
+ <td>
184
+ <b>Object Detection</b>
185
+ </td>
186
+ <td>
187
+ <b>Instance Segmentation</b>
188
+ </td>
189
+ <td>
190
+ <b>Panoptic Segmentation</b>
191
+ </td>
192
+ <td>
193
+ <b>Other</b>
194
+ </td>
195
+ </tr>
196
+ <tr valign="top">
197
+ <td>
198
+ <ul>
199
+ <li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
200
+ <li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
201
+ <li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
202
+ <li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
203
+ <li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
204
+ <li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
205
+ <li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
206
+ <li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
207
+ <li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
208
+ <li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
209
+ <li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
210
+ <li><a href="configs/centernet">CenterNet (CVPR'2019)</a></li>
211
+ <li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
212
+ <li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
213
+ <li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
214
+ <li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
215
+ <li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
216
+ <li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
217
+ <li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
218
+ <li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
219
+ <li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
220
+ <li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
221
+ <li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
222
+ <li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
223
+ <li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
224
+ <li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
225
+ <li><a href="configs/detr">DETR (ECCV'2020)</a></li>
226
+ <li><a href="configs/paa">PAA (ECCV'2020)</a></li>
227
+ <li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
228
+ <li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
229
+ <li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
230
+ <li><a href="configs/yolox">YOLOX (CVPR'2021)</a></li>
231
+ <li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
232
+ <li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
233
+ <li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
234
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
235
+ <li><a href="configs/conditional_detr">Conditional DETR (ICCV'2021)</a></li>
236
+ <li><a href="configs/dab_detr">DAB-DETR (ICLR'2022)</a></li>
237
+ <li><a href="configs/dino">DINO (ICLR'2023)</a></li>
238
+ <li><a href="configs/glip">GLIP (CVPR'2022)</a></li>
239
+ <li><a href="projects/DiffusionDet">DiffusionDet (ArXiv'2023)</a></li>
240
+ <li><a href="projects/EfficientDet">EfficientDet (CVPR'2020)</a></li>
241
+ <li><a href="projects/Detic">Detic (ECCV'2022)</a></li>
242
+ </ul>
243
+ </td>
244
+ <td>
245
+ <ul>
246
+ <li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
247
+ <li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
248
+ <li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
249
+ <li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
250
+ <li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
251
+ <li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
252
+ <li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
253
+ <li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
254
+ <li><a href="configs/detectors">DetectoRS (ArXiv'2020)</a></li>
255
+ <li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
256
+ <li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
257
+ <li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
258
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
259
+ <li><a href="configs/condinst">CondInst (ECCV'2020)</a></li>
260
+ <li><a href="projects/SparseInst">SparseInst (CVPR'2022)</a></li>
261
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
262
+ <li><a href="configs/boxinst">BoxInst (CVPR'2021)</a></li>
263
+ </ul>
264
+ </td>
265
+ <td>
266
+ <ul>
267
+ <li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
268
+ <li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
269
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
270
+ </ul>
271
+ </td>
272
+ <td>
273
+ </ul>
274
+ <li><b>Contrastive Learning</b></li>
275
+ <ul>
276
+ <ul>
277
+ <li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
278
+ <li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
279
+ <li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
280
+ </ul>
281
+ </ul>
282
+ </ul>
283
+ <li><b>Distillation</b></li>
284
+ <ul>
285
+ <ul>
286
+ <li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
287
+ <li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
288
+ </ul>
289
+ </ul>
290
+ <li><b>Semi-Supervised Object Detection</b></li>
291
+ <ul>
292
+ <ul>
293
+ <li><a href="configs/soft_teacher">Soft Teacher (ICCV'2021)</a></li>
294
+ </ul>
295
+ </ul>
296
+ </ul>
297
+ </td>
298
+ </tr>
299
+ </td>
300
+ </tr>
301
+ </tbody>
302
+ </table>
303
+
304
+ <div align="center">
305
+ <b>Components</b>
306
+ </div>
307
+ <table align="center">
308
+ <tbody>
309
+ <tr align="center" valign="bottom">
310
+ <td>
311
+ <b>Backbones</b>
312
+ </td>
313
+ <td>
314
+ <b>Necks</b>
315
+ </td>
316
+ <td>
317
+ <b>Loss</b>
318
+ </td>
319
+ <td>
320
+ <b>Common</b>
321
+ </td>
322
+ </tr>
323
+ <tr valign="top">
324
+ <td>
325
+ <ul>
326
+ <li>VGG (ICLR'2015)</li>
327
+ <li>ResNet (CVPR'2016)</li>
328
+ <li>ResNeXt (CVPR'2017)</li>
329
+ <li>MobileNetV2 (CVPR'2018)</li>
330
+ <li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
331
+ <li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
332
+ <li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
333
+ <li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
334
+ <li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
335
+ <li><a href="configs/resnest">ResNeSt (ArXiv'2020)</a></li>
336
+ <li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
337
+ <li><a href="configs/swin">Swin (CVPR'2021)</a></li>
338
+ <li><a href="configs/pvt">PVTv2 (ArXiv'2021)</a></li>
339
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
340
+ <li><a href="configs/efficientnet">EfficientNet (ArXiv'2021)</a></li>
341
+ <li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
342
+ <li><a href="projects/ConvNeXt-V2">ConvNeXtv2 (ArXiv'2023)</a></li>
343
+ </ul>
344
+ </td>
345
+ <td>
346
+ <ul>
347
+ <li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
348
+ <li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
349
+ <li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
350
+ <li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
351
+ <li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
352
+ <li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
353
+ </ul>
354
+ </td>
355
+ <td>
356
+ <ul>
357
+ <li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
358
+ <li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
359
+       <li><a href="configs/seesaw_loss">Seesaw Loss (CVPR'2021)</a></li>
360
+ </ul>
361
+ </td>
362
+ <td>
363
+ <ul>
364
+ <li><a href="configs/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
365
+ <li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
366
+ <li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
367
+ <li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
368
+ <li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
369
+ <li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
370
+ <li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
371
+ <li><a href="configs/resnet_strikes_back">Resnet strikes back (ArXiv'2021)</a></li>
372
+ </ul>
373
+ </td>
374
+ </tr>
375
+ </td>
376
+ </tr>
377
+ </tbody>
378
+ </table>
379
+
380
+ Some other methods are also supported in [projects using MMDetection](./docs/en/notes/projects.md).
381
+
382
+ ## FAQ
383
+
384
+ Please refer to [FAQ](docs/en/notes/faq.md) for frequently asked questions.
385
+
386
+ ## Contributing
387
+
388
+ We appreciate all contributions to improve MMDetection. Ongoing projects can be found in our [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects). We welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
389
+
390
+ ## Acknowledgement
391
+
392
+ MMDetection is an open source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
393
+ We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors.
394
+
395
+ ## Citation
396
+
397
+ If you use this toolbox or benchmark in your research, please cite this project.
398
+
399
+ ```
400
+ @article{mmdetection,
401
+ title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
402
+ author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
403
+ Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
404
+ Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
405
+ Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
406
+ Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
407
+ and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
408
+ journal= {arXiv preprint arXiv:1906.07155},
409
+ year={2019}
410
+ }
411
+ ```
412
+
413
+ ## License
414
+
415
+ This project is released under the [Apache 2.0 license](LICENSE).
416
+
417
+ ## Projects in OpenMMLab
418
+
419
+ - [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
420
+ - [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
421
+ - [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark.
422
+ - [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox.
423
+ - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
424
+ - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
425
+ - [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
426
+ - [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
427
+ - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
428
+ - [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
429
+ - [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
430
+ - [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
431
+ - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
432
+ - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
433
+ - [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
434
+ - [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
435
+ - [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
436
+ - [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
437
+ - [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
438
+ - [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
439
+ - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
440
+ - [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
441
+ - [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries.
442
+ - [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab.
mmdetection/README_zh-CN.md ADDED
@@ -0,0 +1,461 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+ <img src="resources/mmdet-logo.png" width="600"/>
3
+ <div>&nbsp;</div>
4
+ <div align="center">
5
+ <b><font size="5">OpenMMLab 官网</font></b>
6
+ <sup>
7
+ <a href="https://openmmlab.com">
8
+ <i><font size="4">HOT</font></i>
9
+ </a>
10
+ </sup>
11
+ &nbsp;&nbsp;&nbsp;&nbsp;
12
+ <b><font size="5">OpenMMLab 开放平台</font></b>
13
+ <sup>
14
+ <a href="https://platform.openmmlab.com">
15
+ <i><font size="4">TRY IT OUT</font></i>
16
+ </a>
17
+ </sup>
18
+ </div>
19
+ <div>&nbsp;</div>
20
+
21
+ [![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet)
22
+ [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/)
23
+ [![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions)
24
+ [![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection)
25
+ [![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/main/LICENSE)
26
+ [![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
27
+ [![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
28
+
29
+ [📘使用文档](https://mmdetection.readthedocs.io/zh_CN/latest/) |
30
+ [🛠️安装教程](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html) |
31
+ [👀模型库](https://mmdetection.readthedocs.io/zh_CN/latest/model_zoo.html) |
32
+ [🆕更新日志](https://mmdetection.readthedocs.io/en/latest/notes/changelog.html) |
33
+ [🚀进行中的项目](https://github.com/open-mmlab/mmdetection/projects) |
34
+ [🤔报告问题](https://github.com/open-mmlab/mmdetection/issues/new/choose)
35
+
36
+ </div>
37
+
38
+ <div align="center">
39
+
40
+ [English](README.md) | 简体中文
41
+
42
+ </div>
43
+
44
+ <div align="center">
45
+ <a href="https://openmmlab.medium.com/" style="text-decoration:none;">
46
+ <img src="https://user-images.githubusercontent.com/25839884/219255827-67c1a27f-f8c5-46a9-811d-5e57448c61d1.png" width="3%" alt="" /></a>
47
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
48
+ <a href="https://discord.com/channels/1037617289144569886/1046608014234370059" style="text-decoration:none;">
49
+ <img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
50
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
51
+ <a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
52
+ <img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
53
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
54
+ <a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
55
+ <img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
56
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
57
+ <a href="https://space.bilibili.com/1293512903" style="text-decoration:none;">
58
+ <img src="https://user-images.githubusercontent.com/25839884/219026751-d7d14cce-a7c9-4e82-9942-8375fca65b99.png" width="3%" alt="" /></a>
59
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
60
+ <a href="https://www.zhihu.com/people/openmmlab" style="text-decoration:none;">
61
+ <img src="https://user-images.githubusercontent.com/25839884/219026120-ba71e48b-6e94-4bd4-b4e9-b7d175b5e362.png" width="3%" alt="" /></a>
62
+ </div>
63
+
64
+ <div align="center">
65
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/6c29886f-ae7a-4a55-8be4-352ee85b7d3e"/>
66
+ </div>
67
+
68
+ ## 简介
69
+
70
+ MMDetection 是一个基于 PyTorch 的目标检测开源工具箱。它是 [OpenMMLab](https://openmmlab.com/) 项目的一部分。
71
+
72
+ 主分支代码目前支持 PyTorch 1.8 及其以上的版本。
73
+
74
+ <img src="https://user-images.githubusercontent.com/12907710/187674113-2074d658-f2fb-42d1-ac15-9c4a695e64d7.png"/>
75
+
76
+ <details open>
77
+ <summary>主要特性</summary>
78
+
79
+ - **模块化设计**
80
+
81
+ MMDetection 将检测框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的检测模型
82
+
83
+ - **支持多种检测任务**
84
+
85
+ MMDetection 支持了各种不同的检测任务,包括**目标检测**,**实例分割**,**全景分割**,以及**半监督目标检测**。
86
+
87
+ - **速度快**
88
+
89
+ 基本的框和 mask 操作都实现了 GPU 版本,训练速度比其他代码库更快或者相当,包括 [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) 和 [SimpleDet](https://github.com/TuSimple/simpledet)。
90
+
91
+ - **性能高**
92
+
93
+ MMDetection 这个算法库源自于 COCO 2018 目标检测竞赛的冠军团队 *MMDet* 团队开发的代码,我们在之后持续进行了改进和提升。
94
+ 新发布的 [RTMDet](configs/rtmdet) 还在实时实例分割和旋转目标检测任务中取得了最先进的成果,同时也在目标检测模型中取得了最佳的参数量和精度平衡。
95
+
96
+ </details>
97
+
98
+ 除了 MMDetection 之外,我们还开源了深度学习训练库 [MMEngine](https://github.com/open-mmlab/mmengine) 和计算机视觉基础库 [MMCV](https://github.com/open-mmlab/mmcv),它们是 MMDetection 的主要依赖。
99
+
100
+ ## 最新进展
101
+
102
+ ### 亮点
103
+
104
+ 我们很高兴向大家介绍我们在实时目标识别任务方面的最新成果 RTMDet,包含了一系列的全卷积单阶段检测模型。 RTMDet 不仅在从 tiny 到 extra-large 尺寸的目标检测模型上实现了最佳的参数量和精度的平衡,而且在实时实例分割和旋转目标检测任务上取得了最先进的成果。 更多细节请参阅[技术报告](https://arxiv.org/abs/2212.07784)。 预训练模型可以在[这里](configs/rtmdet)找到。
105
+
106
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
107
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
108
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
109
+
110
+ | Task | Dataset | AP | FPS(TRT FP16 BS1 3090) |
111
+ | ------------------------ | ------- | ------------------------------------ | ---------------------- |
112
+ | Object Detection | COCO | 52.8 | 322 |
113
+ | Instance Segmentation | COCO | 44.6 | 188 |
114
+ | Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 |
115
+
116
+ <div align=center>
117
+ <img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
118
+ </div>
119
+
120
+ **v3.1.0** 版本已经在 2023.6.30 发布:
121
+
122
+ - 支持 Tracking 类算法,包括多目标跟踪 MOT 算法 SORT、DeepSORT、StrongSORT、OCSORT、ByteTrack、QDTrack 和视频实例分割 VIS 算法 MaskTrackRCNN、Mask2Former-VIS。
123
+ - 支持 [ViTDet](projects/ViTDet)
124
+ - 支持多模态开放检测算法 [GLIP](configs/glip) 和 [XDecoder](projects/XDecoder) 推理和评估,并同时支持了 COCO 语义分割、COCO Caption、ADE20k 通用分割、RefCOCO 等数据集。后续将支持 GLIP 微调
125
+ - 提供了包括 MMDetection 图片任务的 [gradio demo](https://github.com/open-mmlab/mmdetection/blob/dev-3.x/projects/gradio_demo/README.md),方便用户快速体验
126
+
127
+ ## 安装
128
+
129
+ 请参考[快速入门文档](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html)进行安装。
130
+
131
+ ## 教程
132
+
133
+ 请阅读[概述](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html)对 MMDetection 进行初步的了解。
134
+
135
+ 为了帮助用户更进一步了解 MMDetection,我们准备了用户指南和进阶指南,请阅读我们的[文档](https://mmdetection.readthedocs.io/zh_CN/latest/):
136
+
137
+ - 用户指南
138
+
139
+ <details>
140
+
141
+ - [训练 & 测试](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/index.html#train-test)
142
+ - [学习配置文件](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/config.html)
143
+   - [使用已有模型在标准数据集上进行推理](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/inference.html)
144
+ - [数据集准备](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/dataset_prepare.html)
145
+ - [测试现有模型](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/test.html)
146
+ - [在标准数据集上训练预定义的模型](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/train.html)
147
+ - [在自定义数据集上进行训练](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/train.html#train-with-customized-datasets)
148
+ - [在标准数据集上训练自定义模型](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/new_model.html)
149
+ - [模型微调](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/finetune.html)
150
+ - [提交测试结果](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/test_results_submission.html)
151
+ - [权重初始化](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/init_cfg.html)
152
+ - [将单阶段检测器作为 RPN](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/single_stage_as_rpn.html)
153
+ - [半监督目标检测](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/semi_det.html)
154
+ - [实用工具](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/index.html#useful-tools)
155
+
156
+ </details>
157
+
158
+ - 进阶指南
159
+
160
+ <details>
161
+
162
+ - [基础概念](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#basic-concepts)
163
+ - [组件定制](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#component-customization)
164
+ - [How to](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#how-to)
165
+
166
+ </details>
167
+
168
+ 我们提供了检测的 colab 教程 [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_Tutorial.ipynb) 和 实例分割的 colab 教程 [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_InstanceSeg_Tutorial.ipynb)
169
+
170
+ 同时,我们还提供了 [MMDetection 中文解读文案汇总](docs/zh_cn/article.md)
171
+
172
+ 若需要将2.x版本的代码迁移至新版,请参考[迁移文档](https://mmdetection.readthedocs.io/en/latest/migration.html)。
173
+
174
+ ## 基准测试和模型库
175
+
176
+ 测试结果和模型可以在[模型库](docs/zh_cn/model_zoo.md)中找到。
177
+
178
+ <div align="center">
179
+ <b>算法架构</b>
180
+ </div>
181
+ <table align="center">
182
+ <tbody>
183
+ <tr align="center" valign="bottom">
184
+ <td>
185
+ <b>Object Detection</b>
186
+ </td>
187
+ <td>
188
+ <b>Instance Segmentation</b>
189
+ </td>
190
+ <td>
191
+ <b>Panoptic Segmentation</b>
192
+ </td>
193
+ <td>
194
+ <b>Other</b>
195
+ </td>
196
+ </tr>
197
+ <tr valign="top">
198
+ <td>
199
+ <ul>
200
+ <li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
201
+ <li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
202
+ <li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
203
+ <li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
204
+ <li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
205
+ <li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
206
+ <li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
207
+ <li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
208
+ <li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
209
+ <li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
210
+ <li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
211
+ <li><a href="configs/centernet">CenterNet (CVPR'2019)</a></li>
212
+ <li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
213
+ <li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
214
+ <li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
215
+ <li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
216
+ <li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
217
+ <li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
218
+ <li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
219
+ <li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
220
+ <li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
221
+ <li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
222
+ <li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
223
+ <li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
224
+ <li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
225
+ <li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
226
+ <li><a href="configs/detr">DETR (ECCV'2020)</a></li>
227
+ <li><a href="configs/paa">PAA (ECCV'2020)</a></li>
228
+ <li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
229
+ <li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
230
+ <li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
231
+ <li><a href="configs/yolox">YOLOX (CVPR'2021)</a></li>
232
+ <li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
233
+ <li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
234
+ <li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
235
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
236
+ <li><a href="configs/conditional_detr">Conditional DETR (ICCV'2021)</a></li>
237
+ <li><a href="configs/dab_detr">DAB-DETR (ICLR'2022)</a></li>
238
+ <li><a href="configs/dino">DINO (ICLR'2023)</a></li>
239
+ <li><a href="configs/glip">GLIP (CVPR'2022)</a></li>
240
+ <li><a href="projects/DiffusionDet">DiffusionDet (ArXiv'2023)</a></li>
241
+ <li><a href="projects/EfficientDet">EfficientDet (CVPR'2020)</a></li>
242
+ <li><a href="projects/Detic">Detic (ECCV'2022)</a></li>
243
+ </ul>
244
+ </td>
245
+ <td>
246
+ <ul>
247
+ <li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
248
+ <li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
249
+ <li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
250
+ <li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
251
+ <li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
252
+ <li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
253
+ <li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
254
+ <li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
255
+ <li><a href="configs/detectors">DetectoRS (ArXiv'2020)</a></li>
256
+ <li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
257
+ <li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
258
+ <li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
259
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
260
+ <li><a href="configs/condinst">CondInst (ECCV'2020)</a></li>
261
+ <li><a href="projects/SparseInst">SparseInst (CVPR'2022)</a></li>
262
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
263
+ <li><a href="configs/boxinst">BoxInst (CVPR'2021)</a></li>
264
+ </ul>
265
+ </td>
266
+ <td>
267
+ <ul>
268
+ <li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
269
+ <li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
270
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
271
+ </ul>
272
+ </td>
273
+ <td>
274
+ </ul>
275
+ <li><b>Contrastive Learning</b></li>
276
+ <ul>
277
+ <ul>
278
+ <li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
279
+ <li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
280
+ <li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
281
+ </ul>
282
+ </ul>
283
+ </ul>
284
+ <li><b>Distillation</b></li>
285
+ <ul>
286
+ <ul>
287
+ <li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
288
+ <li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
289
+ </ul>
290
+ </ul>
291
+ <li><b>Semi-Supervised Object Detection</b></li>
292
+ <ul>
293
+ <ul>
294
+ <li><a href="configs/soft_teacher">Soft Teacher (ICCV'2021)</a></li>
295
+ </ul>
296
+ </ul>
297
+ </ul>
298
+ </td>
299
+ </tr>
300
+ </td>
301
+ </tr>
302
+ </tbody>
303
+ </table>
304
+
305
+ <div align="center">
306
+ <b>模块组件</b>
307
+ </div>
308
+ <table align="center">
309
+ <tbody>
310
+ <tr align="center" valign="bottom">
311
+ <td>
312
+ <b>Backbones</b>
313
+ </td>
314
+ <td>
315
+ <b>Necks</b>
316
+ </td>
317
+ <td>
318
+ <b>Loss</b>
319
+ </td>
320
+ <td>
321
+ <b>Common</b>
322
+ </td>
323
+ </tr>
324
+ <tr valign="top">
325
+ <td>
326
+ <ul>
327
+ <li>VGG (ICLR'2015)</li>
328
+ <li>ResNet (CVPR'2016)</li>
329
+ <li>ResNeXt (CVPR'2017)</li>
330
+ <li>MobileNetV2 (CVPR'2018)</li>
331
+ <li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
332
+ <li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
333
+ <li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
334
+ <li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
335
+ <li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
336
+ <li><a href="configs/resnest">ResNeSt (ArXiv'2020)</a></li>
337
+ <li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
338
+ <li><a href="configs/swin">Swin (CVPR'2021)</a></li>
339
+ <li><a href="configs/pvt">PVTv2 (ArXiv'2021)</a></li>
340
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
341
+ <li><a href="configs/efficientnet">EfficientNet (ArXiv'2021)</a></li>
342
+ <li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
343
+ <li><a href="projects/ConvNeXt-V2">ConvNeXtv2 (ArXiv'2023)</a></li>
344
+ </ul>
345
+ </td>
346
+ <td>
347
+ <ul>
348
+ <li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
349
+ <li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
350
+ <li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
351
+ <li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
352
+ <li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
353
+ <li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
354
+ </ul>
355
+ </td>
356
+ <td>
357
+ <ul>
358
+ <li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
359
+ <li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
360
+ <li><a href="configs/seesaw_loss">Seesaw Loss (CVPR'2021)</a></li>
361
+ </ul>
362
+ </td>
363
+ <td>
364
+ <ul>
365
+ <li><a href="configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
366
+ <li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
367
+ <li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
368
+ <li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
369
+ <li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
370
+ <li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
371
+ <li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
372
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
373
+ </ul>
374
+ </td>
375
+ </tr>
376
+ </td>
377
+ </tr>
378
+ </tbody>
379
+ </table>
380
+
381
+ 我们在[基于 MMDetection 的项目](./docs/zh_cn/notes/projects.md)中列举了一些其他的支持的算法。
382
+
383
+ ## 常见问题
384
+
385
+ 请参考 [FAQ](docs/zh_cn/notes/faq.md) 了解其他用户的常见问题。
386
+
387
+ ## 贡献指南
388
+
389
+ 我们感谢所有的贡献者为改进和提升 MMDetection 所作出的努力。我们将正在进行中的项目添加进了[GitHub Projects](https://github.com/open-mmlab/mmdetection/projects)页面,非常欢迎社区用户能参与进这些项目中来。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。
390
+
391
+ ## 致谢
392
+
393
+ MMDetection 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。
394
+
395
+ ## 引用
396
+
397
+ 如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMDetection。
398
+
399
+ ```
400
+ @article{mmdetection,
401
+ title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
402
+ author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
403
+ Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
404
+ Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
405
+ Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
406
+ Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
407
+ and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
408
+ journal= {arXiv preprint arXiv:1906.07155},
409
+ year={2019}
410
+ }
411
+ ```
412
+
413
+ ## 开源许可证
414
+
415
+ 该项目采用 [Apache 2.0 开源许可证](LICENSE)。
416
+
417
+ ## OpenMMLab 的其他项目
418
+
419
+ - [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab 深度学习模型训练基础库
420
+ - [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库
421
+ - [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab 深度学习预训练工具箱
422
+ - [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab 新一代人工智能内容生成(AIGC)工具箱
423
+ - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
424
+ - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
425
+ - [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准
426
+ - [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO 系列工具箱与测试基准
427
+ - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
428
+ - [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包
429
+ - [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
430
+ - [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准
431
+ - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准
432
+ - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准
433
+ - [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准
434
+ - [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
435
+ - [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
436
+ - [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准
437
+ - [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
438
+ - [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱
439
+ - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架
440
+ - [MIM](https://github.com/open-mmlab/mim): OpenMMlab 项目、算法、模型的统一入口
441
+ - [MMEval](https://github.com/open-mmlab/mmeval): 统一开放的跨框架算法评测库
442
+ - [Playground](https://github.com/open-mmlab/playground): 收集和展示 OpenMMLab 相关的前沿、有趣的社区项目
443
+
444
+ ## 欢迎加入 OpenMMLab 社区
445
+
446
+ 扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3)
447
+
448
+ <div align="center">
449
+ <img src="resources/zhihu_qrcode.jpg" height="400" /> <img src="resources/qq_group_qrcode.jpg" height="400" />
450
+ </div>
451
+
452
+ 我们会在 OpenMMLab 社区为大家
453
+
454
+ - 📢 分享 AI 框架的前沿核心技术
455
+ - 💻 解读 PyTorch 常用模块源码
456
+ - 📰 发布 OpenMMLab 的相关新闻
457
+ - 🚀 介绍 OpenMMLab 开发的前沿算法
458
+ - 🏃 获取更高效的问题答疑和意见反馈
459
+ - 🔥 提供与各行各业开发者充分交流的平台
460
+
461
+ 干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬
mmdetection/configs/_base_/datasets/ade20k_instance.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for ADE20K instance segmentation (validation/test only).
dataset_type = 'ADE20KInstanceDataset'
data_root = 'data/ADEChallengeData2016/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2560, 640), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='ade20k_instance_val.json',
        data_prefix=dict(img='images/validation'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'ade20k_instance_val.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/ade20k_panoptic.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for ADE20K panoptic segmentation (validation/test only).
dataset_type = 'ADE20KPanopticDataset'
data_root = 'data/ADEChallengeData2016/'

backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2560, 640), keep_ratio=True),
    dict(type='LoadPanopticAnnotations', backend_args=backend_args),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

val_dataloader = dict(
    batch_size=1,
    # NOTE(review): num_workers=0 / persistent_workers=False here, unlike the
    # sibling ADE20K configs — presumably required by the panoptic loader;
    # confirm before "fixing".
    num_workers=0,
    persistent_workers=False,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='ade20k_panoptic_val.json',
        data_prefix=dict(img='images/validation/', seg='ade20k_panoptic_val/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoPanopticMetric',
    ann_file=data_root + 'ade20k_panoptic_val.json',
    seg_prefix=data_root + 'ade20k_panoptic_val/',
    backend_args=backend_args)
test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/ade20k_semantic.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for ADE20K semantic segmentation (validation/test only).
dataset_type = 'ADE20KSegDataset'
data_root = 'data/ADEChallengeData2016/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/ADEChallengeData2016/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 512), keep_ratio=True),
    dict(
        type='LoadAnnotations',
        with_bbox=False,
        with_mask=False,
        with_seg=True,
        # reduce_zero_label shifts labels so that 0 (ignore) is dropped.
        reduce_zero_label=True),
    dict(
        type='PackDetInputs', meta_keys=('img_path', 'ori_shape', 'img_shape'))
]

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_prefix=dict(
            img_path='images/validation',
            seg_map_path='annotations/validation'),
        pipeline=test_pipeline))
test_dataloader = val_dataloader

val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/cityscapes_detection.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for Cityscapes object detection (bbox only).
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/segmentation/',
#         'data/': 's3://openmmlab/datasets/segmentation/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=[(2048, 800), (2048, 1024)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        # Cityscapes train is small; repeat it 8x per epoch.
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline,
        backend_args=backend_args))

test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
    metric='bbox',
    backend_args=backend_args)

test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/cityscapes_instance.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for Cityscapes instance segmentation (bbox + mask).
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/segmentation/',
#         'data/': 's3://openmmlab/datasets/segmentation/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=[(2048, 800), (2048, 1024)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        # Cityscapes train is small; repeat it 8x per epoch.
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline,
        backend_args=backend_args))

test_dataloader = val_dataloader

# Two evaluators: COCO-style bbox/segm mAP plus the official Cityscapes
# instance-level metric.
val_evaluator = [
    dict(
        type='CocoMetric',
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        metric=['bbox', 'segm'],
        backend_args=backend_args),
    dict(
        type='CityScapesMetric',
        seg_prefix=data_root + 'gtFine/val',
        outfile_prefix='./work_dirs/cityscapes_metric/instance',
        backend_args=backend_args)
]

test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file='annotations/instancesonly_filtered_gtFine_test.json',
#         data_prefix=dict(img='leftImg8bit/test/'),
#         test_mode=True,
#         filter_cfg=dict(filter_empty_gt=True, min_size=32),
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CityScapesMetric',
#     format_only=True,
#     outfile_prefix='./work_dirs/cityscapes_metric/test')
mmdetection/configs/_base_/datasets/coco_caption.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Data settings for COCO image captioning (Karpathy split, validation only).

dataset_type = 'CocoCaptionDataset'
data_root = 'data/coco/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

test_pipeline = [
    dict(
        type='LoadImageFromFile',
        imdecode_backend='pillow',
        backend_args=backend_args),
    dict(
        type='Resize',
        scale=(224, 224),
        interpolation='bicubic',
        backend='pillow'),
    dict(type='PackInputs', meta_keys=['image_id']),
]

# ann_file download from
# train dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json # noqa
# val dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json # noqa
# test dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json # noqa
# val evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json # noqa
# test evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json # noqa
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/coco_karpathy_val.json',
        pipeline=test_pipeline,
    ))

val_evaluator = dict(
    type='COCOCaptionMetric',
    ann_file=data_root + 'annotations/coco_karpathy_val_gt.json',
)

# # If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/coco_detection.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset settings for COCO object detection (bbox only).
dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator

# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoMetric',
#     metric='bbox',
#     format_only=True,
#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_detection/test')
mmdetection/configs/_base_/datasets/coco_instance.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ dataset_type = 'CocoDataset'
3
+ data_root = '/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/'
4
+
5
+
6
+ # Example to use different file client
7
+ # Method 1: simply set the data root and let the file I/O module
8
+ # automatically infer from prefix (not support LMDB and Memcache yet)
9
+
10
+ # data_root = 's3://openmmlab/datasets/detection/coco/'
11
+
12
+ # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
13
+ # backend_args = dict(
14
+ # backend='petrel',
15
+ # path_mapping=dict({
16
+ # './data/': 's3://openmmlab/datasets/detection/',
17
+ # 'data/': 's3://openmmlab/datasets/detection/'
18
+ # }))
19
+ backend_args = None
20
+
21
+ train_pipeline = [
22
+ dict(type='LoadImageFromFile', backend_args=backend_args),
23
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
24
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
25
+ dict(type='RandomFlip', prob=0.5),
26
+ dict(type='PackDetInputs')
27
+ ]
28
+ test_pipeline = [
29
+ dict(type='LoadImageFromFile', backend_args=backend_args),
30
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
31
+ # If you don't have a gt annotation, delete the pipeline
32
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
33
+ dict(
34
+ type='PackDetInputs',
35
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
36
+ 'scale_factor'))
37
+ ]
38
+ train_dataloader = dict(
39
+ batch_size=8,
40
+ num_workers=4,
41
+ persistent_workers=True,
42
+ sampler=dict(type='DefaultSampler', shuffle=True),
43
+ batch_sampler=dict(type='AspectRatioBatchSampler'),
44
+ dataset=dict(
45
+ type=dataset_type,
46
+ data_root=data_root,
47
+ ann_file='/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/annotations/instances_train2014.json',
48
+ data_prefix=dict(img='/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/images/train2014/'),
49
+ filter_cfg=dict(filter_empty_gt=True, min_size=32),
50
+ pipeline=train_pipeline,
51
+ backend_args=backend_args))
52
+ val_dataloader = dict(
53
+ batch_size=1,
54
+ num_workers=4,
55
+ persistent_workers=True,
56
+ drop_last=False,
57
+ sampler=dict(type='DefaultSampler', shuffle=False),
58
+ dataset=dict(
59
+ type=dataset_type,
60
+ data_root=data_root,
61
+ # ann_file='/mnt/bn/panxuran/instances_val2014_part_1000.json',
62
+ ann_file='/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/annotations/instances_val2014.json',
63
+ data_prefix=dict(img='/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/images/val2014/'),
64
+ test_mode=True,
65
+ pipeline=test_pipeline,
66
+ backend_args=backend_args))
67
+ test_dataloader = val_dataloader
68
+
69
+ val_evaluator = dict(
70
+ type='CocoMetric',
71
+ # ann_file='/mnt/bn/panxuran/instances_val2014_part_1000.json',
72
+ ann_file='/mnt/bn/zhicheng-dev-v6/dataset/coco-caption/annotations/instances_val2014.json',
73
+ metric=['segm'],
74
+ format_only=False,
75
+ backend_args=backend_args)
76
+ test_evaluator = val_evaluator
77
+
78
+ # inference on test dataset and
79
+ # format the output results for submission.
80
+ # test_dataloader = dict(
81
+ # batch_size=1,
82
+ # num_workers=2,
83
+ # persistent_workers=True,
84
+ # drop_last=False,
85
+ # sampler=dict(type='DefaultSampler', shuffle=False),
86
+ # dataset=dict(
87
+ # type=dataset_type,
88
+ # data_root=data_root,
89
+ # ann_file=data_root + 'annotations/image_info_test-dev2017.json',
90
+ # data_prefix=dict(img='test2017/'),
91
+ # test_mode=True,
92
+ # pipeline=test_pipeline))
93
+ # test_evaluator = dict(
94
+ # type='CocoMetric',
95
+ # metric=['bbox', 'segm'],
96
+ # format_only=True,
97
+ # ann_file=data_root + 'annotations/image_info_test-dev2017.json',
98
+ # outfile_prefix='./work_dirs/coco_instance/test')
mmdetection/configs/_base_/datasets/coco_instance_semantic.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ dataset_type = 'CocoDataset'
3
+ data_root = 'data/coco/'
4
+
5
+ # Example to use different file client
6
+ # Method 1: simply set the data root and let the file I/O module
7
+ # automatically infer from prefix (not support LMDB and Memcache yet)
8
+
9
+ # data_root = 's3://openmmlab/datasets/detection/coco/'
10
+
11
+ # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
12
+ # backend_args = dict(
13
+ # backend='petrel',
14
+ # path_mapping=dict({
15
+ # './data/': 's3://openmmlab/datasets/detection/',
16
+ # 'data/': 's3://openmmlab/datasets/detection/'
17
+ # }))
18
+ backend_args = None
19
+
20
+ train_pipeline = [
21
+ dict(type='LoadImageFromFile', backend_args=backend_args),
22
+ dict(
23
+ type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
24
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
25
+ dict(type='RandomFlip', prob=0.5),
26
+ dict(type='PackDetInputs')
27
+ ]
28
+ test_pipeline = [
29
+ dict(type='LoadImageFromFile', backend_args=backend_args),
30
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
31
+ # If you don't have a gt annotation, delete the pipeline
32
+ dict(
33
+ type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
34
+ dict(
35
+ type='PackDetInputs',
36
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
37
+ 'scale_factor'))
38
+ ]
39
+
40
+ train_dataloader = dict(
41
+ batch_size=2,
42
+ num_workers=2,
43
+ persistent_workers=True,
44
+ sampler=dict(type='DefaultSampler', shuffle=True),
45
+ batch_sampler=dict(type='AspectRatioBatchSampler'),
46
+ dataset=dict(
47
+ type=dataset_type,
48
+ data_root=data_root,
49
+ ann_file='annotations/instances_train2017.json',
50
+ data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
51
+ filter_cfg=dict(filter_empty_gt=True, min_size=32),
52
+ pipeline=train_pipeline,
53
+ backend_args=backend_args))
54
+
55
+ val_dataloader = dict(
56
+ batch_size=1,
57
+ num_workers=2,
58
+ persistent_workers=True,
59
+ drop_last=False,
60
+ sampler=dict(type='DefaultSampler', shuffle=False),
61
+ dataset=dict(
62
+ type=dataset_type,
63
+ data_root=data_root,
64
+ ann_file='annotations/instances_val2017.json',
65
+ data_prefix=dict(img='val2017/'),
66
+ test_mode=True,
67
+ pipeline=test_pipeline,
68
+ backend_args=backend_args))
69
+
70
+ test_dataloader = val_dataloader
71
+
72
+ val_evaluator = dict(
73
+ type='CocoMetric',
74
+ ann_file=data_root + 'annotations/instances_val2017.json',
75
+ metric=['bbox', 'segm'],
76
+ format_only=False,
77
+ backend_args=backend_args)
78
+ test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/coco_panoptic.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ dataset_type = 'CocoPanopticDataset'
3
+ data_root = 'data/coco/'
4
+
5
+ # Example to use different file client
6
+ # Method 1: simply set the data root and let the file I/O module
7
+ # automatically infer from prefix (not support LMDB and Memcache yet)
8
+
9
+ # data_root = 's3://openmmlab/datasets/detection/coco/'
10
+
11
+ # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
12
+ # backend_args = dict(
13
+ # backend='petrel',
14
+ # path_mapping=dict({
15
+ # './data/': 's3://openmmlab/datasets/detection/',
16
+ # 'data/': 's3://openmmlab/datasets/detection/'
17
+ # }))
18
+ backend_args = None
19
+
20
+ train_pipeline = [
21
+ dict(type='LoadImageFromFile', backend_args=backend_args),
22
+ dict(type='LoadPanopticAnnotations', backend_args=backend_args),
23
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
24
+ dict(type='RandomFlip', prob=0.5),
25
+ dict(type='PackDetInputs')
26
+ ]
27
+ test_pipeline = [
28
+ dict(type='LoadImageFromFile', backend_args=backend_args),
29
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
30
+ dict(type='LoadPanopticAnnotations', backend_args=backend_args),
31
+ dict(
32
+ type='PackDetInputs',
33
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
34
+ 'scale_factor'))
35
+ ]
36
+
37
+ train_dataloader = dict(
38
+ batch_size=1,
39
+ num_workers=0,
40
+ persistent_workers=False,
41
+ sampler=dict(type='DefaultSampler', shuffle=True),
42
+ batch_sampler=dict(type='AspectRatioBatchSampler'),
43
+ dataset=dict(
44
+ type=dataset_type,
45
+ data_root=data_root,
46
+ ann_file='annotations/panoptic_train2017.json',
47
+ data_prefix=dict(
48
+ img='train2017/', seg='annotations/panoptic_train2017/'),
49
+ filter_cfg=dict(filter_empty_gt=True, min_size=32),
50
+ pipeline=train_pipeline,
51
+ backend_args=backend_args))
52
+ val_dataloader = dict(
53
+ batch_size=1,
54
+ num_workers=2,
55
+ persistent_workers=True,
56
+ drop_last=False,
57
+ sampler=dict(type='DefaultSampler', shuffle=False),
58
+ dataset=dict(
59
+ type=dataset_type,
60
+ data_root=data_root,
61
+ ann_file='annotations/panoptic_val2017.json',
62
+ data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
63
+ test_mode=True,
64
+ pipeline=test_pipeline,
65
+ backend_args=backend_args))
66
+ test_dataloader = val_dataloader
67
+
68
+ val_evaluator = dict(
69
+ type='CocoPanopticMetric',
70
+ ann_file=data_root + 'annotations/panoptic_val2017.json',
71
+ seg_prefix=data_root + 'annotations/panoptic_val2017/',
72
+ backend_args=backend_args)
73
+ test_evaluator = val_evaluator
74
+
75
+ # inference on test dataset and
76
+ # format the output results for submission.
77
+ # test_dataloader = dict(
78
+ # batch_size=1,
79
+ # num_workers=1,
80
+ # persistent_workers=True,
81
+ # drop_last=False,
82
+ # sampler=dict(type='DefaultSampler', shuffle=False),
83
+ # dataset=dict(
84
+ # type=dataset_type,
85
+ # data_root=data_root,
86
+ # ann_file='annotations/panoptic_image_info_test-dev2017.json',
87
+ # data_prefix=dict(img='test2017/'),
88
+ # test_mode=True,
89
+ # pipeline=test_pipeline))
90
+ # test_evaluator = dict(
91
+ # type='CocoPanopticMetric',
92
+ # format_only=True,
93
+ # ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
94
+ # outfile_prefix='./work_dirs/coco_panoptic/test')
mmdetection/configs/_base_/datasets/coco_semantic.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ dataset_type = 'CocoSegDataset'
3
+ data_root = 'data/coco/'
4
+
5
+ # Example to use different file client
6
+ # Method 1: simply set the data root and let the file I/O module
7
+ # automatically infer from prefix (not support LMDB and Memcache yet)
8
+
9
+ # data_root = 's3://openmmlab/datasets/detection/coco/'
10
+
11
+ # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
12
+ # backend_args = dict(
13
+ # backend='petrel',
14
+ # path_mapping=dict({
15
+ # './data/': 's3://openmmlab/datasets/detection/',
16
+ # 'data/': 's3://openmmlab/datasets/detection/'
17
+ # }))
18
+ backend_args = None
19
+
20
+ train_pipeline = [
21
+ dict(type='LoadImageFromFile', backend_args=backend_args),
22
+ dict(
23
+ type='LoadAnnotations',
24
+ with_bbox=False,
25
+ with_label=False,
26
+ with_seg=True),
27
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
28
+ dict(type='RandomFlip', prob=0.5),
29
+ dict(type='PackDetInputs')
30
+ ]
31
+
32
+ test_pipeline = [
33
+ dict(type='LoadImageFromFile', backend_args=backend_args),
34
+ dict(type='Resize', scale=(1333, 800), keep_ratio=True),
35
+ dict(
36
+ type='LoadAnnotations',
37
+ with_bbox=False,
38
+ with_label=False,
39
+ with_seg=True),
40
+ dict(
41
+ type='PackDetInputs',
42
+ meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
43
+ ]
44
+
45
+ # For stuffthingmaps_semseg, please refer to
46
+ # `docs/en/user_guides/dataset_prepare.md`
47
+ train_dataloader = dict(
48
+ batch_size=2,
49
+ num_workers=2,
50
+ persistent_workers=True,
51
+ sampler=dict(type='DefaultSampler', shuffle=True),
52
+ batch_sampler=dict(type='AspectRatioBatchSampler'),
53
+ dataset=dict(
54
+ type=dataset_type,
55
+ data_root=data_root,
56
+ data_prefix=dict(
57
+ img_path='train2017/',
58
+ seg_map_path='stuffthingmaps_semseg/train2017/'),
59
+ pipeline=train_pipeline))
60
+
61
+ val_dataloader = dict(
62
+ batch_size=1,
63
+ num_workers=2,
64
+ persistent_workers=True,
65
+ drop_last=False,
66
+ sampler=dict(type='DefaultSampler', shuffle=False),
67
+ dataset=dict(
68
+ type=dataset_type,
69
+ data_root=data_root,
70
+ data_prefix=dict(
71
+ img_path='val2017/',
72
+ seg_map_path='stuffthingmaps_semseg/val2017/'),
73
+ pipeline=test_pipeline))
74
+
75
+ test_dataloader = val_dataloader
76
+
77
+ val_evaluator = dict(type='SemSegMetric', iou_metrics=['mIoU'])
78
+ test_evaluator = val_evaluator
mmdetection/configs/_base_/datasets/ct_detection.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # dataset settings
3
+ dataset_type = 'CocoCTDataset'
4
+ data_root = '/mnt/bn/panxuran/Slice_Data/slice_dataset_maximum_0402/'
5
+
6
+ # Example to use different file client
7
+ # Method 1: simply set the data root and let the file I/O module
8
+ # automatically infer from prefix (not support LMDB and Memcache yet)
9
+
10
+ # data_root = 's3://openmmlab/datasets/detection/coco/'
11
+
12
+ # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
13
+ # backend_args = dict(
14
+ # backend='petrel',
15
+ # path_mapping=dict({
16
+ # './data/': 's3://openmmlab/datasets/detection/',
17
+ # 'data/': 's3://openmmlab/datasets/detection/'
18
+ # }))
19
+ backend_args = None
20
+
21
+ train_pipeline = [
22
+ dict(type='LoadImageFromFile', backend_args=backend_args),
23
+ dict(type='LoadAnnotations', with_bbox=True),
24
+ dict(type='Resize', scale=(512, 512), keep_ratio=True),
25
+ dict(type='RandomFlip', prob=0.5),
26
+ dict(type='PackDetInputs')
27
+ ]
28
+ test_pipeline = [
29
+ dict(type='LoadImageFromFile', backend_args=backend_args),
30
+ dict(type='Resize', scale=(512, 512), keep_ratio=True),
31
+ # If you don't have a gt annotation, delete the pipeline
32
+ dict(type='LoadAnnotations', with_bbox=True),
33
+ dict(
34
+ type='PackDetInputs',
35
+ meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
36
+ 'scale_factor'))
37
+ ]
38
+ train_dataloader = dict(
39
+ batch_size=8,
40
+ num_workers=4,
41
+ persistent_workers=True,
42
+ sampler=dict(type='DefaultSampler', shuffle=True),
43
+ batch_sampler=dict(type='AspectRatioBatchSampler'),
44
+ dataset=dict(
45
+ type=dataset_type,
46
+ data_root=data_root,
47
+ ann_file='annotations/train_wsyn.json',
48
+ data_prefix=dict(img='images/train/'),
49
+ filter_cfg=dict(filter_empty_gt=False, min_size=32),
50
+ pipeline=train_pipeline,
51
+ backend_args=backend_args))
52
+ val_dataloader = dict(
53
+ batch_size=8,
54
+ num_workers=4,
55
+ persistent_workers=True,
56
+ drop_last=False,
57
+ sampler=dict(type='DefaultSampler', shuffle=False),
58
+ dataset=dict(
59
+ type=dataset_type,
60
+ data_root=data_root,
61
+ ann_file='annotations/test.json',
62
+ data_prefix=dict(img='images/test/'),
63
+ test_mode=True,
64
+ pipeline=test_pipeline,
65
+ backend_args=backend_args))
66
+ test_dataloader = val_dataloader
67
+
68
+ val_evaluator = dict(
69
+ type='CocoMetric',
70
+ ann_file=data_root + 'annotations/test.json',
71
+ metric='bbox',
72
+ format_only=False,
73
+ backend_args=backend_args)
74
+ test_evaluator = val_evaluator
75
+
76
+ # inference on test dataset and
77
+ # format the output results for submission.
78
+ # test_dataloader = dict(
79
+ # batch_size=1,
80
+ # num_workers=2,
81
+ # persistent_workers=True,
82
+ # drop_last=False,
83
+ # sampler=dict(type='DefaultSampler', shuffle=False),
84
+ # dataset=dict(
85
+ # type=dataset_type,
86
+ # data_root=data_root,
87
+ # ann_file=data_root + 'annotations/image_info_test-dev2017.json',
88
+ # data_prefix=dict(img='test2017/'),
89
+ # test_mode=True,
90
+ # pipeline=test_pipeline))
91
+ # test_evaluator = dict(
92
+ # type='CocoMetric',
93
+ # metric='bbox',
94
+ # format_only=True,
95
+ # ann_file=data_root + 'annotations/image_info_test-dev2017.json',
96
+ # outfile_prefix='./work_dirs/coco_detection/test')