jerpelhan commited on
Commit
6146368
·
0 Parent(s):

Initial commit

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .gitignore +191 -0
  3. Deformable-DETR/LICENSE +220 -0
  4. Deformable-DETR/README.md +169 -0
  5. Deformable-DETR/configs/r50_deformable_detr.sh +10 -0
  6. Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh +11 -0
  7. Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh +12 -0
  8. Deformable-DETR/configs/r50_deformable_detr_single_scale.sh +11 -0
  9. Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh +12 -0
  10. Deformable-DETR/datasets/__init__.py +33 -0
  11. Deformable-DETR/datasets/coco.py +169 -0
  12. Deformable-DETR/datasets/coco_eval.py +265 -0
  13. Deformable-DETR/datasets/coco_panoptic.py +107 -0
  14. Deformable-DETR/datasets/data_prefetcher.py +70 -0
  15. Deformable-DETR/datasets/panoptic_eval.py +52 -0
  16. Deformable-DETR/datasets/samplers.py +139 -0
  17. Deformable-DETR/datasets/transforms.py +284 -0
  18. Deformable-DETR/docs/changelog.md +3 -0
  19. Deformable-DETR/models/__init__.py +15 -0
  20. Deformable-DETR/models/backbone.py +138 -0
  21. Deformable-DETR/models/deformable_detr.py +492 -0
  22. Deformable-DETR/models/deformable_transformer.py +394 -0
  23. Deformable-DETR/models/matcher.py +102 -0
  24. Deformable-DETR/models/ops/functions/__init__.py +10 -0
  25. Deformable-DETR/models/ops/functions/ms_deform_attn_func.py +61 -0
  26. Deformable-DETR/models/ops/make.sh +10 -0
  27. Deformable-DETR/models/ops/modules/__init__.py +9 -0
  28. Deformable-DETR/models/ops/modules/ms_deform_attn.py +115 -0
  29. Deformable-DETR/models/ops/setup.py +73 -0
  30. Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.cpp +41 -0
  31. Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.h +33 -0
  32. Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu +153 -0
  33. Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h +30 -0
  34. Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh +1327 -0
  35. Deformable-DETR/models/ops/src/ms_deform_attn.h +62 -0
  36. Deformable-DETR/models/ops/src/vision.cpp +16 -0
  37. Deformable-DETR/models/ops/test.py +89 -0
  38. Deformable-DETR/models/position_encoding.py +97 -0
  39. Deformable-DETR/models/segmentation.py +369 -0
  40. Deformable-DETR/tools/launch.py +192 -0
  41. Deformable-DETR/tools/run_dist_launch.sh +29 -0
  42. Deformable-DETR/tools/run_dist_slurm.sh +33 -0
  43. Deformable-DETR/util/__init__.py +8 -0
  44. Deformable-DETR/util/box_ops.py +96 -0
  45. Deformable-DETR/util/misc.py +518 -0
  46. Deformable-DETR/util/plot_utils.py +111 -0
  47. README.md +25 -0
  48. configs/ABC123.yaml +37 -0
  49. configs/_DEFAULT.yml +51 -0
  50. configs/__init__.py +6 -0
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.pth filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by .ignore support plugin (hsz.mobi)
2
+ ### Python template
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+ *.pth
8
+ *.whl
9
+ # C extensions
10
+ *.so
11
+ *.jpg
12
+ *vis*/
13
+ *.png
14
+ *.json
15
+ *.pkl
16
+ # Distribution / packaging
17
+ .Python
18
+ env/
19
+ build/
20
+ develop-eggs/
21
+ dist/
22
+ downloads/
23
+ eggs/
24
+ .eggs/
25
+ lib/
26
+ lib64/
27
+ parts/
28
+ sdist/
29
+ var/
30
+ *.egg-info/
31
+ .installed.cfg
32
+ *.egg
33
+ *.txt
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ pip-log.txt
43
+ pip-delete-this-directory.txt
44
+
45
+ # Unit test / coverage reports
46
+ htmlcov/
47
+ .tox/
48
+ .coverage
49
+ .coverage.*
50
+ .cache
51
+ nosetests.xml
52
+ coverage.xml
53
+ *,cover
54
+ .hypothesis/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # IPython Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # pyenv
81
+ .python-version
82
+
83
+ # celery beat schedule file
84
+ celerybeat-schedule
85
+
86
+ # dotenv
87
+ .env
88
+
89
+ # virtualenv
90
+ venv/
91
+ ENV/
92
+
93
+ # Spyder project settings
94
+ .spyderproject
95
+
96
+ # Rope project settings
97
+ .ropeproject
98
+ ### VirtualEnv template
99
+ # Virtualenv
100
+ # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
101
+ [Bb]in
102
+ [Ii]nclude
103
+ [Ll]ib
104
+ [Ll]ib64
105
+ [Ll]ocal
106
+ [Ss]cripts
107
+ pyvenv.cfg
108
+ .venv
109
+ pip-selfcheck.json
110
+
111
+ ### JetBrains template
112
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
113
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
114
+
115
+ # User-specific stuff
116
+ .idea/**/workspace.xml
117
+ .idea/**/tasks.xml
118
+ .idea/**/usage.statistics.xml
119
+ .idea/**/dictionaries
120
+ .idea/**/shelf
121
+
122
+ # AWS User-specific
123
+ .idea/**/aws.xml
124
+
125
+ # Generated files
126
+ .idea/**/contentModel.xml
127
+
128
+ # Sensitive or high-churn files
129
+ .idea/**/dataSources/
130
+ .idea/**/dataSources.ids
131
+ .idea/**/dataSources.local.xml
132
+ .idea/**/sqlDataSources.xml
133
+ .idea/**/dynamic.xml
134
+ .idea/**/uiDesigner.xml
135
+ .idea/**/dbnavigator.xml
136
+
137
+ # Gradle
138
+ .idea/**/gradle.xml
139
+ .idea/**/libraries
140
+ imgs
141
+ # Gradle and Maven with auto-import
142
+ # When using Gradle or Maven with auto-import, you should exclude module files,
143
+ # since they will be recreated, and may cause churn. Uncomment if using
144
+ # auto-import.
145
+ # .idea/artifacts
146
+ # .idea/compiler.xml
147
+ # .idea/jarRepositories.xml
148
+ # .idea/modules.xml
149
+ # .idea/*.iml
150
+ # .idea/modules
151
+ # *.iml
152
+ # *.ipr
153
+
154
+ # CMake
155
+ cmake-build-*/
156
+
157
+ # Mongo Explorer plugin
158
+ .idea/**/mongoSettings.xml
159
+
160
+ # File-based project format
161
+ *.iws
162
+
163
+ # IntelliJ
164
+ out/
165
+ *.pth
166
+ # mpeltonen/sbt-idea plugin
167
+ .idea_modules/
168
+ .idea
169
+ # JIRA plugin
170
+ atlassian-ide-plugin.xml
171
+
172
+ # Cursive Clojure plugin
173
+ .idea/replstate.xml
174
+
175
+ # SonarLint plugin
176
+ .idea/sonarlint/
177
+
178
+ # Crashlytics plugin (for Android Studio and IntelliJ)
179
+ com_crashlytics_export_strings.xml
180
+ crashlytics.properties
181
+ crashlytics-build.properties
182
+ fabric.properties
183
+
184
+ # Editor-based Rest Client
185
+ .idea/httpRequests
186
+
187
+ # Android studio 3.1+ serialized cache file
188
+ .idea/caches/build_file_checksums.ser
189
+
190
+ # idea folder, uncomment if you don't need it
191
+ # .idea
Deformable-DETR/LICENSE ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2020 SenseTime. All Rights Reserved.
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright 2020 SenseTime
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
204
+
205
+
206
+ DETR
207
+
208
+ Copyright 2020 - present, Facebook, Inc
209
+
210
+ Licensed under the Apache License, Version 2.0 (the "License");
211
+ you may not use this file except in compliance with the License.
212
+ You may obtain a copy of the License at
213
+
214
+ http://www.apache.org/licenses/LICENSE-2.0
215
+
216
+ Unless required by applicable law or agreed to in writing, software
217
+ distributed under the License is distributed on an "AS IS" BASIS,
218
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
219
+ See the License for the specific language governing permissions and
220
+ limitations under the License.
Deformable-DETR/README.md ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deformable DETR
2
+
3
+ By [Xizhou Zhu](https://scholar.google.com/citations?user=02RXI00AAAAJ), [Weijie Su](https://www.weijiesu.com/), [Lewei Lu](https://www.linkedin.com/in/lewei-lu-94015977/), [Bin Li](http://staff.ustc.edu.cn/~binli/), [Xiaogang Wang](http://www.ee.cuhk.edu.hk/~xgwang/), [Jifeng Dai](https://jifengdai.org/).
4
+
5
+ This repository is an official implementation of the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159).
6
+
7
+
8
+ ## Introduction
9
+
10
+ **TL; DR.** Deformable DETR is an efficient and fast-converging end-to-end object detector. It mitigates the high complexity and slow convergence issues of DETR via a novel sampling-based efficient attention mechanism.
11
+
12
+ ![deformable_detr](./figs/illustration.png)
13
+
14
+ ![deformable_detr](./figs/convergence.png)
15
+
16
+ **Abstract.** DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10× less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach.
17
+
18
+ ## License
19
+
20
+ This project is released under the [Apache 2.0 license](./LICENSE).
21
+
22
+ ## Changelog
23
+
24
+ See [changelog.md](./docs/changelog.md) for detailed logs of major changes.
25
+
26
+
27
+ ## Citing Deformable DETR
28
+ If you find Deformable DETR useful in your research, please consider citing:
29
+ ```bibtex
30
+ @article{zhu2020deformable,
31
+ title={Deformable DETR: Deformable Transformers for End-to-End Object Detection},
32
+ author={Zhu, Xizhou and Su, Weijie and Lu, Lewei and Li, Bin and Wang, Xiaogang and Dai, Jifeng},
33
+ journal={arXiv preprint arXiv:2010.04159},
34
+ year={2020}
35
+ }
36
+ ```
37
+
38
+ ## Main Results
39
+
40
+ | <sub><sub>Method</sub></sub> | <sub><sub>Epochs</sub></sub> | <sub><sub>AP</sub></sub> | <sub><sub>AP<sub>S</sub></sub></sub> | <sub><sub>AP<sub>M</sub></sub></sub> | <sub><sub>AP<sub>L</sub></sub></sub> | <sub><sub>params<br>(M)</sub></sub> | <sub><sub>FLOPs<br>(G)</sub></sub> | <sub><sub>Total<br>Train<br>Time<br>(GPU<br/>hours)</sub></sub> | <sub><sub>Train<br/>Speed<br>(GPU<br/>hours<br/>/epoch)</sub></sub> | <sub><sub>Infer<br/>Speed<br/>(FPS)</sub></sub> | <sub><sub>Batch<br/>Infer<br/>Speed<br>(FPS)</sub></sub> | <sub><sub>URL</sub></sub> |
41
+ | ----------------------------------- | :----: | :--: | :----: | :---: | :------------------------------: | :--------------------:| :----------------------------------------------------------: | :--: | :---: | :---: | ----- | ----- |
42
+ | <sub><sub>Faster R-CNN + FPN</sub></sub> | <sub>109</sub> | <sub>42.0</sub> | <sub>26.6</sub> | <sub>45.4</sub> | <sub>53.4</sub> | <sub>42</sub> | <sub>180</sub> | <sub>380</sub> | <sub>3.5</sub> | <sub>25.6</sub> | <sub>28.0</sub> | <sub>-</sub> |
43
+ | <sub><sub>DETR</sub></sub> | <sub>500</sub> | <sub>42.0</sub> | <sub>20.5</sub> | <sub>45.8</sub> | <sub>61.1</sub> | <sub>41</sub> | <sub>86</sub> | <sub>2000</sub> | <sub>4.0</sub> | <sub>27.0</sub> | <sub>38.3</sub> | <sub>-</sub> |
44
+ | <sub><sub>DETR-DC5</sub></sub> | <sub>500</sub> | <sub>43.3</sub> | <sub>22.5</sub> | <sub>47.3</sub> | <sub>61.1</sub> | <sub>41</sub> |<sub>187</sub>|<sub>7000</sub>|<sub>14.0</sub>|<sub>11.4</sub>|<sub>12.4</sub>| <sub>-</sub> |
45
+ | <sub><sub>DETR-DC5</sub></sub> | <sub>50</sub> | <sub>35.3</sub> | <sub>15.2</sub> | <sub>37.5</sub> | <sub>53.6</sub> | <sub>41</sub> |<sub>187</sub>|<sub>700</sub>|<sub>14.0</sub>|<sub>11.4</sub>|<sub>12.4</sub>| <sub>-</sub> |
46
+ | <sub><sub>DETR-DC5+</sub></sub> | <sub>50</sub> | <sub>36.2</sub> | <sub>16.3</sub> | <sub>39.2</sub> | <sub>53.9</sub> | <sub>41</sub> |<sub>187</sub>|<sub>700</sub>|<sub>14.0</sub>|<sub>11.4</sub>|<sub>12.4</sub>| <sub>-</sub> |
47
+ | **<sub><sub>Deformable DETR<br>(single scale)</sub></sub>** | <sub>50</sub> | <sub>39.4</sub> | <sub>20.6</sub> | <sub>43.0</sub> | <sub>55.5</sub> | <sub>34</sub> |<sub>78</sub>|<sub>160</sub>|<sub>3.2</sub>|<sub>27.0</sub>|<sub>42.4</sub>| <sub>[config](./configs/r50_deformable_detr_single_scale.sh)<br/>[log](https://drive.google.com/file/d/1n3ZnZ-UAqmTUR4AZoM4qQntIDn6qCZx4/view?usp=sharing)<br/>[model](https://drive.google.com/file/d/1WEjQ9_FgfI5sw5OZZ4ix-OKk-IJ_-SDU/view?usp=sharing)</sub> |
48
+ | **<sub><sub>Deformable DETR<br>(single scale, DC5)</sub></sub>** | <sub>50</sub> | <sub>41.5</sub> | <sub>24.1</sub> | <sub>45.3</sub> | <sub>56.0</sub> | <sub>34</sub> |<sub>128</sub>|<sub>215</sub>|<sub>4.3</sub>|<sub>22.1</sub>|<sub>29.4</sub>| <sub>[config](./configs/r50_deformable_detr_single_scale_dc5.sh)<br/>[log](https://drive.google.com/file/d/1-UfTp2q4GIkJjsaMRIkQxa5k5vn8_n-B/view?usp=sharing)<br/>[model](https://drive.google.com/file/d/1m_TgMjzH7D44fbA-c_jiBZ-xf-odxGdk/view?usp=sharing)</sub> |
49
+ | **<sub><sub>Deformable DETR</sub></sub>** | <sub>50</sub> | <sub>44.5</sub> | <sub>27.1</sub> | <sub>47.6</sub> | <sub>59.6</sub> | <sub>40</sub> |<sub>173</sub>|<sub>325</sub>|<sub>6.5</sub>|<sub>15.0</sub>|<sub>19.4</sub>|<sub>[config](./configs/r50_deformable_detr.sh)<br/>[log](https://drive.google.com/file/d/18YSLshFjc_erOLfFC-hHu4MX4iyz1Dqr/view?usp=sharing)<br/>[model](https://drive.google.com/file/d/1nDWZWHuRwtwGden77NLM9JoWe-YisJnA/view?usp=sharing)</sub> |
50
+ | **<sub><sub>+ iterative bounding box refinement</sub></sub>** | <sub>50</sub> | <sub>46.2</sub> | <sub>28.3</sub> | <sub>49.2</sub> | <sub>61.5</sub> | <sub>41</sub> |<sub>173</sub>|<sub>325</sub>|<sub>6.5</sub>|<sub>15.0</sub>|<sub>19.4</sub>|<sub>[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh)<br/>[log](https://drive.google.com/file/d/1DFNloITi1SFBWjYzvVEAI75ndwmGM1Uj/view?usp=sharing)<br/>[model](https://drive.google.com/file/d/1JYKyRYzUH7uo9eVfDaVCiaIGZb5YTCuI/view?usp=sharing)</sub> |
51
+ | **<sub><sub>++ two-stage Deformable DETR</sub></sub>** | <sub>50</sub> | <sub>46.9</sub> | <sub>29.6</sub> | <sub>50.1</sub> | <sub>61.6</sub> | <sub>41</sub> |<sub>173</sub>|<sub>340</sub>|<sub>6.8</sub>|<sub>14.5</sub>|<sub>18.8</sub>|<sub>[config](./configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh)<br/>[log](https://drive.google.com/file/d/1ozi0wbv5-Sc5TbWt1jAuXco72vEfEtbY/view?usp=sharing) <br/>[model](https://drive.google.com/file/d/15I03A7hNTpwuLNdfuEmW9_taZMNVssEp/view?usp=sharing)</sub> |
52
+
53
+ *Note:*
54
+
55
+ 1. All models of Deformable DETR are trained with total batch size of 32.
56
+ 2. Training and inference speed are measured on NVIDIA Tesla V100 GPU.
57
+ 3. "Deformable DETR (single scale)" means only using res5 feature map (of stride 32) as input feature maps for Deformable Transformer Encoder.
58
+ 4. "DC5" means removing the stride in C5 stage of ResNet and add a dilation of 2 instead.
59
+ 5. "DETR-DC5+" indicates DETR-DC5 with some modifications, including using Focal Loss for bounding box classification and increasing number of object queries to 300.
60
+ 6. "Batch Infer Speed" refer to inference with batch size = 4 to maximize GPU utilization.
61
+ 7. The original implementation is based on our internal codebase. There are slight differences in the final accuracy and running time due to the plenty details in platform switch.
62
+
63
+
64
+ ## Installation
65
+
66
+ ### Requirements
67
+
68
+ * Linux, CUDA>=9.2, GCC>=5.4
69
+
70
+ * Python>=3.7
71
+
72
+ We recommend you to use Anaconda to create a conda environment:
73
+ ```bash
74
+ conda create -n deformable_detr python=3.7 pip
75
+ ```
76
+ Then, activate the environment:
77
+ ```bash
78
+ conda activate deformable_detr
79
+ ```
80
+
81
+ * PyTorch>=1.5.1, torchvision>=0.6.1 (following instructions [here](https://pytorch.org/))
82
+
83
+ For example, if your CUDA version is 9.2, you could install pytorch and torchvision as following:
84
+ ```bash
85
+ conda install pytorch=1.5.1 torchvision=0.6.1 cudatoolkit=9.2 -c pytorch
86
+ ```
87
+
88
+ * Other requirements
89
+ ```bash
90
+ pip install -r requirements.txt
91
+ ```
92
+
93
+ ### Compiling CUDA operators
94
+ ```bash
95
+ cd ./models/ops
96
+ sh ./make.sh
97
+ # unit test (should see all checking is True)
98
+ python test.py
99
+ ```
100
+
101
+ ## Usage
102
+
103
+ ### Dataset preparation
104
+
105
+ Please download [COCO 2017 dataset](https://cocodataset.org/) and organize them as following:
106
+
107
+ ```
108
+ code_root/
109
+ └── data/
110
+ └── coco/
111
+ ├── train2017/
112
+ ├── val2017/
113
+ └── annotations/
114
+ ├── instances_train2017.json
115
+ └── instances_val2017.json
116
+ ```
117
+
118
+ ### Training
119
+
120
+ #### Training on single node
121
+
122
+ For example, the command for training Deformable DETR on 8 GPUs is as following:
123
+
124
+ ```bash
125
+ GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 8 ./configs/r50_deformable_detr.sh
126
+ ```
127
+
128
+ #### Training on multiple nodes
129
+
130
+ For example, the command for training Deformable DETR on 2 nodes of each with 8 GPUs is as following:
131
+
132
+ On node 1:
133
+
134
+ ```bash
135
+ MASTER_ADDR=<IP address of node 1> NODE_RANK=0 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh
136
+ ```
137
+
138
+ On node 2:
139
+
140
+ ```bash
141
+ MASTER_ADDR=<IP address of node 1> NODE_RANK=1 GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 16 ./configs/r50_deformable_detr.sh
142
+ ```
143
+
144
+ #### Training on slurm cluster
145
+
146
+ If you are using slurm cluster, you can simply run the following command to train on 1 node with 8 GPUs:
147
+
148
+ ```bash
149
+ GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh <partition> deformable_detr 8 configs/r50_deformable_detr.sh
150
+ ```
151
+
152
+ Or 2 nodes of each with 8 GPUs:
153
+
154
+ ```bash
155
+ GPUS_PER_NODE=8 ./tools/run_dist_slurm.sh <partition> deformable_detr 16 configs/r50_deformable_detr.sh
156
+ ```
157
+ #### Some tips to speed-up training
158
+ * If your file system is slow to read images, you may consider enabling '--cache_mode' option to load whole dataset into memory at the beginning of training.
159
+ * You may increase the batch size to maximize the GPU utilization, according to GPU memory of yours, e.g., set '--batch_size 3' or '--batch_size 4'.
160
+
161
+ ### Evaluation
162
+
163
+ You can get the config file and pretrained model of Deformable DETR (the link is in "Main Results" session), then run following command to evaluate it on COCO 2017 validation set:
164
+
165
+ ```bash
166
+ <path to config file> --resume <path to pre-trained model> --eval
167
+ ```
168
+
169
+ You can also run distributed evaluation by using ```./tools/run_dist_launch.sh``` or ```./tools/run_dist_slurm.sh```.
Deformable-DETR/configs/r50_deformable_detr.sh ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+
3
+ set -x
4
+
5
+ EXP_DIR=exps/r50_deformable_detr
6
+ PY_ARGS=${@:1}
7
+
8
+ python -u main.py \
9
+ --output_dir ${EXP_DIR} \
10
+ ${PY_ARGS}
Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+
3
+ set -x
4
+
5
+ EXP_DIR=exps/r50_deformable_detr_plus_iterative_bbox_refinement
6
+ PY_ARGS=${@:1}
7
+
8
+ python -u main.py \
9
+ --output_dir ${EXP_DIR} \
10
+ --with_box_refine \
11
+ ${PY_ARGS}
Deformable-DETR/configs/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage.sh ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Train Deformable DETR (R50) with box refinement and the two-stage variant.

set -x

EXP_DIR=exps/r50_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage

# Forward the caller's arguments as "$@" to preserve quoting
# (PY_ARGS=${@:1} word-splits quoted arguments — ShellCheck SC2124).
python -u main.py \
    --output_dir ${EXP_DIR} \
    --with_box_refine \
    --two_stage \
    "$@"
Deformable-DETR/configs/r50_deformable_detr_single_scale.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Train the single-scale (one feature level) Deformable DETR variant.

set -x

EXP_DIR=exps/r50_deformable_detr_single_scale

# Forward the caller's arguments as "$@" to preserve quoting
# (PY_ARGS=${@:1} word-splits quoted arguments — ShellCheck SC2124).
python -u main.py \
    --num_feature_levels 1 \
    --output_dir ${EXP_DIR} \
    "$@"
Deformable-DETR/configs/r50_deformable_detr_single_scale_dc5.sh ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Train the single-scale DC5 (dilated C5 backbone) Deformable DETR variant.

set -x

EXP_DIR=exps/r50_deformable_detr_single_scale_dc5

# Forward the caller's arguments as "$@" to preserve quoting
# (PY_ARGS=${@:1} word-splits quoted arguments — ShellCheck SC2124).
python -u main.py \
    --num_feature_levels 1 \
    --dilation \
    --output_dir ${EXP_DIR} \
    "$@"
Deformable-DETR/datasets/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ import torch.utils.data
11
+ from .torchvision_datasets import CocoDetection
12
+
13
+ from .coco import build as build_coco
14
+
15
+
16
def get_coco_api_from_dataset(dataset):
    """Peel off (possibly nested) ``Subset`` wrappers and return the pycocotools COCO object.

    Gives up after a bounded number of unwrapping steps; returns ``None`` when no
    ``CocoDetection`` is found.
    """
    current = dataset
    for _ in range(10):
        # if isinstance(current, torchvision.datasets.CocoDetection):
        #     break
        if isinstance(current, torch.utils.data.Subset):
            current = current.dataset
        if isinstance(current, CocoDetection):
            return current.coco
24
+
25
+
26
def build_dataset(image_set, args):
    """Instantiate the dataset selected by ``args.dataset_file`` for the given split."""
    name = args.dataset_file
    if name == 'coco':
        return build_coco(image_set, args)
    if name == 'coco_panoptic':
        # imported lazily so panopticapi stays optional for plain COCO users
        from .coco_panoptic import build as build_coco_panoptic
        return build_coco_panoptic(image_set, args)
    raise ValueError(f'dataset {name} not supported')
Deformable-DETR/datasets/coco.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ COCO dataset which returns image_id for evaluation.
12
+
13
+ Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
14
+ """
15
+ from pathlib import Path
16
+
17
+ import torch
18
+ import torch.utils.data
19
+ from pycocotools import mask as coco_mask
20
+
21
+ from .torchvision_datasets import CocoDetection as TvCocoDetection
22
+ from util.misc import get_local_rank, get_local_size
23
+ import datasets.transforms as T
24
+
25
+
26
class CocoDetection(TvCocoDetection):
    """COCO detection dataset that yields (image, tensor-target) pairs.

    Wraps the torchvision-style ``CocoDetection`` and additionally runs the
    polygon-to-mask conversion plus any user-supplied transforms.
    """

    def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):
        super(CocoDetection, self).__init__(img_folder, ann_file,
                                            cache_mode=cache_mode, local_rank=local_rank, local_size=local_size)
        self._transforms = transforms
        self.prepare = ConvertCocoPolysToMask(return_masks)

    def __getitem__(self, idx):
        image, raw_anno = super(CocoDetection, self).__getitem__(idx)
        target = {'image_id': self.ids[idx], 'annotations': raw_anno}
        image, target = self.prepare(image, target)
        if self._transforms is not None:
            image, target = self._transforms(image, target)
        return image, target
41
+
42
+
43
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, height, width) mask tensor."""
    per_object = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if len(decoded.shape) < 3:
            decoded = decoded[..., None]
        # collapse the per-polygon channel: a pixel is set if any polygon covers it
        per_object.append(torch.as_tensor(decoded, dtype=torch.uint8).any(dim=2))
    if not per_object:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(per_object, dim=0)
58
+
59
+
60
class ConvertCocoPolysToMask(object):
    """Turn raw COCO annotation dicts into tensor targets (boxes, labels, masks, ...)."""

    def __init__(self, return_masks=False):
        self.return_masks = return_masks

    def __call__(self, image, target):
        w, h = image.size

        image_id = torch.tensor([target["image_id"]])

        # drop crowd regions up front
        anno = [obj for obj in target["annotations"]
                if 'iscrowd' not in obj or obj['iscrowd'] == 0]

        # xywh -> xyxy, clamped to the image; reshape guards against no boxes
        boxes = torch.as_tensor([obj["bbox"] for obj in anno], dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)

        classes = torch.tensor([obj["category_id"] for obj in anno], dtype=torch.int64)

        if self.return_masks:
            masks = convert_coco_poly_to_mask([obj["segmentation"] for obj in anno], h, w)

        keypoints = None
        if anno and "keypoints" in anno[0]:
            keypoints = torch.as_tensor([obj["keypoints"] for obj in anno], dtype=torch.float32)
            num_keypoints = keypoints.shape[0]
            if num_keypoints:
                keypoints = keypoints.view(num_keypoints, -1, 3)

        # keep only boxes with strictly positive width and height
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]
        classes = classes[keep]
        if self.return_masks:
            masks = masks[keep]
        if keypoints is not None:
            keypoints = keypoints[keep]

        out = {}
        out["boxes"] = boxes
        out["labels"] = classes
        if self.return_masks:
            out["masks"] = masks
        out["image_id"] = image_id
        if keypoints is not None:
            out["keypoints"] = keypoints

        # extra fields needed for conversion back to the COCO api
        area = torch.tensor([obj["area"] for obj in anno])
        iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
        out["area"] = area[keep]
        out["iscrowd"] = iscrowd[keep]

        out["orig_size"] = torch.as_tensor([int(h), int(w)])
        out["size"] = torch.as_tensor([int(h), int(w)])

        return image, out
123
+
124
+
125
def make_coco_transforms(image_set):
    """Build the augmentation pipeline used for COCO ('train' or 'val')."""

    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]

    if image_set == 'train':
        # either a plain multi-scale resize, or resize -> random crop -> resize
        multi_scale = T.RandomResize(scales, max_size=1333)
        crop_branch = T.Compose([
            T.RandomResize([400, 500, 600]),
            T.RandomSizeCrop(384, 600),
            T.RandomResize(scales, max_size=1333),
        ])
        return T.Compose([
            T.RandomHorizontalFlip(),
            T.RandomSelect(multi_scale, crop_branch),
            normalize,
        ])

    if image_set == 'val':
        return T.Compose([
            T.RandomResize([800], max_size=1333),
            normalize,
        ])

    raise ValueError(f'unknown {image_set}')
155
+
156
+
157
def build(image_set, args):
    """Construct the COCO detection dataset for ``image_set`` ('train' or 'val')."""
    root = Path(args.coco_path)
    assert root.exists(), f'provided COCO path {root} does not exist'
    mode = 'instances'
    split_paths = {
        "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
        "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
    }
    img_folder, ann_file = split_paths[image_set]
    return CocoDetection(img_folder, ann_file,
                         transforms=make_coco_transforms(image_set),
                         return_masks=args.masks,
                         cache_mode=args.cache_mode,
                         local_rank=get_local_rank(),
                         local_size=get_local_size())
Deformable-DETR/datasets/coco_eval.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ COCO evaluator that works in distributed mode.
12
+
13
+ Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
14
+ The difference is that there is less copy-pasting from pycocotools
15
+ in the end of the file, as python3 can suppress prints with contextlib
16
+ """
17
+ import os
18
+ import contextlib
19
+ import copy
20
+ import numpy as np
21
+ import torch
22
+
23
+ from pycocotools.cocoeval import COCOeval
24
+ from pycocotools.coco import COCO
25
+ import pycocotools.mask as mask_util
26
+
27
+ from util.misc import all_gather
28
+
29
+
30
class CocoEvaluator(object):
    """Distributed-friendly wrapper around pycocotools' COCOeval.

    Accumulates per-image evaluation results for one or more IoU types
    ('bbox', 'segm', 'keypoints') and merges them across processes before
    summarizing.
    """

    def __init__(self, coco_gt, iou_types):
        assert isinstance(iou_types, (list, tuple))
        # deep-copy so repeated evaluations never mutate the caller's GT object
        coco_gt = copy.deepcopy(coco_gt)
        self.coco_gt = coco_gt

        self.iou_types = iou_types
        self.coco_eval = {}
        for iou_type in iou_types:
            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)

        self.img_ids = []
        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions):
        """Convert ``{image_id: prediction}`` into COCO results and evaluate them."""
        img_ids = list(np.unique(list(predictions.keys())))
        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:
            results = self.prepare(predictions, iou_type)

            # suppress pycocotools prints
            with open(os.devnull, 'w') as devnull:
                with contextlib.redirect_stdout(devnull):
                    coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt
            coco_eval.params.imgIds = list(img_ids)
            img_ids, eval_imgs = evaluate(coco_eval)

            self.eval_imgs[iou_type].append(eval_imgs)

    def synchronize_between_processes(self):
        """Merge eval results gathered from all processes into each COCOeval."""
        for iou_type in self.iou_types:
            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

    def accumulate(self):
        for coco_eval in self.coco_eval.values():
            coco_eval.accumulate()

    def summarize(self):
        for iou_type, coco_eval in self.coco_eval.items():
            print("IoU metric: {}".format(iou_type))
            coco_eval.summarize()

    def prepare(self, predictions, iou_type):
        """Dispatch to the converter for the given IoU type."""
        if iou_type == "bbox":
            return self.prepare_for_coco_detection(predictions)
        elif iou_type == "segm":
            return self.prepare_for_coco_segmentation(predictions)
        elif iou_type == "keypoints":
            return self.prepare_for_coco_keypoint(predictions)
        else:
            raise ValueError("Unknown iou type {}".format(iou_type))

    def prepare_for_coco_detection(self, predictions):
        """Flatten box predictions into the COCO results-list format."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            boxes = convert_to_xywh(prediction["boxes"]).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "bbox": box,
                        "score": scores[k],
                    }
                    for k, box in enumerate(boxes)
                ]
            )
        return coco_results

    def prepare_for_coco_segmentation(self, predictions):
        """Flatten mask predictions into RLE-encoded COCO results."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            # NOTE: the original fetched scores/labels twice; one fetch suffices.
            masks = prediction["masks"] > 0.5
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()

            rles = [
                mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
                for mask in masks
            ]
            for rle in rles:
                # pycocotools returns bytes; JSON needs str
                rle["counts"] = rle["counts"].decode("utf-8")

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        "segmentation": rle,
                        "score": scores[k],
                    }
                    for k, rle in enumerate(rles)
                ]
            )
        return coco_results

    def prepare_for_coco_keypoint(self, predictions):
        """Flatten keypoint predictions into the COCO results-list format."""
        coco_results = []
        for original_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue

            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            keypoints = prediction["keypoints"].flatten(start_dim=1).tolist()

            coco_results.extend(
                [
                    {
                        "image_id": original_id,
                        "category_id": labels[k],
                        'keypoints': keypoint,
                        "score": scores[k],
                    }
                    for k, keypoint in enumerate(keypoints)
                ]
            )
        return coco_results
171
+
172
+
173
def convert_to_xywh(boxes):
    """Convert (x0, y0, x1, y1) corner boxes to COCO (x, y, width, height)."""
    x0, y0, x1, y1 = boxes.unbind(1)
    return torch.stack((x0, y0, x1 - x0, y1 - y0), dim=1)
176
+
177
+
178
def merge(img_ids, eval_imgs):
    """Gather per-process image ids / eval arrays and deduplicate them globally."""
    gathered_ids = all_gather(img_ids)
    gathered_imgs = all_gather(eval_imgs)

    merged_img_ids = []
    for part in gathered_ids:
        merged_img_ids.extend(part)

    merged_eval_imgs = list(gathered_imgs)

    merged_img_ids = np.array(merged_img_ids)
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and sorted) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs
198
+
199
+
200
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    """Install globally-merged image ids and eval results into a COCOeval object."""
    merged_ids, merged_imgs = merge(img_ids, eval_imgs)

    coco_eval.evalImgs = list(merged_imgs.flatten())
    coco_eval.params.imgIds = list(merged_ids)
    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
208
+
209
+
210
+ #################################################################
211
+ # From pycocotools, just removed the prints and fixed
212
+ # a Python3 bug about unicode not defined
213
+ #################################################################
214
+
215
+
216
def evaluate(self):
    '''
    Run per-image evaluation on the given images.

    A print-free copy of pycocotools ``COCOeval.evaluate`` that additionally
    returns ``(imgIds, evalImgs)`` — with evalImgs reshaped to
    (num_cats, num_area_ranges, num_imgs) — so results can be merged across
    processes instead of only being stored on ``self``.
    '''
    p = self.params
    # backward compatibility: honour the deprecated useSegm flag
    if p.useSegm is not None:
        p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
        print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
    p.imgIds = list(np.unique(p.imgIds))
    if p.useCats:
        p.catIds = list(np.unique(p.catIds))
    p.maxDets = sorted(p.maxDets)
    self.params = p

    self._prepare()

    # loop through images, area range, max detection number
    catIds = p.catIds if p.useCats else [-1]

    if p.iouType == 'segm' or p.iouType == 'bbox':
        compute_iou = self.computeIoU
    elif p.iouType == 'keypoints':
        compute_iou = self.computeOks
    self.ious = {
        (img_id, cat_id): compute_iou(img_id, cat_id)
        for img_id in p.imgIds
        for cat_id in catIds}

    evaluate_img = self.evaluateImg
    max_det = p.maxDets[-1]
    evalImgs = [
        evaluate_img(img_id, cat_id, area_rng, max_det)
        for cat_id in catIds
        for area_rng in p.areaRng
        for img_id in p.imgIds
    ]
    # this reshape is NOT in the pycocotools code; it enables cross-process merging
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    self._paramsEval = copy.deepcopy(self.params)
    return p.imgIds, evalImgs
262
+
263
+ #################################################################
264
+ # end of straight copy from pycocotools, just removing the prints
265
+ #################################################################
Deformable-DETR/datasets/coco_panoptic.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ import json
11
+ from pathlib import Path
12
+
13
+ import numpy as np
14
+ import torch
15
+ from PIL import Image
16
+
17
+ from panopticapi.utils import rgb2id
18
+ from util.box_ops import masks_to_boxes
19
+
20
+ from .coco import make_coco_transforms
21
+
22
+
23
class CocoPanoptic:
    """COCO panoptic dataset: pairs RGB images with panoptic PNG annotations."""

    def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
        with open(ann_file, 'r') as f:
            self.coco = json.load(f)

        # sort 'images' so they line up with 'annotations' (i.e. alphabetical order)
        self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id'])
        # sanity check: file stems of image/annotation pairs must match
        if "annotations" in self.coco:
            for img, ann in zip(self.coco['images'], self.coco['annotations']):
                assert img['file_name'][:-4] == ann['file_name'][:-4]

        self.img_folder = img_folder
        self.ann_folder = ann_folder
        self.ann_file = ann_file
        self.transforms = transforms
        self.return_masks = return_masks

    def __getitem__(self, idx):
        has_anns = "annotations" in self.coco
        ann_info = self.coco['annotations'][idx] if has_anns else self.coco['images'][idx]
        img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg')
        ann_path = Path(self.ann_folder) / ann_info['file_name']

        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        if "segments_info" in ann_info:
            # panoptic PNGs encode segment ids in RGB; decode, then split one mask per segment
            masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
            masks = rgb2id(masks)

            ids = np.array([ann['id'] for ann in ann_info['segments_info']])
            masks = masks == ids[:, None, None]

            masks = torch.as_tensor(masks, dtype=torch.uint8)
            labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']],
                                  dtype=torch.int64)

        target = {}
        target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
        if self.return_masks:
            target['masks'] = masks
        target['labels'] = labels

        target["boxes"] = masks_to_boxes(masks)

        target['size'] = torch.as_tensor([int(h), int(w)])
        target['orig_size'] = torch.as_tensor([int(h), int(w)])
        if "segments_info" in ann_info:
            for name in ['iscrowd', 'area']:
                target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.coco['images'])

    def get_height_and_width(self, idx):
        img_info = self.coco['images'][idx]
        return img_info['height'], img_info['width']
86
+
87
+
88
def build(image_set, args):
    """Construct the COCO panoptic dataset for ``image_set`` ('train' or 'val')."""
    img_folder_root = Path(args.coco_path)
    ann_folder_root = Path(args.coco_panoptic_path)
    assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist'
    assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist'
    mode = 'panoptic'
    split_paths = {
        "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'),
        "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'),
    }

    img_folder, ann_file = split_paths[image_set]
    return CocoPanoptic(img_folder_root / img_folder,
                        ann_folder_root / f'{mode}_{img_folder}',
                        ann_folder_root / ann_file,
                        transforms=make_coco_transforms(image_set),
                        return_masks=args.masks)
Deformable-DETR/datasets/data_prefetcher.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+
7
+ import torch
8
+
9
def to_cuda(samples, targets, device):
    """Move a batch (samples plus a list of per-image target dicts) onto ``device``."""
    moved_samples = samples.to(device, non_blocking=True)
    moved_targets = [{key: val.to(device, non_blocking=True) for key, val in t.items()}
                     for t in targets]
    return moved_samples, moved_targets
13
+
14
class data_prefetcher():
    """Overlaps host-to-device copies with compute by staging the next batch on a side CUDA stream."""

    def __init__(self, loader, device, prefetch=True):
        self.loader = iter(loader)
        self.prefetch = prefetch
        self.device = device
        if prefetch:
            self.stream = torch.cuda.Stream()
            self.preload()

    def preload(self):
        """Fetch the next batch and start copying it to the device on the side stream."""
        try:
            self.next_samples, self.next_targets = next(self.loader)
        except StopIteration:
            # loader exhausted; next() will surface this as (None, None)
            self.next_samples = None
            self.next_targets = None
            return
        # If record_stream() ever misbehaves, the alternative is to allocate the
        # device tensors on the main stream and copy_ into them here.
        with torch.cuda.stream(self.stream):
            self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)

    def next(self):
        """Return the current batch (already on device when prefetching) and kick off the next copy."""
        if self.prefetch:
            torch.cuda.current_stream().wait_stream(self.stream)
            samples = self.next_samples
            targets = self.next_targets
            if samples is not None:
                # record_stream keeps the prefetch allocation alive until the
                # main stream is done with it
                samples.record_stream(torch.cuda.current_stream())
            if targets is not None:
                for t in targets:
                    for _, v in t.items():
                        v.record_stream(torch.cuda.current_stream())
            self.preload()
        else:
            try:
                samples, targets = next(self.loader)
                samples, targets = to_cuda(samples, targets, self.device)
            except StopIteration:
                samples = None
                targets = None
        return samples, targets
Deformable-DETR/datasets/panoptic_eval.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ import json
11
+ import os
12
+
13
+ import util.misc as utils
14
+
15
+ try:
16
+ from panopticapi.evaluation import pq_compute
17
+ except ImportError:
18
+ pass
19
+
20
+
21
class PanopticEvaluator(object):
    """Collects panoptic predictions across processes and scores them with panopticapi's PQ metric."""

    def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        if utils.is_main_process():
            # makedirs(exist_ok=True) avoids the exists-then-mkdir race when
            # several jobs share the output directory
            os.makedirs(output_dir, exist_ok=True)
        self.output_dir = output_dir
        self.predictions = []

    def update(self, predictions):
        """Write each prediction's PNG payload to disk and buffer its metadata."""
        for p in predictions:
            with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        self.predictions += predictions

    def synchronize_between_processes(self):
        """Gather the prediction lists from all processes into one."""
        all_predictions = utils.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions

    def summarize(self):
        """On the main process, dump predictions.json and run pq_compute; other ranks return None."""
        if utils.is_main_process():
            json_data = {"annotations": self.predictions}
            predictions_json = os.path.join(self.output_dir, "predictions.json")
            with open(predictions_json, "w") as f:
                f.write(json.dumps(json_data))
            return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
        return None
Deformable-DETR/datasets/samplers.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from codes in torch.utils.data.distributed
7
+ # ------------------------------------------------------------------------
8
+
9
+ import os
10
+ import math
11
+ import torch
12
+ import torch.distributed as dist
13
+ from torch.utils.data.sampler import Sampler
14
+
15
+
16
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    Useful with :class:`torch.nn.parallel.DistributedDataParallel`: each process
    receives an exclusive, evenly-sized slice of the (optionally shuffled)
    index list.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # deterministic shuffle seeded by the epoch so all ranks agree
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))

        # pad with leading indices so the list splits evenly across replicas
        order += order[:self.total_size - len(order)]
        assert len(order) == self.total_size

        # this rank's contiguous slice
        start = self.num_samples * self.rank
        shard = order[start:start + self.num_samples]
        assert len(shard) == self.num_samples

        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
73
+
74
+
75
class NodeDistributedSampler(Sampler):
    """Distributed sampler that additionally shards indices by node-local rank.

    Each node (a group of ``local_size`` processes) only ever sees indices
    congruent to its local rank modulo ``local_size``, which pairs with
    per-node dataset caching.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        if local_rank is None:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        if local_size is None:
            local_size = int(os.environ.get('LOCAL_SIZE', 1))
        self.dataset = dataset
        self.shuffle = shuffle
        self.num_replicas = num_replicas
        self.num_parts = local_size
        self.rank = rank
        self.local_rank = local_rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

        self.total_size_parts = self.num_samples * self.num_replicas // self.num_parts

    def __iter__(self):
        if self.shuffle:
            # deterministic shuffle seeded by the epoch so all ranks agree
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = list(range(len(self.dataset)))
        # keep only this node's share of the indices
        order = [idx for idx in order if idx % self.num_parts == self.local_rank]

        # pad so each node's share divides evenly among its processes
        order += order[:self.total_size_parts - len(order)]
        assert len(order) == self.total_size_parts

        # strided subsample across the processes of this node
        shard = order[self.rank // self.num_parts:self.total_size_parts:self.num_replicas // self.num_parts]
        assert len(shard) == self.num_samples

        return iter(shard)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
Deformable-DETR/datasets/transforms.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Transforms and data augmentation for both image + bbox.
12
+ """
13
+ import random
14
+
15
+ import PIL
16
+ import torch
17
+ import torchvision.transforms as T
18
+ import torchvision.transforms.functional as F
19
+
20
+ from util.box_ops import box_xyxy_to_cxcywh
21
+ from util.misc import interpolate
22
+
23
+
24
def crop(image, target, region):
    """Crop `image` to `region` = (top, left, height, width) and clip/filter
    the annotations in `target` to the cropped window.

    Boxes are shifted into crop coordinates and clipped; instances whose box
    (or mask, when no boxes exist) becomes empty are dropped from every
    per-instance field.
    """
    cropped_image = F.crop(image, *region)

    target = target.copy()
    i, j, h, w = region

    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    # per-instance fields that must be filtered in lockstep with boxes/masks
    fields = ["labels", "area", "iscrowd"]

    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift into crop coordinates, then clip each (x, y) corner to the window
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target['masks'] = target['masks'][:, i:i + h, j:j + w]
        fields.append("masks")

    # remove elements for which the boxes or masks have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep;
        # this is compatible with the previous implementation
        if "boxes" in target:
            cropped_boxes = target['boxes'].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target['masks'].flatten(1).any(1)

        for field in fields:
            target[field] = target[field][keep]

    return cropped_image, target
65
+
66
+
67
def hflip(image, target):
    """Horizontally flip the image and mirror its boxes/masks accordingly."""
    flipped = F.hflip(image)

    width, _ = image.size

    out = target.copy()
    if "boxes" in out:
        # mirror x-coordinates: x' = width - x, swapping x0/x1 to keep x0 <= x1
        original = out["boxes"]
        out["boxes"] = original[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([width, 0, width, 0])

    if "masks" in out:
        out['masks'] = out['masks'].flip(-1)

    return flipped, out
82
+
83
+
84
def resize(image, target, size, max_size=None):
    """Resize `image` (and rescale `target` annotations) to `size`.

    `size` can be a scalar (shorter-side length, aspect ratio preserved,
    longer side capped by `max_size`) or a (w, h) tuple (exact size).
    """
    # size can be min_size (scalar) or (w, h) tuple

    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Compute an (h, w) keeping aspect ratio with shorter side == size,
        # shrinking `size` first if the longer side would exceed max_size.
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # explicit (w, h) requested; F.resize expects (h, w)
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    # per-axis scale factors (PIL .size is (w, h))
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes

    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        # nearest-neighbour resize of binary masks, re-thresholded to bool
        target['masks'] = interpolate(
            target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5

    return rescaled_image, target
141
+
142
+
143
def pad(image, target, padding):
    """Pad `image` by (pad_right, pad_bottom) pixels on the bottom/right only,
    so existing box coordinates remain valid, and update the target.

    Args:
        image: PIL image.
        target: annotation dict or None.
        padding: (pad_right, pad_bottom) in pixels.
    Returns:
        (padded_image, updated_target)
    """
    # assumes that we only pad on the bottom right corners
    padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
    if target is None:
        return padded_image, None
    target = target.copy()
    # should we do something wrt the original size?
    # BUGFIX: the original passed `padded_image[::-1]`, which slices the PIL
    # Image object itself and raises TypeError. The intended value is the
    # padded image's (h, w), i.e. the reverse of PIL's (w, h) `.size`.
    target["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in target:
        target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
    return padded_image, target
154
+
155
+
156
class RandomCrop(object):
    """Crops a random region of fixed `size` from the image and target."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        return crop(img, target, T.RandomCrop.get_params(img, self.size))
163
+
164
+
165
+ class RandomSizeCrop(object):
166
+ def __init__(self, min_size: int, max_size: int):
167
+ self.min_size = min_size
168
+ self.max_size = max_size
169
+
170
+ def __call__(self, img: PIL.Image.Image, target: dict):
171
+ w = random.randint(self.min_size, min(img.width, self.max_size))
172
+ h = random.randint(self.min_size, min(img.height, self.max_size))
173
+ region = T.RandomCrop.get_params(img, [h, w])
174
+ return crop(img, target, region)
175
+
176
+
177
class CenterCrop(object):
    """Crops a fixed (h, w) region centered in the image."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        top = int(round((img_h - out_h) / 2.))
        left = int(round((img_w - out_w) / 2.))
        return crop(img, target, (top, left, out_h, out_w))
187
+
188
+
189
class RandomHorizontalFlip(object):
    """Applies hflip to (img, target) with probability p."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        # guard clause: keep the pair untouched when the coin flip fails
        if random.random() >= self.p:
            return img, target
        return hflip(img, target)
197
+
198
+
199
class RandomResize(object):
    """Resizes to a size drawn uniformly from `sizes`, capped by `max_size`."""

    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size

    def __call__(self, img, target=None):
        chosen = random.choice(self.sizes)
        return resize(img, target, chosen, self.max_size)
208
+
209
+
210
class RandomPad(object):
    """Pads bottom/right by independent random amounts in [0, max_pad]."""

    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        # draw x padding first, then y, matching the original RNG order
        pad_right = random.randint(0, self.max_pad)
        pad_bottom = random.randint(0, self.max_pad)
        return pad(img, target, (pad_right, pad_bottom))
218
+
219
+
220
class RandomSelect(object):
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2
    """

    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p

    def __call__(self, img, target):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(img, target)
234
+
235
+
236
class ToTensor(object):
    """Converts the PIL image to a tensor; the target passes through untouched."""

    def __call__(self, img, target):
        return F.to_tensor(img), target
239
+
240
+
241
class RandomErasing(object):
    """Thin wrapper around torchvision's RandomErasing that forwards the
    target unchanged (expects a tensor image, i.e. applied after ToTensor)."""

    def __init__(self, *args, **kwargs):
        # all arguments are passed straight through to torchvision
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        return self.eraser(img), target
248
+
249
+
250
class Normalize(object):
    """Normalizes the image tensor with (mean, std) and converts boxes from
    absolute xyxy to cxcywh normalized by the image size."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None
        out = target.copy()
        height, width = image.shape[-2:]
        if "boxes" in out:
            cxcywh = box_xyxy_to_cxcywh(out["boxes"])
            out["boxes"] = cxcywh / torch.tensor([width, height, width, height], dtype=torch.float32)
        return image, out
267
+
268
+
269
class Compose(object):
    """Chains transforms that each take and return an (image, target) pair."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        body = "".join("\n    {0}".format(t) for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
Deformable-DETR/docs/changelog.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ## Changelog
2
+
3
+ **[2020.12.07]** Fix a bug in sampling offset normalization (see [this issue](https://github.com/fundamentalvision/Deformable-DETR/issues/6)) in the MSDeformAttn module. The final accuracy on COCO is slightly improved. Code and pre-trained models have been updated. This bug occurred only in this released version, not in the original implementation used in our paper.
Deformable-DETR/models/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ from .deformable_detr import build
11
+
12
+
13
def build_model(args):
    """Public entry point: delegates to deformable_detr.build with the
    parsed command-line arguments."""
    return build(args)
15
+
Deformable-DETR/models/backbone.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Backbone modules.
12
+ """
13
+ from collections import OrderedDict
14
+
15
+ import torch
16
+ import torch.nn.functional as F
17
+ import torchvision
18
+ from torch import nn
19
+ from torchvision.models._utils import IntermediateLayerGetter
20
+ from typing import Dict, List
21
+
22
+ from util.misc import NestedTensor, is_main_process
23
+
24
+ from .position_encoding import build_position_encoding
25
+
26
+
27
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n, eps=1e-5):
        super(FrozenBatchNorm2d, self).__init__()
        # buffers (not parameters): never updated by the optimizer
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))
        self.eps = eps

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Standard BatchNorm checkpoints carry num_batches_tracked, which this
        # frozen variant has no buffer for — drop it to avoid load errors.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = self.eps
        # eps added BEFORE rsqrt (see class docstring): avoids NaNs for
        # near-zero running variance
        scale = w * (rv + eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias
65
+
66
+
67
class BackboneBase(nn.Module):
    """Wraps a torchvision backbone, freezing early layers and returning the
    requested intermediate feature maps as NestedTensors."""

    def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool):
        super().__init__()
        for name, parameter in backbone.named_parameters():
            # Note precedence: freeze when (not train_backbone) OR the
            # parameter is outside layer2/3/4 — i.e. stem and layer1 are
            # always frozen, even when training the backbone.
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        if return_interm_layers:
            # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
            return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
            self.strides = [8, 16, 32]
            self.num_channels = [512, 1024, 2048]
        else:
            return_layers = {'layer4': "0"}
            self.strides = [32]
            self.num_channels = [2048]
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)

    def forward(self, tensor_list: NestedTensor):
        """Run the backbone; downsample the padding mask to each feature map."""
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # nearest-style interpolation of the bool mask to the feature size
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out
94
+
95
+
96
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""
    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        norm_layer = FrozenBatchNorm2d
        # only the main process downloads pretrained weights (others reuse cache)
        backbone = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=norm_layer)
        assert name not in ('resnet18', 'resnet34'), "number of channels are hard coded"
        super().__init__(backbone, train_backbone, return_interm_layers)
        if dilation:
            # dilated last stage keeps resolution: effective stride is halved
            self.strides[-1] = self.strides[-1] // 2
110
+
111
+
112
class Joiner(nn.Sequential):
    """Runs the backbone (self[0]) and then the position encoding (self[1])
    on each returned feature map, in sorted key order."""

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)
        self.strides = backbone.strides
        self.num_channels = backbone.num_channels

    def forward(self, tensor_list: NestedTensor):
        features = self[0](tensor_list)
        # collect feature maps in a deterministic (sorted-key) order
        out: List[NestedTensor] = [features[key] for key in sorted(features)]
        # position encoding, cast to the feature dtype
        pos = [self[1](feat).to(feat.tensors.dtype) for feat in out]
        return out, pos
130
+
131
+
132
def build_backbone(args):
    """Build the backbone + positional-encoding joiner from parsed args.

    The backbone trains only when args.lr_backbone > 0; intermediate layers
    are returned when masks are needed or multiple feature levels are used.
    """
    position_embedding = build_position_encoding(args)
    train_backbone = args.lr_backbone > 0
    return_interm_layers = args.masks or (args.num_feature_levels > 1)
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    model = Joiner(backbone, position_embedding)
    return model
Deformable-DETR/models/deformable_detr.py ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Deformable DETR model and criterion classes.
12
+ """
13
+ import torch
14
+ import torch.nn.functional as F
15
+ from torch import nn
16
+ import math
17
+
18
+ from util import box_ops
19
+ from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
20
+ accuracy, get_world_size, interpolate,
21
+ is_dist_avail_and_initialized, inverse_sigmoid)
22
+
23
+ from .backbone import build_backbone
24
+ from .matcher import build_matcher
25
+ from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
26
+ dice_loss, sigmoid_focal_loss)
27
+ from .deformable_transformer import build_deforamble_transformer
28
+ import copy
29
+
30
+
31
+ def _get_clones(module, N):
32
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
33
+
34
+
35
class DeformableDETR(nn.Module):
    """ This is the Deformable DETR module that performs object detection """
    def __init__(self, backbone, transformer, num_classes, num_queries, num_feature_levels,
                 aux_loss=True, with_box_refine=False, two_stage=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
            with_box_refine: iterative bounding box refinement
            two_stage: two-stage Deformable DETR
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        self.class_embed = nn.Linear(hidden_dim, num_classes)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.num_feature_levels = num_feature_levels
        if not two_stage:
            # each query stores content + positional parts, hence 2x hidden_dim
            self.query_embed = nn.Embedding(num_queries, hidden_dim*2)
        if num_feature_levels > 1:
            num_backbone_outs = len(backbone.strides)
            input_proj_list = []
            # 1x1 conv projection per backbone level to the common hidden_dim
            for _ in range(num_backbone_outs):
                in_channels = backbone.num_channels[_]
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
            # extra pyramid levels beyond the backbone: stride-2 3x3 convs
            # chained on the previous (projected) map
            for _ in range(num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
                    nn.GroupNorm(32, hidden_dim),
                ))
                in_channels = hidden_dim
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
                    nn.GroupNorm(32, hidden_dim),
                )])
        self.backbone = backbone
        self.aux_loss = aux_loss
        self.with_box_refine = with_box_refine
        self.two_stage = two_stage

        # focal-loss style classification bias init: start with low foreground
        # probability (prior_prob) to stabilize early training
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)

        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
        if with_box_refine:
            # independent heads per decoder layer (deep copies)
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.transformer.decoder.bbox_embed = self.bbox_embed
        else:
            # shared heads: the same module object repeated for every layer
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.transformer.decoder.bbox_embed = None
        if two_stage:
            # hack implementation for two-stage
            self.transformer.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)

    def forward(self, samples: NestedTensor):
        """ The forward expects a NestedTensor, which consists of:
               - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels

            It returns a dict with the following elements:
               - "pred_logits": the classification logits (including no-object) for all queries.
                                Shape= [batch_size x num_queries x (num_classes + 1)]
               - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                               (center_x, center_y, height, width). These values are normalized in [0, 1],
                               relative to the size of each individual image (disregarding possible padding).
                               See PostProcess for information on how to retrieve the unnormalized bounding box.
               - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
                                dictionnaries containing the two above keys for each decoder layer.
        """
        if not isinstance(samples, NestedTensor):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.backbone(samples)

        # project each backbone level to hidden_dim
        srcs = []
        masks = []
        for l, feat in enumerate(features):
            src, mask = feat.decompose()
            srcs.append(self.input_proj[l](src))
            masks.append(mask)
            assert mask is not None
        # synthesize extra pyramid levels when more are requested than the
        # backbone provides; masks/pos are recomputed for each new level
        if self.num_feature_levels > len(srcs):
            _len_srcs = len(srcs)
            for l in range(_len_srcs, self.num_feature_levels):
                if l == _len_srcs:
                    src = self.input_proj[l](features[-1].tensors)
                else:
                    src = self.input_proj[l](srcs[-1])
                m = samples.mask
                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
                srcs.append(src)
                masks.append(mask)
                pos.append(pos_l)

        query_embeds = None
        if not self.two_stage:
            query_embeds = self.query_embed.weight
        hs, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact = self.transformer(srcs, masks, pos, query_embeds)

        # per-decoder-layer predictions; boxes are offsets w.r.t. the (inverse
        # sigmoid of the) reference points, squashed back with sigmoid
        outputs_classes = []
        outputs_coords = []
        for lvl in range(hs.shape[0]):
            if lvl == 0:
                reference = init_reference
            else:
                reference = inter_references[lvl - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[lvl](hs[lvl])
            tmp = self.bbox_embed[lvl](hs[lvl])
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                # 2-d reference points only shift the box center
                assert reference.shape[-1] == 2
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        outputs_class = torch.stack(outputs_classes)
        outputs_coord = torch.stack(outputs_coords)

        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
        if self.aux_loss:
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)

        if self.two_stage:
            enc_outputs_coord = enc_outputs_coord_unact.sigmoid()
            out['enc_outputs'] = {'pred_logits': enc_outputs_class, 'pred_boxes': enc_outputs_coord}
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
196
+
197
+
198
+ class SetCriterion(nn.Module):
199
+ """ This class computes the loss for DETR.
200
+ The process happens in two steps:
201
+ 1) we compute hungarian assignment between ground truth boxes and the outputs of the model
202
+ 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
203
+ """
204
    def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            losses: list of all the losses to be applied. See get_loss for list of available losses.
            focal_alpha: alpha in Focal Loss
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.losses = losses
        self.focal_alpha = focal_alpha
219
+
220
    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']

        # (batch_idx, query_idx) pairs of matched predictions
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # default every query to the no-object index (== num_classes)
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o

        # one-hot with an extra (last) slot for no-object, scattered then
        # dropped so unmatched queries end up as all-zero rows
        target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
                                            dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)

        target_classes_onehot = target_classes_onehot[:,:,:-1]
        # scaling by num_queries (shape[1]) matches the official implementation
        loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
        losses = {'loss_ce': loss_ce}

        if log:
            # TODO this should probably be a separate loss, not hacked in this one here
            losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses
245
+
246
+ @torch.no_grad()
247
+ def loss_cardinality(self, outputs, targets, indices, num_boxes):
248
+ """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
249
+ This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
250
+ """
251
+ pred_logits = outputs['pred_logits']
252
+ device = pred_logits.device
253
+ tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
254
+ # Count the number of predictions that are NOT "no-object" (which is the last class)
255
+ card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
256
+ card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
257
+ losses = {'cardinality_error': card_err}
258
+ return losses
259
+
260
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.
        """
        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes'][idx]
        # gather matched target boxes in the same order as the predictions
        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')

        losses = {}
        # normalize by the (all-reduced) number of boxes, not the batch size
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes

        # diag() keeps only the matched prediction/target pairs
        loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
            box_ops.box_cxcywh_to_xyxy(src_boxes),
            box_ops.box_cxcywh_to_xyxy(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses
280
+
281
    def loss_masks(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the masks: the focal loss and the dice loss.
        targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
        """
        assert "pred_masks" in outputs

        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)

        src_masks = outputs["pred_masks"]

        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose()
        target_masks = target_masks.to(src_masks)

        src_masks = src_masks[src_idx]
        # upsample predictions to the target size
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
                                mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)

        # flatten to [num_matched, h*w] so both losses reduce per instance
        target_masks = target_masks[tgt_idx].flatten(1)

        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),
        }
        return losses
309
+
310
+ def _get_src_permutation_idx(self, indices):
311
+ # permute predictions following indices
312
+ batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
313
+ src_idx = torch.cat([src for (src, _) in indices])
314
+ return batch_idx, src_idx
315
+
316
+ def _get_tgt_permutation_idx(self, indices):
317
+ # permute targets following indices
318
+ batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
319
+ tgt_idx = torch.cat([tgt for (_, tgt) in indices])
320
+ return batch_idx, tgt_idx
321
+
322
+ def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
323
+ loss_map = {
324
+ 'labels': self.loss_labels,
325
+ 'cardinality': self.loss_cardinality,
326
+ 'boxes': self.loss_boxes,
327
+ 'masks': self.loss_masks
328
+ }
329
+ assert loss in loss_map, f'do you really want to compute {loss} loss?'
330
+ return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
331
+
332
    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        # Matching is done on the last-layer predictions only; aux/enc outputs are re-matched below.
        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}

        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)

        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_boxes = sum(len(t["labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        # clamp keeps the normalizer >= 1 even when some workers have no targets
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            kwargs = {}
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            for i, aux_outputs in enumerate(outputs['aux_outputs']):
                # each intermediate decoder layer gets its own matching
                indices = self.matcher(aux_outputs, targets)
                for loss in self.losses:
                    if loss == 'masks':
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    kwargs = {}
                    if loss == 'labels':
                        # Logging is enabled only for the last layer
                        kwargs['log'] = False
                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
                    # suffix each key with the layer index so entries do not collide
                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
                    losses.update(l_dict)

        # Two-stage variant: supervise the encoder proposals with class-agnostic targets.
        if 'enc_outputs' in outputs:
            enc_outputs = outputs['enc_outputs']
            bin_targets = copy.deepcopy(targets)
            for bt in bin_targets:
                # all objects collapse to a single "foreground" class for the encoder stage
                bt['labels'] = torch.zeros_like(bt['labels'])
            indices = self.matcher(enc_outputs, bin_targets)
            for loss in self.losses:
                if loss == 'masks':
                    # Intermediate masks losses are too costly to compute, we ignore them.
                    continue
                kwargs = {}
                if loss == 'labels':
                    # Logging is enabled only for the last layer
                    kwargs['log'] = False
                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)
                l_dict = {k + f'_enc': v for k, v in l_dict.items()}
                losses.update(l_dict)

        return losses
392
+
393
+
394
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""

    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augment, but before padding
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        prob = out_logits.sigmoid()
        # Select the top-100 (query, class) pairs over the flattened per-class score map.
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
        scores = topk_values
        # Recover the query index and the class label from the flattened index.
        # Fix: tensor `//` (floor division) is deprecated and warns on newer PyTorch;
        # torch.div with rounding_mode='floor' is the exact equivalent for these
        # non-negative integer indices.
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
        labels = topk_indexes % out_logits.shape[2]
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]

        return results
427
+
428
+
429
class MLP(nn.Module):
    """A simple multi-layer perceptron (also called FFN) with ReLU between layers."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # layer widths: input -> hidden * (num_layers - 1) -> output
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:])
        )

    def forward(self, x):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            if idx < last:  # no activation after the final layer
                x = F.relu(x)
        return x
442
+
443
+
444
def build(args):
    """Build the Deformable DETR model, its training criterion, and post-processors.

    Returns (model, criterion, postprocessors); `postprocessors` maps task names
    ('bbox', optionally 'segm'/'panoptic') to output converters.
    """
    # COCO's highest object class id is 90, hence 91 logits; non-COCO assumes 20 classes.
    num_classes = 20 if args.dataset_file != 'coco' else 91
    if args.dataset_file == "coco_panoptic":
        num_classes = 250
    device = torch.device(args.device)

    backbone = build_backbone(args)

    transformer = build_deforamble_transformer(args)
    model = DeformableDETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,
        num_feature_levels=args.num_feature_levels,
        aux_loss=args.aux_loss,
        with_box_refine=args.with_box_refine,
        two_stage=args.two_stage,
    )
    if args.masks:
        # wrap with the segmentation head; freeze the detector when fine-tuning masks only
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # replicate the loss weights for every intermediate decoder layer ('_{i}')
        # and for the two-stage encoder output ('_enc')
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        aux_weight_dict.update({k + f'_enc': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)

    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]
    # num_classes, matcher, weight_dict, losses, focal_alpha=0.25
    criterion = SetCriterion(num_classes, matcher, weight_dict, losses, focal_alpha=args.focal_alpha)
    criterion.to(device)
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            # COCO panoptic: category ids <= 90 are "things", the rest are "stuff"
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)

    return model, criterion, postprocessors
Deformable-DETR/models/deformable_transformer.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ import copy
11
+ from typing import Optional, List
12
+ import math
13
+
14
+ import torch
15
+ import torch.nn.functional as F
16
+ from torch import nn, Tensor
17
+ from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
18
+
19
+ from util.misc import inverse_sigmoid
20
+ from models.ops.modules import MSDeformAttn
21
+
22
+
23
class DeformableTransformer(nn.Module):
    """Deformable-DETR transformer: a deformable-attention encoder over flattened
    multi-scale feature maps, and a deformable decoder driven either by learned
    queries or (two-stage mode) by top-scoring encoder proposals."""

    def __init__(self, d_model=256, nhead=8,
                 num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.1,
                 activation="relu", return_intermediate_dec=False,
                 num_feature_levels=4, dec_n_points=4, enc_n_points=4,
                 two_stage=False, two_stage_num_proposals=300):
        super().__init__()

        self.d_model = d_model
        self.nhead = nhead
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals

        encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
                                                          dropout, activation,
                                                          num_feature_levels, nhead, enc_n_points)
        self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)

        decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
                                                          dropout, activation,
                                                          num_feature_levels, nhead, dec_n_points)
        self.decoder = DeformableTransformerDecoder(decoder_layer, num_decoder_layers, return_intermediate_dec)

        # learned per-level embedding, added so attention can tell levels apart
        # after the feature maps are flattened into one sequence
        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))

        if two_stage:
            # projections used to turn encoder memory into proposals and queries
            self.enc_output = nn.Linear(d_model, d_model)
            self.enc_output_norm = nn.LayerNorm(d_model)
            self.pos_trans = nn.Linear(d_model * 2, d_model * 2)
            self.pos_trans_norm = nn.LayerNorm(d_model * 2)
        else:
            # predicts the initial (x, y) reference point from each query embedding
            self.reference_points = nn.Linear(d_model, 2)

        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for all weight matrices; MSDeformAttn modules use their own scheme.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        for m in self.modules():
            if isinstance(m, MSDeformAttn):
                m._reset_parameters()
        if not self.two_stage:
            xavier_uniform_(self.reference_points.weight.data, gain=1.0)
            constant_(self.reference_points.bias.data, 0.)
        normal_(self.level_embed)

    def get_proposal_pos_embed(self, proposals):
        """Sine/cosine positional embedding of proposal boxes (4 coords x 128 feats)."""
        num_pos_feats = 128
        temperature = 10000
        scale = 2 * math.pi

        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
        dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
        # N, L, 4
        proposals = proposals.sigmoid() * scale
        # N, L, 4, 128
        pos = proposals[:, :, :, None] / dim_t
        # N, L, 4, 64, 2
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

    def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
        """Turn encoder memory into per-position box proposals (in inverse-sigmoid
        space) plus a projected memory used for two-stage proposal scoring."""
        N_, S_, C_ = memory.shape
        base_scale = 4.0
        proposals = []
        _cur = 0
        for lvl, (H_, W_) in enumerate(spatial_shapes):
            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
            # number of valid (non-padded) rows/cols per image at this level
            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
                                            torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

            # normalize pixel-center coordinates by the valid extent of each image
            scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
            # proposal width/height doubles with every (coarser) level
            wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
            proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
            proposals.append(proposal)
            _cur += (H_ * W_)
        output_proposals = torch.cat(proposals, 1)
        # keep only proposals comfortably inside the unit box; others are disabled below
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))  # inverse sigmoid
        output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))

        output_memory = memory
        output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
        output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
        output_memory = self.enc_output_norm(self.enc_output(output_memory))
        return output_memory, output_proposals

    def get_valid_ratio(self, mask):
        """Fraction of each feature map that is real content (not padding), per axis."""
        _, H, W = mask.shape
        valid_H = torch.sum(~mask[:, :, 0], 1)
        valid_W = torch.sum(~mask[:, 0, :], 1)
        valid_ratio_h = valid_H.float() / H
        valid_ratio_w = valid_W.float() / W
        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
        return valid_ratio

    def forward(self, srcs, masks, pos_embeds, query_embed=None):
        """Run encoder and decoder.

        srcs/masks/pos_embeds: per-level feature maps (N, C, H, W), padding masks
        and positional encodings. query_embed: learned query embeddings of width
        2*d_model (ignored in two-stage mode).
        """
        assert self.two_stage or query_embed is not None

        # prepare input for encoder: flatten every level to (N, H*W, C) and concatenate
        src_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, h, w = src.shape
            spatial_shape = (h, w)
            spatial_shapes.append(spatial_shape)
            src = src.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            # add the learned per-level embedding to the positional encoding
            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            src_flatten.append(src)
            mask_flatten.append(mask)
        src_flatten = torch.cat(src_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
        # start offset of each level inside the flattened sequence
        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)

        # encoder
        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)

        # prepare input for decoder
        bs, _, c = memory.shape
        if self.two_stage:
            output_memory, output_proposals = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)

            # hack implementation for two-stage Deformable DETR: reuse the extra
            # (num_layers-th) class/bbox heads that DeformableDETR attaches to the decoder
            enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)
            enc_outputs_coord_unact = self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals

            # keep the highest-scoring proposals as decoder reference boxes and queries
            topk = self.two_stage_num_proposals
            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
            topk_coords_unact = torch.gather(enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
            topk_coords_unact = topk_coords_unact.detach()  # no gradient through proposal selection
            reference_points = topk_coords_unact.sigmoid()
            init_reference_out = reference_points
            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))
            query_embed, tgt = torch.split(pos_trans_out, c, dim=2)
        else:
            # split the learned embedding into positional query and content target
            query_embed, tgt = torch.split(query_embed, c, dim=1)
            query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
            tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
            reference_points = self.reference_points(query_embed).sigmoid()
            init_reference_out = reference_points

        # decoder
        hs, inter_references = self.decoder(tgt, reference_points, memory,
                                            spatial_shapes, level_start_index, valid_ratios, query_embed, mask_flatten)

        inter_references_out = inter_references
        if self.two_stage:
            return hs, init_reference_out, inter_references_out, enc_outputs_class, enc_outputs_coord_unact
        return hs, init_reference_out, inter_references_out, None, None
187
+
188
+
189
class DeformableTransformerEncoderLayer(nn.Module):
    """One encoder layer: multi-scale deformable self-attention followed by a
    feed-forward network, each with residual connection and LayerNorm (post-norm)."""

    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()

        # self attention (multi-scale deformable attention over the flattened levels)
        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)

        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        # positional encoding is added to the attention query only, not to the values
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, src):
        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        src = src + self.dropout3(src2)
        src = self.norm2(src)
        return src

    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
        # self attention
        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)

        # ffn
        src = self.forward_ffn(src)

        return src
229
+
230
+
231
class DeformableTransformerEncoder(nn.Module):
    """A stack of deformable encoder layers applied sequentially to the flattened
    multi-scale feature sequence."""

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Build normalized (x, y) reference points at every pixel center of every
        level, scaled per image by the level's valid (non-padded) ratios."""
        per_level = []
        for lvl, (H_, W_) in enumerate(spatial_shapes):
            ys, xs = torch.meshgrid(
                torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
                torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
            ys = ys.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
            xs = xs.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
            per_level.append(torch.stack((xs, ys), -1))
        points = torch.cat(per_level, 1)
        # broadcast against the ratios of every level for multi-scale sampling
        points = points[:, :, None] * valid_ratios[:, None]
        return points

    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
        output = src
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
        for layer in self.layers:
            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
        return output
259
+
260
+
261
class DeformableTransformerDecoderLayer(nn.Module):
    """One decoder layer: standard multi-head self-attention among the queries,
    multi-scale deformable cross-attention into the encoder memory, then an FFN;
    all three sub-blocks use residual + LayerNorm (post-norm)."""

    def __init__(self, d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()

        # cross attention (queries attend to the multi-scale encoder memory)
        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)

        # self attention (queries attend to each other; standard MHA)
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout3 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout4 = nn.Dropout(dropout)
        self.norm3 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        # positional encoding is added to queries/keys only, not to the values
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, tgt):
        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout4(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward(self, tgt, query_pos, reference_points, src, src_spatial_shapes, level_start_index, src_padding_mask=None):
        # self attention
        q = k = self.with_pos_embed(tgt, query_pos)
        # nn.MultiheadAttention expects (seq, batch, dim), hence the transposes
        tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)

        # cross attention
        tgt2 = self.cross_attn(self.with_pos_embed(tgt, query_pos),
                               reference_points,
                               src, src_spatial_shapes, level_start_index, src_padding_mask)
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)

        # ffn
        tgt = self.forward_ffn(tgt)

        return tgt
313
+
314
+
315
class DeformableTransformerDecoder(nn.Module):
    """A stack of deformable decoder layers with optional iterative bounding-box
    refinement of the reference points between layers."""

    def __init__(self, decoder_layer, num_layers, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.return_intermediate = return_intermediate
        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR
        # (these are assigned externally when those options are enabled)
        self.bbox_embed = None
        self.class_embed = None

    def forward(self, tgt, reference_points, src, src_spatial_shapes, src_level_start_index, src_valid_ratios,
                query_pos=None, src_padding_mask=None):
        output = tgt

        intermediate = []
        intermediate_reference_points = []
        for lid, layer in enumerate(self.layers):
            # scale the reference points by the per-level valid ratios so sampling
            # only covers the non-padded region of each feature map
            if reference_points.shape[-1] == 4:
                reference_points_input = reference_points[:, :, None] \
                                         * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]
            else:
                assert reference_points.shape[-1] == 2
                reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]
            output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index, src_padding_mask)

            # hack implementation for iterative bounding box refinement
            if self.bbox_embed is not None:
                tmp = self.bbox_embed[lid](output)
                if reference_points.shape[-1] == 4:
                    # refine the full (cx, cy, w, h) box in inverse-sigmoid space
                    new_reference_points = tmp + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    assert reference_points.shape[-1] == 2
                    # only (cx, cy) references exist: refine the center, keep w/h raw
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                # detach so each layer refines without back-propagating through
                # the previous layer's box predictions
                reference_points = new_reference_points.detach()

            if self.return_intermediate:
                intermediate.append(output)
                intermediate_reference_points.append(reference_points)

        if self.return_intermediate:
            return torch.stack(intermediate), torch.stack(intermediate_reference_points)

        return output, reference_points
361
+
362
+
363
def _get_clones(module, N):
    """Return a ModuleList holding N independent deep copies of `module`."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
365
+
366
+
367
def _get_activation_fn(activation):
    """Return an activation function given a string.

    Supported names: "relu", "gelu", "glu".
    Raises RuntimeError for any other name.
    """
    activations = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    try:
        return activations[activation]
    except KeyError:
        # Fix: the original error message said "relu/gelu" even though "glu"
        # is also accepted; the message now lists every supported option.
        raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") from None
376
+
377
+
378
+ def build_deforamble_transformer(args):
379
+ return DeformableTransformer(
380
+ d_model=args.hidden_dim,
381
+ nhead=args.nheads,
382
+ num_encoder_layers=args.enc_layers,
383
+ num_decoder_layers=args.dec_layers,
384
+ dim_feedforward=args.dim_feedforward,
385
+ dropout=args.dropout,
386
+ activation="relu",
387
+ return_intermediate_dec=True,
388
+ num_feature_levels=args.num_feature_levels,
389
+ dec_n_points=args.dec_n_points,
390
+ enc_n_points=args.enc_n_points,
391
+ two_stage=args.two_stage,
392
+ two_stage_num_proposals=args.num_queries)
393
+
394
+
Deformable-DETR/models/matcher.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Modules to compute the matching cost and solve the corresponding LSAP.
12
+ """
13
+ import torch
14
+ from scipy.optimize import linear_sum_assignment
15
+ from torch import nn
16
+
17
+ from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
18
+
19
+
20
class HungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network

    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """

    def __init__(self,
                 cost_class: float = 1,
                 cost_bbox: float = 1,
                 cost_giou: float = 1):
        """Creates the matcher

        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_bbox = cost_bbox
        self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"

    def forward(self, outputs, targets):
        """ Performs the matching

        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates

            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                           objects in the target) containing the class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates

        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        # matching is a pure assignment problem; no gradients flow through it
        with torch.no_grad():
            bs, num_queries = outputs["pred_logits"].shape[:2]

            # We flatten to compute the cost matrices in a batch
            out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
            out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

            # Also concat the target labels and boxes
            tgt_ids = torch.cat([v["labels"] for v in targets])
            tgt_bbox = torch.cat([v["boxes"] for v in targets])

            # Compute the classification cost (focal-loss style, matching the
            # sigmoid focal loss used for training).
            alpha = 0.25
            gamma = 2.0
            neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
            pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
            cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]

            # Compute the L1 cost between boxes
            cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)

            # Compute the giou cost betwen boxes
            cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),
                                             box_cxcywh_to_xyxy(tgt_bbox))

            # Final cost matrix
            C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
            C = C.view(bs, num_queries, -1).cpu()

            # split the target dimension back per image and solve one LSAP per image
            sizes = [len(v["boxes"]) for v in targets]
            indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
            return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
97
+
98
+
99
def build_matcher(args):
    """Construct a HungarianMatcher using the cost weights from the parsed CLI args."""
    weights = {
        "cost_class": args.set_cost_class,
        "cost_bbox": args.set_cost_bbox,
        "cost_giou": args.set_cost_giou,
    }
    return HungarianMatcher(**weights)
Deformable-DETR/models/ops/functions/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ from .ms_deform_attn_func import MSDeformAttnFunction
10
+
Deformable-DETR/models/ops/functions/ms_deform_attn_func.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ from __future__ import absolute_import
10
+ from __future__ import print_function
11
+ from __future__ import division
12
+
13
+ import torch
14
+ import torch.nn.functional as F
15
+ from torch.autograd import Function
16
+ from torch.autograd.function import once_differentiable
17
+
18
+ import MultiScaleDeformableAttention as MSDA
19
+
20
+
21
class MSDeformAttnFunction(Function):
    """Autograd wrapper around the compiled multi-scale deformable attention kernels
    (the `MultiScaleDeformableAttention` CUDA extension)."""

    @staticmethod
    def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
        # im2col_step: how many batch elements the kernel processes per pass
        ctx.im2col_step = im2col_step
        output = MSDA.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
        # stash every input needed to compute gradients in backward()
        ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = \
            MSDA.ms_deform_attn_backward(
                value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)

        # no gradients for the shape/index tensors or for im2col_step
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
39
+
40
+
41
def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
    """Pure-PyTorch reference implementation of multi-scale deformable attention.

    For debugging and testing only; the CUDA kernel should be used in practice.
    """
    bs, _, n_heads, head_dim = value.shape
    _, n_queries, _, n_levels, n_points, _ = sampling_locations.shape
    # split the flattened value sequence back into one chunk per feature level
    per_level_values = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
    # grid_sample expects coordinates in [-1, 1]
    grids = 2 * sampling_locations - 1
    sampled = []
    for lvl, (H_, W_) in enumerate(value_spatial_shapes):
        # (bs, H*W, heads, dim) -> (bs*heads, dim, H, W)
        v = per_level_values[lvl].flatten(2).transpose(1, 2).reshape(bs * n_heads, head_dim, H_, W_)
        # (bs, q, heads, points, 2) -> (bs*heads, q, points, 2)
        g = grids[:, :, :, lvl].transpose(1, 2).flatten(0, 1)
        # (bs*heads, dim, q, points)
        sampled.append(F.grid_sample(v, g, mode='bilinear', padding_mode='zeros', align_corners=False))
    # (bs, q, heads, levels, points) -> (bs*heads, 1, q, levels*points)
    weights = attention_weights.transpose(1, 2).reshape(bs * n_heads, 1, n_queries, n_levels * n_points)
    # weighted sum over all levels and points, then merge heads back together
    out = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    out = out.view(bs, n_heads * head_dim, n_queries)
    return out.transpose(1, 2).contiguous()
Deformable-DETR/models/ops/make.sh ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------

# Build and install the MultiScaleDeformableAttention CUDA extension
# (see setup.py in this directory; requires a working CUDA toolchain).
python setup.py build install
Deformable-DETR/models/ops/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ from .ms_deform_attn import MSDeformAttn
Deformable-DETR/models/ops/modules/ms_deform_attn.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ from __future__ import absolute_import
10
+ from __future__ import print_function
11
+ from __future__ import division
12
+
13
+ import warnings
14
+ import math
15
+
16
+ import torch
17
+ from torch import nn
18
+ import torch.nn.functional as F
19
+ from torch.nn.init import xavier_uniform_, constant_
20
+
21
+ from ..functions import MSDeformAttnFunction
22
+
23
+
24
+ def _is_power_of_2(n):
25
+ if (not isinstance(n, int)) or (n < 0):
26
+ raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
27
+ return (n & (n-1) == 0) and n != 0
28
+
29
+
30
class MSDeformAttn(nn.Module):
    """Multi-Scale Deformable Attention Module (Deformable DETR).

    Each query predicts, per head and per feature level, a small set of 2D
    sampling offsets and matching attention weights; values are bilinearly
    sampled at those locations and aggregated by the weights.
    """

    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """
        :param d_model  hidden dimension
        :param n_levels number of feature levels
        :param n_heads  number of attention heads
        :param n_points number of sampling points per attention head per feature level
        """
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
        head_dim = d_model // n_heads
        # Power-of-two check (inlined from the module-level _is_power_of_2
        # helper): the CUDA kernel is more efficient when the per-head
        # dimension is a power of 2.
        if not isinstance(head_dim, int) or head_dim < 0:
            raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(head_dim, type(head_dim)))
        if head_dim == 0 or head_dim & (head_dim - 1) != 0:
            warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
                          "which is more efficient in our CUDA implementation.")

        # Batch chunk size handed to the CUDA kernel.
        self.im2col_step = 64

        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points

        # Per-query predictions: a 2D offset and a scalar weight for every
        # (head, level, point) triple, plus input/output projections.
        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)

        self._reset_parameters()

    def _reset_parameters(self):
        """Initialize projections; bias the initial sampling offsets so the
        heads start out pointing in evenly spaced directions."""
        constant_(self.sampling_offsets.weight.data, 0.)
        # One direction per head, evenly spread around the circle...
        angles = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        directions = torch.stack([angles.cos(), angles.sin()], -1)
        # ...scaled so the larger coordinate is 1, then replicated per level/point.
        directions = (directions / directions.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2)
        directions = directions.repeat(1, self.n_levels, self.n_points, 1)
        # Successive points start progressively farther from the reference point.
        for point_idx in range(self.n_points):
            directions[:, :, point_idx, :] *= point_idx + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(directions.view(-1))
        constant_(self.attention_weights.weight.data, 0.)
        constant_(self.attention_weights.bias.data, 0.)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)

    def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
        """
        :param query                    (N, Length_{query}, C)
        :param reference_points        (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
                                        or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
        :param input_flatten           (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
        :param input_spatial_shapes    (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
        :param input_level_start_index (n_levels, ), start offset of each level inside input_flatten
        :param input_padding_mask      (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements

        :return output                 (N, Length_{query}, C)
        """
        bs, len_q, _ = query.shape
        bs, len_in, _ = input_flatten.shape
        assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == len_in

        value = self.value_proj(input_flatten)
        if input_padding_mask is not None:
            # Zero padded positions so they contribute nothing when sampled.
            value = value.masked_fill(input_padding_mask[..., None], float(0))
        value = value.view(bs, len_in, self.n_heads, self.d_model // self.n_heads)

        sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
        # Weights are normalized jointly over all (level, point) pairs of a head.
        attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)

        # Build (N, Len_q, n_heads, n_levels, n_points, 2) sampling locations.
        if reference_points.shape[-1] == 2:
            # Point references: offsets are in pixel units, so normalize by (W, H).
            offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] \
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        elif reference_points.shape[-1] == 4:
            # Box references: offsets scale with the box size (w, h).
            sampling_locations = reference_points[:, :, None, :, None, :2] \
                + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
        else:
            raise ValueError(
                'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
        output = MSDeformAttnFunction.apply(
            value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
        return self.output_proj(output)
Deformable-DETR/models/ops/setup.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ import os
10
+ import glob
11
+
12
+ import torch
13
+
14
+ from torch.utils.cpp_extension import CUDA_HOME
15
+ from torch.utils.cpp_extension import CppExtension
16
+ from torch.utils.cpp_extension import CUDAExtension
17
+
18
+ from setuptools import find_packages
19
+ from setuptools import setup
20
+
21
+ requirements = ["torch", "torchvision"]
22
+
23
+ def get_extensions():
24
+ this_dir = os.path.dirname(os.path.abspath(__file__))
25
+ extensions_dir = os.path.join(this_dir, "src")
26
+
27
+ main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
28
+ source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
29
+ source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
30
+
31
+ sources = main_file + source_cpu
32
+ extension = CppExtension
33
+ extra_compile_args = {"cxx": []}
34
+ define_macros = []
35
+
36
+ print("inside get_extensions")
37
+ print(CUDA_HOME)
38
+ if CUDA_HOME is not None and (torch.cuda.is_available() or ("TORCH_CUDA_ARCH_LIST" in os.environ) or torch.cuda.get_arch_list()):
39
+ extension = CUDAExtension
40
+ sources += source_cuda
41
+ define_macros += [("WITH_CUDA", None)]
42
+ extra_compile_args["nvcc"] = [
43
+ "-DCUDA_HAS_FP16=1",
44
+ "-D__CUDA_NO_HALF_OPERATORS__",
45
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
46
+ "-D__CUDA_NO_HALF2_OPERATORS__",
47
+ ]
48
+ else:
49
+ raise NotImplementedError('Cuda is not available')
50
+
51
+ sources = [os.path.join(extensions_dir, s) for s in sources]
52
+ include_dirs = [extensions_dir]
53
+ ext_modules = [
54
+ extension(
55
+ "MultiScaleDeformableAttention",
56
+ sources,
57
+ include_dirs=include_dirs,
58
+ define_macros=define_macros,
59
+ extra_compile_args=extra_compile_args,
60
+ )
61
+ ]
62
+ return ext_modules
63
+
64
+ setup(
65
+ name="MultiScaleDeformableAttention",
66
+ version="1.0",
67
+ author="Weijie Su",
68
+ url="https://github.com/fundamentalvision/Deformable-DETR",
69
+ description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
70
+ packages=find_packages(exclude=("configs", "tests",)),
71
+ ext_modules=get_extensions(),
72
+ cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
73
+ )
Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.cpp ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include <vector>
12
+
13
+ #include <ATen/ATen.h>
14
+ #include <ATen/cuda/CUDAContext.h>
15
+
16
+
17
// CPU entry point for the multi-scale deformable attention forward pass.
// The operator is only implemented for CUDA; this stub always aborts with
// an error so dispatch to CPU tensors fails loudly.
at::Tensor
ms_deform_attn_cpu_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    AT_ERROR("Not implement on cpu");
}
28
+
29
// CPU entry point for the multi-scale deformable attention backward pass.
// Not implemented: the operator is CUDA-only, so this stub always aborts.
std::vector<at::Tensor>
ms_deform_attn_cpu_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    AT_ERROR("Not implement on cpu");
}
41
+
Deformable-DETR/models/ops/src/cpu/ms_deform_attn_cpu.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+ #include <torch/extension.h>
13
+
14
+ at::Tensor
15
+ ms_deform_attn_cpu_forward(
16
+ const at::Tensor &value,
17
+ const at::Tensor &spatial_shapes,
18
+ const at::Tensor &level_start_index,
19
+ const at::Tensor &sampling_loc,
20
+ const at::Tensor &attn_weight,
21
+ const int im2col_step);
22
+
23
+ std::vector<at::Tensor>
24
+ ms_deform_attn_cpu_backward(
25
+ const at::Tensor &value,
26
+ const at::Tensor &spatial_shapes,
27
+ const at::Tensor &level_start_index,
28
+ const at::Tensor &sampling_loc,
29
+ const at::Tensor &attn_weight,
30
+ const at::Tensor &grad_output,
31
+ const int im2col_step);
32
+
33
+
Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.cu ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include <vector>
12
+ #include "cuda/ms_deform_im2col_cuda.cuh"
13
+
14
+ #include <ATen/ATen.h>
15
+ #include <ATen/cuda/CUDAContext.h>
16
+ #include <cuda.h>
17
+ #include <cuda_runtime.h>
18
+
19
+
20
// Forward pass of multi-scale deformable attention on CUDA.
//
//   value:             (batch, spatial_size, num_heads, channels) flattened multi-level features
//   spatial_shapes:    (num_levels, 2) int64 per-level (H, W)
//   level_start_index: (num_levels,) int64 start offset of each level in `value`
//   sampling_loc:      (batch, num_query, num_heads, num_levels, num_point, 2)
//   attn_weight:       (batch, num_query, num_heads, num_levels, num_point)
//   im2col_step:       batch items processed per kernel launch (batch must be divisible)
//
// Returns a (batch, num_query, num_heads*channels) tensor.
at::Tensor ms_deform_attn_cuda_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    // Raw data pointers are offset manually below, so contiguity is required.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");

    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");

    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);

    const int num_levels = spatial_shapes.size(0);

    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);

    // Never launch with a chunk larger than the batch itself.
    const int im2col_step_ = std::min(batch, im2col_step);

    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);

    auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());

    // Process the batch in chunks of im2col_step_ items; per_*_size are the
    // per-batch-item element counts used to offset the raw pointers.
    const int batch_n = im2col_step_;
    auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto columns = output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
            ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                columns.data<scalar_t>());

        }));
    }

    // Merge the head and channel dimensions for the caller.
    output = output.view({batch, num_query, num_heads*channels});

    return output;
}
81
+
82
+
83
// Backward pass of multi-scale deformable attention on CUDA.
//
// Takes the same inputs as the forward pass plus `grad_output`
// (batch, num_query, num_heads, channels as viewed below) and returns
// {grad_value, grad_sampling_loc, grad_attn_weight}, each shaped like the
// corresponding forward input.
std::vector<at::Tensor> ms_deform_attn_cuda_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{

    // Raw data pointers are offset manually below, so contiguity is required.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
    AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");

    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
    AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");

    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);

    const int num_levels = spatial_shapes.size(0);

    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);

    // Never launch with a chunk larger than the batch itself.
    const int im2col_step_ = std::min(batch, im2col_step);

    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);

    // Gradients are accumulated into zero-initialized buffers by the kernel.
    auto grad_value = at::zeros_like(value);
    auto grad_sampling_loc = at::zeros_like(sampling_loc);
    auto grad_attn_weight = at::zeros_like(attn_weight);

    // Process the batch in chunks of im2col_step_ items; per_*_size are the
    // per-batch-item element counts used to offset the raw pointers.
    const int batch_n = im2col_step_;
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});

    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
            ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
                grad_output_g.data<scalar_t>(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);

        }));
    }

    return {
        grad_value, grad_sampling_loc, grad_attn_weight
    };
}
Deformable-DETR/models/ops/src/cuda/ms_deform_attn_cuda.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+ #include <torch/extension.h>
13
+
14
+ at::Tensor ms_deform_attn_cuda_forward(
15
+ const at::Tensor &value,
16
+ const at::Tensor &spatial_shapes,
17
+ const at::Tensor &level_start_index,
18
+ const at::Tensor &sampling_loc,
19
+ const at::Tensor &attn_weight,
20
+ const int im2col_step);
21
+
22
+ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
23
+ const at::Tensor &value,
24
+ const at::Tensor &spatial_shapes,
25
+ const at::Tensor &level_start_index,
26
+ const at::Tensor &sampling_loc,
27
+ const at::Tensor &attn_weight,
28
+ const at::Tensor &grad_output,
29
+ const int im2col_step);
30
+
Deformable-DETR/models/ops/src/cuda/ms_deform_im2col_cuda.cuh ADDED
@@ -0,0 +1,1327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************
7
+ * Modified from DCN (https://github.com/msracver/Deformable-ConvNets)
8
+ * Copyright (c) 2018 Microsoft
9
+ **************************************************************************
10
+ */
11
+
12
+ #include <cstdio>
13
+ #include <algorithm>
14
+ #include <cstring>
15
+
16
+ #include <ATen/ATen.h>
17
+ #include <ATen/cuda/CUDAContext.h>
18
+
19
+ #include <THC/THCAtomics.cuh>
20
+
21
// Grid-stride loop over n elements: each thread starts at its global index
// and advances by the total thread count (blockDim.x * gridDim.x).
#define CUDA_KERNEL_LOOP(i, n)                          \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;   \
      i < (n);                                          \
      i += blockDim.x * gridDim.x)

// Default threads per block for the kernels in this file.
const int CUDA_NUM_THREADS = 1024;
// Number of blocks needed to cover N items at num_threads per block (ceil div).
inline int GET_BLOCKS(const int N, const int num_threads)
{
  return (N + num_threads - 1) / num_threads;
}
31
+
32
+
33
// Bilinearly sample one (head m, channel c) value from `bottom_data` at the
// fractional pixel location (h, w).  The buffer layout implied by the strides
// below is (height, width, nheads, channels), heads/channels innermost.
// Corners that fall outside the feature map contribute zero.
template <typename scalar_t>
__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
                                                   const int &height, const int &width, const int &nheads, const int &channels,
                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c)
{
  // Integer corners enclosing (h, w).
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  // Fractional distances to the low corner and their complements.
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;

  // Flat-index strides for the (height, width, nheads, channels) layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;

  // Fetch the four corner values, guarding against out-of-range corners
  // (which stay at 0, i.e. zero padding).
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
  }

  // Standard bilinear weights and blend.
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
85
+
86
+
87
// Backward counterpart of the bilinear sampler: given the upstream gradient
// `top_grad` and this sample's `attn_weight`, scatter gradients to the four
// corner values of `grad_value` (atomically, since corners are shared across
// samples) and write the gradients w.r.t. the sampling location and the
// attention weight.  The location/weight gradients are written with plain
// stores — NOTE(review): this variant presumably targets per-thread-exclusive
// destinations (e.g. shared-memory staging); confirm against the call sites.
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data,
                                               const int &height, const int &width, const int &nheads, const int &channels,
                                               const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                               const scalar_t &top_grad,
                                               const scalar_t &attn_weight,
                                               scalar_t* &grad_value,
                                               scalar_t* grad_sampling_loc,
                                               scalar_t* grad_attn_weight)
{
  // Integer corners enclosing (h, w) and fractional parts, as in the forward.
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;

  // Flat-index strides for the (height, width, nheads, channels) layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;

  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  // Gradient reaching the sampled value: upstream grad times attention weight.
  const scalar_t top_grad_value = top_grad * attn_weight;
  // Accumulators for d(sample)/dh and d(sample)/dw (unnormalized).
  scalar_t grad_h_weight = 0, grad_w_weight = 0;

  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value+ptr1, w1*top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value+ptr2, w2*top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value+ptr3, w3*top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value+ptr4, w4*top_grad_value);
  }

  // d(output)/d(attn_weight) is the interpolated value itself; the location
  // gradients are rescaled by width/height since locations are normalized.
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  *grad_attn_weight = top_grad * val;
  *grad_sampling_loc = width * grad_w_weight * top_grad_value;
  *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
}
160
+
161
+
162
// Same computation as ms_deform_attn_col2im_bilinear, but the sampling-location
// and attention-weight gradients are accumulated with atomicAdd instead of
// plain stores — the "_gm" suffix suggests a global-memory variant where
// multiple threads may target the same destinations (confirm at call sites).
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data,
                                                  const int &height, const int &width, const int &nheads, const int &channels,
                                                  const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                                  const scalar_t &top_grad,
                                                  const scalar_t &attn_weight,
                                                  scalar_t* &grad_value,
                                                  scalar_t* grad_sampling_loc,
                                                  scalar_t* grad_attn_weight)
{
  // Integer corners enclosing (h, w) and fractional parts, as in the forward.
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;

  // Flat-index strides for the (height, width, nheads, channels) layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;

  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  // Gradient reaching the sampled value: upstream grad times attention weight.
  const scalar_t top_grad_value = top_grad * attn_weight;
  // Accumulators for d(sample)/dh and d(sample)/dw (unnormalized).
  scalar_t grad_h_weight = 0, grad_w_weight = 0;

  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value+ptr1, w1*top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value+ptr2, w2*top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value+ptr3, w3*top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value+ptr4, w4*top_grad_value);
  }

  // d(output)/d(attn_weight) is the interpolated value; location gradients are
  // rescaled by width/height since sampling locations are normalized.
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  atomicAdd(grad_attn_weight, top_grad * val);
  atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
  atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
}
235
+
236
+
237
// Forward kernel of multi-scale deformable attention.
//
// One thread computes one output scalar data_col[index], where index encodes
// (batch b_col, query q_col, head m_col, channel c_col). The thread sums,
// over every feature level and every sampling point, the bilinearly
// interpolated value weighted by the corresponding attention weight.
//
// n                     : total number of output elements
//                         (batch_size * num_query * num_heads * channels).
// data_value            : flattened multi-level value tensor.
// data_spatial_shapes   : per level (H, W) pairs.
// data_level_start_index: per level start offset into the flattened values.
// data_sampling_loc     : normalized (x, y) sampling locations in [0, 1].
// data_attn_weight      : attention weights, one per (level, point).
// data_col              : output buffer, one scalar per thread index.
template <typename scalar_t>
__global__ void ms_deformable_im2col_gpu_kernel(const int n,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *data_col)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the flat index: fastest-varying is channel, then head,
    // then query, then batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;   // (b, q, m) triple, used to address loc/weight
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    scalar_t *data_col_ptr = data_col + index;
    // Running cursors into the (levels x points) weight and location arrays.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;  // locations store (x, y) pairs
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    scalar_t col = 0;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        // Map normalized location to pixel coordinates; the -0.5 aligns the
        // sample with pixel centers (align_corners=False convention).
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;

        // Samples fully outside the feature map contribute nothing.
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
        }

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
      }
    }
    *data_col_ptr = col;
  }
}
300
+
301
// Backward kernel, shared-memory variant v1 with compile-time block size.
//
// One thread per output gradient element (same index decomposition as the
// forward kernel). Per-channel partial gradients for the sampling location
// and attention weight are staged in statically sized shared memory and then
// summed serially by thread 0. The launcher instantiates this only for
// blockSize in {1, 2, ..., 1024} with blockDim.x == blockSize == channels
// (one block handles all channels of one (b, q, m) slot), so thread 0's
// serial sum covers exactly the per-channel partials of that slot.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Per-thread scratch: (dx, dy) pair and one attention-weight partial.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers
    // for this (b, q, m) slot.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // Writes grad_value via atomics and this thread's shared slots.
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }

        // All per-thread partials must be staged before thread 0 sums them.
        __syncthreads();
        if (tid == 0)
        {
          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
          int sid=2;
          // NOTE: this inner `tid` deliberately shadows the outer thread id;
          // it is just a serial loop counter over the block's slots.
          for (unsigned int tid = 1; tid < blockSize; ++tid)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }


          // Plain stores: exactly one block owns this (b, q, m, l, p) slot.
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        // Keep other threads from overwriting the cache for the next point
        // before thread 0 has finished reading it.
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
404
+
405
+
406
// Backward kernel, shared-memory variant v2 with compile-time block size.
//
// Identical staging to the v1 variant, but the per-block sum is done as a
// parallel tree reduction (halving stride each step) instead of a serial
// loop on thread 0. The halving loop requires blockSize to be a power of
// two; the launcher only instantiates this template for such sizes.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Per-thread scratch: (dx, dy) pair and one attention-weight partial.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }

        __syncthreads();

        // Power-of-two tree reduction: each step folds the upper half of the
        // active range into the lower half. Results land in slot 0.
        for (unsigned int s=blockSize/2; s>0; s>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          // Plain stores: exactly one block owns this output slot.
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
511
+
512
+
513
// Backward kernel, dynamic-shared-memory variant with serial reduction.
//
// Same algorithm as the blocksize_aware v1 kernel, but the scratch area is
// carved out of dynamically sized shared memory (the launcher passes
// blockDim.x * 3 * sizeof(scalar_t)), so the block size need not be known at
// compile time. Thread 0 sums the per-thread partials serially over
// blockDim.x slots.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Partition the dynamic shared buffer: first 2*blockDim.x scalars hold
    // (dx, dy) pairs, the next blockDim.x hold attention-weight partials.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }

        // All partials must be staged before thread 0 reads them.
        __syncthreads();
        if (tid == 0)
        {
          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
          int sid=2;
          // NOTE: this inner `tid` deliberately shadows the outer thread id;
          // it is just a serial loop counter over the block's slots.
          for (unsigned int tid = 1; tid < blockDim.x; ++tid)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }


          // Plain stores: exactly one block owns this output slot.
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        // Prevent the cache being overwritten for the next point before
        // thread 0 has finished reading it.
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
617
+
618
// Backward kernel, dynamic-shared-memory variant with tree reduction.
//
// Same staging as shm_reduce_v1, but the per-block sum is a parallel tree
// reduction. Because blockDim.x is a runtime value and may not be a power of
// two, each step tracks the previous active width (`spre`) and folds any
// leftover odd element (index tid + 2s < spre) into the running sum so no
// partial is dropped.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared buffer: 2*blockDim.x (dx, dy) pairs, then blockDim.x
    // attention-weight partials.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }

        __syncthreads();

        // Tree reduction tolerant of non-power-of-two widths: `s` is the
        // current fold distance, `spre` the width before this step; the
        // extra branch folds the stray element left when spre is odd.
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          // Plain stores: exactly one block owns this output slot.
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
730
+
731
// Backward kernel, tree-reduction variant for channels > blockDim.x.
//
// Identical to shm_reduce_v2 except for the final write-out: when channels
// exceed one block's width, SEVERAL blocks contribute partial sums to the
// same grad_sampling_loc / grad_attn_weight slots, so thread 0 accumulates
// with atomicAdd instead of plain stores (the launcher dispatches here when
// channels > 1024 and divisible by 1024).
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared buffer: 2*blockDim.x (dx, dy) pairs, then blockDim.x
    // attention-weight partials.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }

        __syncthreads();

        // Non-power-of-two tolerant tree reduction (see shm_reduce_v2).
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }

        if (tid == 0)
        {
          // Atomic accumulation: multiple blocks share this output slot.
          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
        }
        __syncthreads();

        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
843
+
844
+
845
// Backward kernel, global-memory fallback variant.
//
// Used by the launcher when the channel count does not fit any shared-memory
// strategy (channels > 1024 and not divisible by 1024). No shared memory and
// no barriers: every thread pushes its contributions straight into global
// gradient buffers via the all-atomic helper
// ms_deform_attn_col2im_bilinear_gm. Correct for any launch geometry, at the
// cost of heavy atomic traffic.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;

    const scalar_t top_grad = grad_col[index];

    // Cursors into locations/weights, and rebased output gradient pointers.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;

    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;

      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];

        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // All gradient outputs accumulated via atomicAdd inside the helper.
          ms_deform_attn_col2im_bilinear_gm(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            grad_sampling_loc, grad_attn_weight);
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
921
+
922
+
923
// Host-side launcher for the multi-scale deformable-attention forward pass.
//
// Computes one output scalar per (batch, query, head, channel) tuple by
// launching ms_deformable_im2col_gpu_kernel on `stream` with the standard
// one-thread-per-element grid, then reports (via printf) any launch error.
//
// All pointer arguments are device pointers; `data_col` receives the result.
template <typename scalar_t>
void ms_deformable_im2col_cuda(cudaStream_t stream,
                               const scalar_t* data_value,
                               const int64_t* data_spatial_shapes,
                               const int64_t* data_level_start_index,
                               const scalar_t* data_sampling_loc,
                               const scalar_t* data_attn_weight,
                               const int batch_size,
                               const int spatial_size,
                               const int num_heads,
                               const int channels,
                               const int num_levels,
                               const int num_query,
                               const int num_point,
                               scalar_t* data_col)
{
  // One thread per output element.
  const int total_elements = batch_size * num_query * num_heads * channels;
  const int threads_per_block = CUDA_NUM_THREADS;

  ms_deformable_im2col_gpu_kernel<scalar_t>
      <<<GET_BLOCKS(total_elements, threads_per_block), threads_per_block,
         0, stream>>>(
          total_elements, data_value, data_spatial_shapes,
          data_level_start_index, data_sampling_loc, data_attn_weight,
          batch_size, spatial_size, num_heads, channels, num_levels,
          num_query, num_point, data_col);

  // Surface asynchronous launch failures immediately (kernel itself runs
  // asynchronously on `stream`).
  const cudaError_t launch_err = cudaGetLastError();
  if (launch_err != cudaSuccess)
  {
    printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(launch_err));
  }
}
955
+
956
+ template <typename scalar_t>
957
+ void ms_deformable_col2im_cuda(cudaStream_t stream,
958
+ const scalar_t* grad_col,
959
+ const scalar_t* data_value,
960
+ const int64_t * data_spatial_shapes,
961
+ const int64_t * data_level_start_index,
962
+ const scalar_t * data_sampling_loc,
963
+ const scalar_t * data_attn_weight,
964
+ const int batch_size,
965
+ const int spatial_size,
966
+ const int num_heads,
967
+ const int channels,
968
+ const int num_levels,
969
+ const int num_query,
970
+ const int num_point,
971
+ scalar_t* grad_value,
972
+ scalar_t* grad_sampling_loc,
973
+ scalar_t* grad_attn_weight)
974
+ {
975
+ const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;
976
+ const int num_kernels = batch_size * num_query * num_heads * channels;
977
+ const int num_actual_kernels = batch_size * num_query * num_heads * channels;
978
+ if (channels > 1024)
979
+ {
980
+ if ((channels & 1023) == 0)
981
+ {
982
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
983
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
984
+ num_threads*3*sizeof(scalar_t), stream>>>(
985
+ num_kernels,
986
+ grad_col,
987
+ data_value,
988
+ data_spatial_shapes,
989
+ data_level_start_index,
990
+ data_sampling_loc,
991
+ data_attn_weight,
992
+ batch_size,
993
+ spatial_size,
994
+ num_heads,
995
+ channels,
996
+ num_levels,
997
+ num_query,
998
+ num_point,
999
+ grad_value,
1000
+ grad_sampling_loc,
1001
+ grad_attn_weight);
1002
+ }
1003
+ else
1004
+ {
1005
+ ms_deformable_col2im_gpu_kernel_gm<scalar_t>
1006
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1007
+ 0, stream>>>(
1008
+ num_kernels,
1009
+ grad_col,
1010
+ data_value,
1011
+ data_spatial_shapes,
1012
+ data_level_start_index,
1013
+ data_sampling_loc,
1014
+ data_attn_weight,
1015
+ batch_size,
1016
+ spatial_size,
1017
+ num_heads,
1018
+ channels,
1019
+ num_levels,
1020
+ num_query,
1021
+ num_point,
1022
+ grad_value,
1023
+ grad_sampling_loc,
1024
+ grad_attn_weight);
1025
+ }
1026
+ }
1027
+ else{
1028
+ switch(channels)
1029
+ {
1030
+ case 1:
1031
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
1032
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1033
+ 0, stream>>>(
1034
+ num_kernels,
1035
+ grad_col,
1036
+ data_value,
1037
+ data_spatial_shapes,
1038
+ data_level_start_index,
1039
+ data_sampling_loc,
1040
+ data_attn_weight,
1041
+ batch_size,
1042
+ spatial_size,
1043
+ num_heads,
1044
+ channels,
1045
+ num_levels,
1046
+ num_query,
1047
+ num_point,
1048
+ grad_value,
1049
+ grad_sampling_loc,
1050
+ grad_attn_weight);
1051
+ break;
1052
+ case 2:
1053
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
1054
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1055
+ 0, stream>>>(
1056
+ num_kernels,
1057
+ grad_col,
1058
+ data_value,
1059
+ data_spatial_shapes,
1060
+ data_level_start_index,
1061
+ data_sampling_loc,
1062
+ data_attn_weight,
1063
+ batch_size,
1064
+ spatial_size,
1065
+ num_heads,
1066
+ channels,
1067
+ num_levels,
1068
+ num_query,
1069
+ num_point,
1070
+ grad_value,
1071
+ grad_sampling_loc,
1072
+ grad_attn_weight);
1073
+ break;
1074
+ case 4:
1075
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
1076
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1077
+ 0, stream>>>(
1078
+ num_kernels,
1079
+ grad_col,
1080
+ data_value,
1081
+ data_spatial_shapes,
1082
+ data_level_start_index,
1083
+ data_sampling_loc,
1084
+ data_attn_weight,
1085
+ batch_size,
1086
+ spatial_size,
1087
+ num_heads,
1088
+ channels,
1089
+ num_levels,
1090
+ num_query,
1091
+ num_point,
1092
+ grad_value,
1093
+ grad_sampling_loc,
1094
+ grad_attn_weight);
1095
+ break;
1096
+ case 8:
1097
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
1098
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1099
+ 0, stream>>>(
1100
+ num_kernels,
1101
+ grad_col,
1102
+ data_value,
1103
+ data_spatial_shapes,
1104
+ data_level_start_index,
1105
+ data_sampling_loc,
1106
+ data_attn_weight,
1107
+ batch_size,
1108
+ spatial_size,
1109
+ num_heads,
1110
+ channels,
1111
+ num_levels,
1112
+ num_query,
1113
+ num_point,
1114
+ grad_value,
1115
+ grad_sampling_loc,
1116
+ grad_attn_weight);
1117
+ break;
1118
+ case 16:
1119
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
1120
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1121
+ 0, stream>>>(
1122
+ num_kernels,
1123
+ grad_col,
1124
+ data_value,
1125
+ data_spatial_shapes,
1126
+ data_level_start_index,
1127
+ data_sampling_loc,
1128
+ data_attn_weight,
1129
+ batch_size,
1130
+ spatial_size,
1131
+ num_heads,
1132
+ channels,
1133
+ num_levels,
1134
+ num_query,
1135
+ num_point,
1136
+ grad_value,
1137
+ grad_sampling_loc,
1138
+ grad_attn_weight);
1139
+ break;
1140
+ case 32:
1141
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
1142
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1143
+ 0, stream>>>(
1144
+ num_kernels,
1145
+ grad_col,
1146
+ data_value,
1147
+ data_spatial_shapes,
1148
+ data_level_start_index,
1149
+ data_sampling_loc,
1150
+ data_attn_weight,
1151
+ batch_size,
1152
+ spatial_size,
1153
+ num_heads,
1154
+ channels,
1155
+ num_levels,
1156
+ num_query,
1157
+ num_point,
1158
+ grad_value,
1159
+ grad_sampling_loc,
1160
+ grad_attn_weight);
1161
+ break;
1162
+ case 64:
1163
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
1164
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1165
+ 0, stream>>>(
1166
+ num_kernels,
1167
+ grad_col,
1168
+ data_value,
1169
+ data_spatial_shapes,
1170
+ data_level_start_index,
1171
+ data_sampling_loc,
1172
+ data_attn_weight,
1173
+ batch_size,
1174
+ spatial_size,
1175
+ num_heads,
1176
+ channels,
1177
+ num_levels,
1178
+ num_query,
1179
+ num_point,
1180
+ grad_value,
1181
+ grad_sampling_loc,
1182
+ grad_attn_weight);
1183
+ break;
1184
+ case 128:
1185
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
1186
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1187
+ 0, stream>>>(
1188
+ num_kernels,
1189
+ grad_col,
1190
+ data_value,
1191
+ data_spatial_shapes,
1192
+ data_level_start_index,
1193
+ data_sampling_loc,
1194
+ data_attn_weight,
1195
+ batch_size,
1196
+ spatial_size,
1197
+ num_heads,
1198
+ channels,
1199
+ num_levels,
1200
+ num_query,
1201
+ num_point,
1202
+ grad_value,
1203
+ grad_sampling_loc,
1204
+ grad_attn_weight);
1205
+ break;
1206
+ case 256:
1207
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
1208
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1209
+ 0, stream>>>(
1210
+ num_kernels,
1211
+ grad_col,
1212
+ data_value,
1213
+ data_spatial_shapes,
1214
+ data_level_start_index,
1215
+ data_sampling_loc,
1216
+ data_attn_weight,
1217
+ batch_size,
1218
+ spatial_size,
1219
+ num_heads,
1220
+ channels,
1221
+ num_levels,
1222
+ num_query,
1223
+ num_point,
1224
+ grad_value,
1225
+ grad_sampling_loc,
1226
+ grad_attn_weight);
1227
+ break;
1228
+ case 512:
1229
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
1230
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1231
+ 0, stream>>>(
1232
+ num_kernels,
1233
+ grad_col,
1234
+ data_value,
1235
+ data_spatial_shapes,
1236
+ data_level_start_index,
1237
+ data_sampling_loc,
1238
+ data_attn_weight,
1239
+ batch_size,
1240
+ spatial_size,
1241
+ num_heads,
1242
+ channels,
1243
+ num_levels,
1244
+ num_query,
1245
+ num_point,
1246
+ grad_value,
1247
+ grad_sampling_loc,
1248
+ grad_attn_weight);
1249
+ break;
1250
+ case 1024:
1251
+ ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
1252
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1253
+ 0, stream>>>(
1254
+ num_kernels,
1255
+ grad_col,
1256
+ data_value,
1257
+ data_spatial_shapes,
1258
+ data_level_start_index,
1259
+ data_sampling_loc,
1260
+ data_attn_weight,
1261
+ batch_size,
1262
+ spatial_size,
1263
+ num_heads,
1264
+ channels,
1265
+ num_levels,
1266
+ num_query,
1267
+ num_point,
1268
+ grad_value,
1269
+ grad_sampling_loc,
1270
+ grad_attn_weight);
1271
+ break;
1272
+ default:
1273
+ if (channels < 64)
1274
+ {
1275
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
1276
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1277
+ num_threads*3*sizeof(scalar_t), stream>>>(
1278
+ num_kernels,
1279
+ grad_col,
1280
+ data_value,
1281
+ data_spatial_shapes,
1282
+ data_level_start_index,
1283
+ data_sampling_loc,
1284
+ data_attn_weight,
1285
+ batch_size,
1286
+ spatial_size,
1287
+ num_heads,
1288
+ channels,
1289
+ num_levels,
1290
+ num_query,
1291
+ num_point,
1292
+ grad_value,
1293
+ grad_sampling_loc,
1294
+ grad_attn_weight);
1295
+ }
1296
+ else
1297
+ {
1298
+ ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
1299
+ <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
1300
+ num_threads*3*sizeof(scalar_t), stream>>>(
1301
+ num_kernels,
1302
+ grad_col,
1303
+ data_value,
1304
+ data_spatial_shapes,
1305
+ data_level_start_index,
1306
+ data_sampling_loc,
1307
+ data_attn_weight,
1308
+ batch_size,
1309
+ spatial_size,
1310
+ num_heads,
1311
+ channels,
1312
+ num_levels,
1313
+ num_query,
1314
+ num_point,
1315
+ grad_value,
1316
+ grad_sampling_loc,
1317
+ grad_attn_weight);
1318
+ }
1319
+ }
1320
+ }
1321
+ cudaError_t err = cudaGetLastError();
1322
+ if (err != cudaSuccess)
1323
+ {
1324
+ printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
1325
+ }
1326
+
1327
+ }
Deformable-DETR/models/ops/src/ms_deform_attn.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include "cpu/ms_deform_attn_cpu.h"
14
+
15
+ #ifdef WITH_CUDA
16
+ #include "cuda/ms_deform_attn_cuda.h"
17
+ #endif
18
+
19
+
20
// Dispatcher for the multi-scale deformable attention forward pass.
// Routes to the CUDA implementation when `value` is a GPU tensor; no CPU
// implementation is provided, so CPU tensors raise an error.
//
// value             : multi-level feature values
// spatial_shapes    : per-level feature map shapes
// level_start_index : per-level offsets into the flattened `value`
// sampling_loc      : predicted sampling locations
// attn_weight       : attention weights for the sampling points
// im2col_step       : batch chunk size used by the im2col-style kernels
at::Tensor
ms_deform_attn_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    if (value.type().is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_forward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
#else
        // Built without CUDA: the op cannot run even on GPU tensors.
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}
40
+
41
// Dispatcher for the multi-scale deformable attention backward pass.
// Parameters mirror ms_deform_attn_forward, plus:
//   grad_output : gradient of the loss w.r.t. the op's output
// NOTE(review): presumably returns the gradients w.r.t. value, sampling_loc
// and attn_weight — confirm against ms_deform_attn_cuda_backward.
// Only a CUDA implementation exists; CPU tensors raise an error.
std::vector<at::Tensor>
ms_deform_attn_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    if (value.type().is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_backward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
#else
        // Built without CUDA: the op cannot run even on GPU tensors.
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}
62
+
Deformable-DETR/models/ops/src/vision.cpp ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ **************************************************************************************************
3
+ * Deformable DETR
4
+ * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ **************************************************************************************************
7
+ * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
+ **************************************************************************************************
9
+ */
10
+
11
+ #include "ms_deform_attn.h"
12
+
13
// Python bindings for the custom torch extension: expose the forward and
// backward dispatchers of the multi-scale deformable attention op.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
  m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
}
Deformable-DETR/models/ops/test.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7
+ # ------------------------------------------------------------------------------------------------
8
+
9
+ from __future__ import absolute_import
10
+ from __future__ import print_function
11
+ from __future__ import division
12
+
13
+ import time
14
+ import torch
15
+ import torch.nn as nn
16
+ from torch.autograd import gradcheck
17
+
18
+ from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch
19
+
20
+
21
# Tiny problem sizes so the numerical gradcheck below stays fast.
# NOTE(review): names appear to follow the kernel's conventions —
# N: batch, M: heads, D: channels per head, Lq: queries, L: levels,
# P: sampling points — confirm against ms_deform_attn_func.
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
# (H, W) of each feature level, placed on the GPU.
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
# Offset of each level inside the flattened value tensor.
level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
# Total number of spatial positions summed over all levels.
S = sum([(H*W).item() for H, W in shapes])


# Fixed seed so every check runs on the same random inputs.
torch.manual_seed(3)
29
+
30
+
31
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
    """Compare the CUDA forward pass against the pure-PyTorch reference in float64."""
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    # Normalise so the weights sum to one over levels and sampling points.
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    ref = ms_deform_attn_core_pytorch(
        value.double(), shapes, sampling_locations.double(), attention_weights.double()
    ).detach().cpu()
    out = MSDeformAttnFunction.apply(
        value.double(), shapes, level_start_index,
        sampling_locations.double(), attention_weights.double(), im2col_step
    ).detach().cpu()

    fwdok = torch.allclose(out, ref)
    diff = (out - ref).abs()
    max_abs_err = diff.max()
    max_rel_err = (diff / ref.abs()).max()

    print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
45
+
46
+
47
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
    """Compare the CUDA forward pass against the pure-PyTorch reference in float32."""
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    # Normalise so the weights sum to one over levels and sampling points.
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    ref = ms_deform_attn_core_pytorch(
        value, shapes, sampling_locations, attention_weights
    ).detach().cpu()
    out = MSDeformAttnFunction.apply(
        value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step
    ).detach().cpu()

    # Looser tolerances than the float64 check, since this runs in float32.
    fwdok = torch.allclose(out, ref, rtol=1e-2, atol=1e-3)
    diff = (out - ref).abs()
    max_abs_err = diff.max()
    max_rel_err = (diff / ref.abs()).max()

    print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
61
+
62
+
63
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Numerically verify the custom op's gradients with torch.autograd.gradcheck.

    The three boolean flags select which inputs require gradients.
    """
    value = torch.rand(N, S, M, channels).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    # Normalise so the weights sum to one over levels and sampling points.
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    # Enable gradients only for the inputs under test.
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight

    # gradcheck runs in float64 for numerical stability.
    gradok = gradcheck(
        MSDeformAttnFunction.apply,
        (value.double(), shapes, level_start_index,
         sampling_locations.double(), attention_weights.double(), im2col_step))

    print(f'* {gradok} check_gradient_numerical(D={channels})')
79
+
80
+
81
if __name__ == '__main__':
    # Forward checks against the pure-PyTorch reference implementation.
    check_forward_equal_with_pytorch_double()
    check_forward_equal_with_pytorch_float()

    # Gradient checks over a spread of channel counts, including sizes that
    # are not powers of two.
    for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
        check_gradient_numerical(channels, True, True, True)
87
+
88
+
89
+
Deformable-DETR/models/position_encoding.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Various positional encodings for the transformer.
12
+ """
13
+ import math
14
+ import torch
15
+ from torch import nn
16
+
17
+ from util.misc import NestedTensor
18
+
19
+
20
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.

    Produces a (B, 2*num_pos_feats, H, W) embedding: sine/cosine features of the
    row coordinate followed by those of the column coordinate.
    """
    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats  # channels per axis (output has twice this many)
        self.temperature = temperature      # base of the geometric frequency progression
        self.normalize = normalize          # if True, rescale coordinates to [0, scale]
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        # Cumulative sums over valid (unmasked) pixels give each pixel its
        # 1-based row / column coordinate inside the un-padded image region.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # Offset by 0.5 and divide by the last valid coordinate so the
            # coordinates span (0, scale) regardless of image size.
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale

        # Per-channel divisors: temperature ** (2 * floor(i/2) / num_pos_feats).
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even channels) and cos (odd channels) per coordinate.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # (B, H, W, 2*num_pos_feats) -> (B, 2*num_pos_feats, H, W)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
57
+
58
+
59
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.

    Keeps one learned vector per row index and per column index (up to 50 each)
    and tiles them into a (B, 2*num_pos_feats, H, W) grid.
    """
    def __init__(self, num_pos_feats=256):
        super().__init__()
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [0, 1) for both embedding tables.
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        feats = tensor_list.tensors
        h, w = feats.shape[-2:]
        col_idx = torch.arange(w, device=feats.device)
        row_idx = torch.arange(h, device=feats.device)
        x_emb = self.col_embed(col_idx)  # (w, C)
        y_emb = self.row_embed(row_idx)  # (h, C)
        # Broadcast the per-column and per-row vectors over the grid,
        # concatenate along the channel axis, then move channels first.
        grid = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1)
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feats.shape[0], 1, 1, 1)
85
+
86
+
87
def build_position_encoding(args):
    """Instantiate the positional encoding selected by ``args.position_embedding``.

    ``'v2'``/``'sine'`` -> fixed sinusoidal embedding; ``'v3'``/``'learned'``
    -> learned embedding. Raises ValueError for anything else.
    """
    # Half the hidden dim per spatial axis (x and y halves are concatenated).
    N_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
Deformable-DETR/models/segmentation.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ This file provides the definition of the convolutional heads used to predict masks, as well as the losses
12
+ """
13
+ import io
14
+ from collections import defaultdict
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+ import torch.nn.functional as F
19
+ from PIL import Image
20
+
21
+ import util.box_ops as box_ops
22
+ from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
23
+
24
+ try:
25
+ from panopticapi.utils import id2rgb, rgb2id
26
+ except ImportError:
27
+ pass
28
+
29
+
30
class DETRsegm(nn.Module):
    """Wraps a DETR detection model with a segmentation mask head.

    NOTE(review): this reads ``detr.transformer.d_model``/``nhead`` and uses a
    single feature level — it targets the original DETR interface, not the
    multi-scale deformable transformer. Confirm which detector is passed in.
    """
    def __init__(self, detr, freeze_detr=False):
        super().__init__()
        self.detr = detr

        if freeze_detr:
            # At this point self.parameters() only contains the detector's
            # parameters — the mask heads created below remain trainable.
            for p in self.parameters():
                p.requires_grad_(False)

        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
        # Per-query attention maps over the encoder memory, one per head.
        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
        # FPN-style head fusing attention maps with three backbone levels.
        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)

    def forward(self, samples: NestedTensor):
        if not isinstance(samples, NestedTensor):
            samples = nested_tensor_from_tensor_list(samples)
        features, pos = self.detr.backbone(samples)

        bs = features[-1].tensors.shape[0]

        # Standard DETR pipeline on the last (coarsest) backbone level.
        src, mask = features[-1].decompose()
        src_proj = self.detr.input_proj(src)
        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])

        outputs_class = self.detr.class_embed(hs)
        outputs_coord = self.detr.bbox_embed(hs).sigmoid()
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
        if self.detr.aux_loss:
            # Intermediate decoder layers supervised via auxiliary losses.
            out["aux_outputs"] = [
                {"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
            ]

        # FIXME h_boxes takes the last one computed, keep this in mind
        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)

        # Fuse the per-query attention maps with backbone levels, coarse to fine.
        seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])
        # (bs * num_queries, 1, H, W) -> (bs, num_queries, H, W)
        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])

        out["pred_masks"] = outputs_seg_masks
        return out
70
+
71
+
72
class MaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm.
    Upsampling is done using a FPN approach: the coarse per-query attention
    maps are progressively upsampled and fused with backbone feature maps,
    ending in a single mask-logit channel.
    """

    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()

        # Channel widths of the successive refinement stages.
        inter_dims = [dim, context_dim // 2, context_dim // 4,
                      context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(8, dim)
        self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)

        self.dim = dim

        # 1x1 convs adapting each FPN level to the matching stage width.
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x, bbox_mask, fpns):
        def tile(tensor, reps):
            # Repeat along a new dim right after batch, then fold into batch.
            return tensor.unsqueeze(1).repeat(1, int(reps), 1, 1, 1).flatten(0, 1)

        # Pair every query's attention maps with a copy of the context features.
        feat = torch.cat([tile(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)

        feat = F.relu(self.gn1(self.lay1(feat)))
        feat = F.relu(self.gn2(self.lay2(feat)))

        # Three fusion stages: adapt the FPN level, broadcast it over queries,
        # upsample the running features to match, add, then conv + norm + relu.
        stages = [
            (self.adapter1, self.lay3, self.gn3, fpns[0]),
            (self.adapter2, self.lay4, self.gn4, fpns[1]),
            (self.adapter3, self.lay5, self.gn5, fpns[2]),
        ]
        for adapter, conv, norm, level in stages:
            lateral = adapter(level)
            if lateral.size(0) != feat.size(0):
                lateral = tile(lateral, feat.size(0) / lateral.size(0))
            feat = lateral + F.interpolate(feat, size=lateral.shape[-2:], mode="nearest")
            feat = F.relu(norm(conv(feat)))

        return self.out_lay(feat)
144
+
145
+
146
class MHAttentionMap(nn.Module):
    """This is a 2D attention module, which only returns the attention softmax
    (no multiplication by value)."""

    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)

        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)

        nn.init.zeros_(self.k_linear.bias)
        nn.init.zeros_(self.q_linear.bias)
        nn.init.xavier_uniform_(self.k_linear.weight)
        nn.init.xavier_uniform_(self.q_linear.weight)
        # 1/sqrt(d_head), folded into the queries before the dot product.
        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5

    def forward(self, q, k, mask=None):
        queries = self.q_linear(q)
        # Apply the key projection as a 1x1 convolution over the feature map.
        keys = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)

        head_dim = self.hidden_dim // self.num_heads
        qh = queries.view(queries.shape[0], queries.shape[1], self.num_heads, head_dim)
        kh = keys.view(keys.shape[0], self.num_heads, head_dim, keys.shape[-2], keys.shape[-1])
        # (batch, query, head, h, w) attention logits.
        weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)

        if mask is not None:
            # Padded positions are excluded from the softmax.
            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
        # Softmax jointly over heads and spatial positions.
        weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
        return self.dropout(weights)
176
+
177
+
178
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks.

    Args:
        inputs: A float tensor of arbitrary shape with the predicted logits
            for each example (a sigmoid is applied internally).
        targets: A float tensor with the same shape as inputs storing the
            binary classification label for each element
            (0 for the negative class and 1 for the positive class).
        num_boxes: Normalisation constant (number of boxes).

    Returns:
        Scalar tensor: per-mask dice losses summed and divided by num_boxes.
    """
    probs = inputs.sigmoid().flatten(1)
    # Soft intersection / union; the +1 smoothing avoids division by zero.
    intersection = 2 * (probs * targets).sum(1)
    union = probs.sum(-1) + targets.sum(-1)
    per_mask = 1 - (intersection + 1) / (union + 1)
    return per_mask.sum() / num_boxes
194
+
195
+
196
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs: A float tensor of arbitrary shape with the predicted logits.
        targets: A float tensor with the same shape as inputs storing the
            binary classification label for each element
            (0 for the negative class and 1 for the positive class).
        num_boxes: Normalisation constant (number of boxes).
        alpha: Weighting factor in (0, 1) balancing positive vs negative
            examples; a negative value disables the weighting.
        gamma: Exponent of the modulating factor (1 - p_t), balancing
            easy vs hard examples.

    Returns:
        Scalar loss tensor.
    """
    prob = inputs.sigmoid()
    bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t: the model's probability for each element's true class.
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = bce * ((1 - p_t) ** gamma)

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    # Mean over mask elements, summed over masks, normalised by box count.
    return loss.mean(1).sum() / num_boxes
222
+
223
+
224
class PostProcessSegm(nn.Module):
    """Resizes predicted masks back to the original image sizes and binarises them."""

    def __init__(self, threshold=0.5):
        super().__init__()
        self.threshold = threshold  # probability cut-off applied after the sigmoid

    @torch.no_grad()
    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
        """Add a "masks" entry to each per-image result dict (mutates `results`).

        Parameters:
            results: per-image result dicts to augment with "masks"
            outputs: model outputs containing "pred_masks"
            orig_target_sizes: target sizes the final masks are resized to
            max_target_sizes: per-image sizes used to crop away batch padding
        """
        assert len(orig_target_sizes) == len(max_target_sizes)
        # Upsample all masks to the padded batch resolution in one call.
        max_h, max_w = max_target_sizes.max(0)[0].tolist()
        outputs_masks = outputs["pred_masks"].squeeze(2)
        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()

        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
            img_h, img_w = t[0], t[1]
            # Crop away the padding for this image, then resize to its
            # original (pre-augmentation) resolution.
            results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
            results[i]["masks"] = F.interpolate(
                results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
            ).byte()

        return results
245
+
246
+
247
class PostProcessPanoptic(nn.Module):
    """This class converts the output of the model to the final panoptic result, in the format expected by the
    coco panoptic API """

    def __init__(self, is_thing_map, threshold=0.85):
        """
        Parameters:
           is_thing_map: a dict whose keys are the class ids, and the values a boolean indicating whether
                          the class is a thing (True) or a stuff (False) class
           threshold: confidence threshold: segments with confidence lower than this will be deleted
        """
        super().__init__()
        self.threshold = threshold
        self.is_thing_map = is_thing_map

    def forward(self, outputs, processed_sizes, target_sizes=None):
        """ This function computes the panoptic prediction from the model's predictions.
        Parameters:
            outputs: This is a dict coming directly from the model. See the model doc for the content.
            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
                             model, ie the size after data augmentation but before batching.
            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
                          of each prediction. If left to None, it will default to the processed_sizes

        Returns:
            A list with one dict per image, each containing a PNG-encoded
            segmentation ("png_string") and its "segments_info" list.
        """
        if target_sizes is None:
            target_sizes = processed_sizes
        assert len(processed_sizes) == len(target_sizes)
        out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
        assert len(out_logits) == len(raw_masks) == len(target_sizes)
        preds = []

        def to_tuple(tup):
            # Accepts either a plain tuple or a tensor holding (h, w).
            if isinstance(tup, tuple):
                return tup
            return tuple(tup.cpu().tolist())

        # Process one image at a time.
        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
        ):
            # we filter empty queries and detection below threshold
            # (the last class index is treated as the "no object" class)
            scores, labels = cur_logits.softmax(-1).max(-1)
            keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
            cur_scores = cur_scores[keep]
            cur_classes = cur_classes[keep]
            cur_masks = cur_masks[keep]
            # Upsample the kept masks to the processed image size.
            cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0)
            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])

            h, w = cur_masks.shape[-2:]
            assert len(cur_boxes) == len(cur_classes)

            # It may be that we have several predicted masks for the same stuff class.
            # In the following, we track the list of masks ids for each stuff class (they are merged later on)
            cur_masks = cur_masks.flatten(1)
            stuff_equiv_classes = defaultdict(lambda: [])
            for k, label in enumerate(cur_classes):
                if not self.is_thing_map[label.item()]:
                    stuff_equiv_classes[label.item()].append(k)

            def get_ids_area(masks, scores, dedup=False):
                # This helper function creates the final panoptic segmentation image
                # It also returns the area of the masks that appears on the image

                # Assign every pixel to the mask with the highest score at
                # that position (softmax over masks, then argmax).
                m_id = masks.transpose(0, 1).softmax(-1)

                if m_id.shape[-1] == 0:
                    # We didn't detect any mask :(
                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
                else:
                    m_id = m_id.argmax(-1).view(h, w)

                if dedup:
                    # Merge the masks corresponding to the same stuff class
                    for equiv in stuff_equiv_classes.values():
                        if len(equiv) > 1:
                            for eq_id in equiv:
                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])

                final_h, final_w = to_tuple(target_size)

                # Encode mask ids as RGB, resize with nearest-neighbour to
                # preserve ids, then decode back to an id map.
                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)

                np_seg_img = (
                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
                )
                m_id = torch.from_numpy(rgb2id(np_seg_img))

                # Pixel count of each mask id in the final segmentation.
                area = []
                for i in range(len(scores)):
                    area.append(m_id.eq(i).sum().item())
                return area, seg_img

            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
            if cur_classes.numel() > 0:
                # Now filter out segments with (near-)empty masks, repeating
                # until every remaining segment covers some pixels.
                while True:
                    filtered_small = torch.as_tensor(
                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
                    )
                    if filtered_small.any().item():
                        cur_scores = cur_scores[~filtered_small]
                        cur_classes = cur_classes[~filtered_small]
                        cur_masks = cur_masks[~filtered_small]
                        area, seg_img = get_ids_area(cur_masks, cur_scores)
                    else:
                        break

            else:
                # No detection at all: emit a single dummy segment.
                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)

            segments_info = []
            for i, a in enumerate(area):
                cat = cur_classes[i].item()
                segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
            del cur_classes

            # Serialise the segmentation image as PNG bytes (COCO panoptic format).
            with io.BytesIO() as out:
                seg_img.save(out, format="PNG")
                predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
            preds.append(predictions)
        return preds
Deformable-DETR/tools/launch.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # --------------------------------------------------------------------------------------------------------------------------
6
+ # Modified from https://github.com/pytorch/pytorch/blob/173f224570017b4b1a3a1a13d0bff280a54d9cd9/torch/distributed/launch.py
7
+ # --------------------------------------------------------------------------------------------------------------------------
8
+
9
+ r"""
10
+ `torch.distributed.launch` is a module that spawns up multiple distributed
11
+ training processes on each of the training nodes.
12
+ The utility can be used for single-node distributed training, in which one or
13
+ more processes per node will be spawned. The utility can be used for either
14
+ CPU training or GPU training. If the utility is used for GPU training,
15
+ each distributed process will be operating on a single GPU. This can achieve
16
+ well-improved single-node training performance. It can also be used in
17
+ multi-node distributed training, by spawning up multiple processes on each node
18
+ for well-improved multi-node distributed training performance as well.
19
+ This will especially be beneficial for systems with multiple Infiniband
20
+ interfaces that have direct-GPU support, since all of them can be utilized for
21
+ aggregated communication bandwidth.
22
+ In both cases of single-node distributed training or multi-node distributed
23
+ training, this utility will launch the given number of processes per node
24
+ (``--nproc_per_node``). If used for GPU training, this number needs to be less
25
+ or equal to the number of GPUs on the current system (``nproc_per_node``),
26
+ and each process will be operating on a single GPU from *GPU 0 to
27
+ GPU (nproc_per_node - 1)*.
28
+ **How to use this module:**
29
+ 1. Single-Node multi-process distributed training
30
+ ::
31
+ >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
32
+ YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
33
+ arguments of your training script)
34
+ 2. Multi-Node multi-process distributed training: (e.g. two nodes)
35
+ Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
36
+ ::
37
+ >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
38
+ --nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
39
+ --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
40
+ and all other arguments of your training script)
41
+ Node 2:
42
+ ::
43
+ >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
44
+ --nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
45
+ --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
46
+ and all other arguments of your training script)
47
+ 3. To look up what optional arguments this module offers:
48
+ ::
49
+ >>> python -m torch.distributed.launch --help
50
+ **Important Notices:**
51
+ 1. This utility and multi-process distributed (single-node or
52
+ multi-node) GPU training currently only achieves the best performance using
53
+ the NCCL distributed backend. Thus NCCL backend is the recommended backend to
54
+ use for GPU training.
55
+ 2. In your training program, you must parse the command-line argument:
56
+ ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
57
+ If your training program uses GPUs, you should ensure that your code only
58
+ runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
59
+ Parsing the local_rank argument
60
+ ::
61
+ >>> import argparse
62
+ >>> parser = argparse.ArgumentParser()
63
+ >>> parser.add_argument("--local_rank", type=int)
64
+ >>> args = parser.parse_args()
65
+ Set your device to local rank using either
66
+ ::
67
+ >>> torch.cuda.set_device(args.local_rank) # before your code runs
68
+ or
69
+ ::
70
+ >>> with torch.cuda.device(args.local_rank):
71
+ >>> # your code to run
72
+ 3. In your training program, you are supposed to call the following function
73
+ at the beginning to start the distributed backend. You need to make sure that
74
+ the init_method uses ``env://``, which is the only supported ``init_method``
75
+ by this module.
76
+ ::
77
+ torch.distributed.init_process_group(backend='YOUR BACKEND',
78
+ init_method='env://')
79
+ 4. In your training program, you can either use regular distributed functions
80
+ or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your
81
+ training program uses GPUs for training and you would like to use
82
+ :func:`torch.nn.parallel.DistributedDataParallel` module,
83
+ here is how to configure it.
84
+ ::
85
+ model = torch.nn.parallel.DistributedDataParallel(model,
86
+ device_ids=[args.local_rank],
87
+ output_device=args.local_rank)
88
+ Please ensure that ``device_ids`` argument is set to be the only GPU device id
89
+ that your code will be operating on. This is generally the local rank of the
90
+ process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``,
91
+ and ``output_device`` needs to be ``args.local_rank`` in order to use this
92
+ utility
93
+ 5. Another way to pass ``local_rank`` to the subprocesses is via the environment variable
94
+ ``LOCAL_RANK``. This behavior is enabled when you launch the script with
95
+ ``--use_env=True``. You must adjust the subprocess example above to replace
96
+ ``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher
97
+ will not pass ``--local_rank`` when you specify this flag.
98
+ .. warning::
99
+ ``local_rank`` is NOT globally unique: it is only unique per process
100
+ on a machine. Thus, don't use it to decide if you should, e.g.,
101
+ write to a networked filesystem. See
102
+ https://github.com/pytorch/pytorch/issues/12042 for an example of
103
+ how things can go wrong if you don't do this correctly.
104
+ """
105
+
106
+
107
+ import sys
108
+ import subprocess
109
+ import os
110
+ import socket
111
+ from argparse import ArgumentParser, REMAINDER
112
+
113
+ import torch
114
+
115
+
116
+ def parse_args():
117
+ """
118
+ Helper function parsing the command line options
119
+ @retval ArgumentParser
120
+ """
121
+ parser = ArgumentParser(description="PyTorch distributed training launch "
122
+ "helper utilty that will spawn up "
123
+ "multiple distributed processes")
124
+
125
+ # Optional arguments for the launch helper
126
+ parser.add_argument("--nnodes", type=int, default=1,
127
+ help="The number of nodes to use for distributed "
128
+ "training")
129
+ parser.add_argument("--node_rank", type=int, default=0,
130
+ help="The rank of the node for multi-node distributed "
131
+ "training")
132
+ parser.add_argument("--nproc_per_node", type=int, default=1,
133
+ help="The number of processes to launch on each node, "
134
+ "for GPU training, this is recommended to be set "
135
+ "to the number of GPUs in your system so that "
136
+ "each process can be bound to a single GPU.")
137
+ parser.add_argument("--master_addr", default="127.0.0.1", type=str,
138
+ help="Master node (rank 0)'s address, should be either "
139
+ "the IP address or the hostname of node 0, for "
140
+ "single node multi-proc training, the "
141
+ "--master_addr can simply be 127.0.0.1")
142
+ parser.add_argument("--master_port", default=29500, type=int,
143
+ help="Master node (rank 0)'s free port that needs to "
144
+ "be used for communciation during distributed "
145
+ "training")
146
+
147
+ # positional
148
+ parser.add_argument("training_script", type=str,
149
+ help="The full path to the single GPU training "
150
+ "program/script to be launched in parallel, "
151
+ "followed by all the arguments for the "
152
+ "training script")
153
+
154
+ # rest from the training program
155
+ parser.add_argument('training_script_args', nargs=REMAINDER)
156
+ return parser.parse_args()
157
+
158
+
159
+ def main():
160
+ args = parse_args()
161
+
162
+ # world size in terms of number of processes
163
+ dist_world_size = args.nproc_per_node * args.nnodes
164
+
165
+ # set PyTorch distributed related environmental variables
166
+ current_env = os.environ.copy()
167
+ current_env["MASTER_ADDR"] = args.master_addr
168
+ current_env["MASTER_PORT"] = str(args.master_port)
169
+ current_env["WORLD_SIZE"] = str(dist_world_size)
170
+
171
+ processes = []
172
+
173
+ for local_rank in range(0, args.nproc_per_node):
174
+ # each process's rank
175
+ dist_rank = args.nproc_per_node * args.node_rank + local_rank
176
+ current_env["RANK"] = str(dist_rank)
177
+ current_env["LOCAL_RANK"] = str(local_rank)
178
+
179
+ cmd = [args.training_script] + args.training_script_args
180
+
181
+ process = subprocess.Popen(cmd, env=current_env)
182
+ processes.append(process)
183
+
184
+ for process in processes:
185
+ process.wait()
186
+ if process.returncode != 0:
187
+ raise subprocess.CalledProcessError(returncode=process.returncode,
188
+ cmd=process.args)
189
+
190
+
191
+ if __name__ == "__main__":
192
+ main()
Deformable-DETR/tools/run_dist_launch.sh ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ # ------------------------------------------------------------------------
3
+ # Deformable DETR
4
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ # ------------------------------------------------------------------------
7
+
8
+ set -x
9
+
10
+ GPUS=$1
11
+ RUN_COMMAND=${@:2}
12
+ if [ $GPUS -lt 8 ]; then
13
+ GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
14
+ else
15
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
16
+ fi
17
+ MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
18
+ MASTER_PORT=${MASTER_PORT:-"29500"}
19
+ NODE_RANK=${NODE_RANK:-0}
20
+
21
+ let "NNODES=GPUS/GPUS_PER_NODE"
22
+
23
+ python ./tools/launch.py \
24
+ --nnodes ${NNODES} \
25
+ --node_rank ${NODE_RANK} \
26
+ --master_addr ${MASTER_ADDR} \
27
+ --master_port ${MASTER_PORT} \
28
+ --nproc_per_node ${GPUS_PER_NODE} \
29
+ ${RUN_COMMAND}
Deformable-DETR/tools/run_dist_slurm.sh ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ # --------------------------------------------------------------------------------------------------------------------------
3
+ # Deformable DETR
4
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
5
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
+ # --------------------------------------------------------------------------------------------------------------------------
7
+ # Modified from https://github.com/open-mmlab/mmdetection/blob/3b53fe15d87860c6941f3dda63c0f27422da6266/tools/slurm_train.sh
8
+ # --------------------------------------------------------------------------------------------------------------------------
9
+
10
+ set -x
11
+
12
+ PARTITION=$1
13
+ JOB_NAME=$2
14
+ GPUS=$3
15
+ RUN_COMMAND=${@:4}
16
+ if [ $GPUS -lt 8 ]; then
17
+ GPUS_PER_NODE=${GPUS_PER_NODE:-$GPUS}
18
+ else
19
+ GPUS_PER_NODE=${GPUS_PER_NODE:-8}
20
+ fi
21
+ CPUS_PER_TASK=${CPUS_PER_TASK:-4}
22
+ SRUN_ARGS=${SRUN_ARGS:-""}
23
+
24
+ srun -p ${PARTITION} \
25
+ --job-name=${JOB_NAME} \
26
+ --gres=gpu:${GPUS_PER_NODE} \
27
+ --ntasks=${GPUS} \
28
+ --ntasks-per-node=${GPUS_PER_NODE} \
29
+ --cpus-per-task=${CPUS_PER_TASK} \
30
+ --kill-on-bad-exit=1 \
31
+ ${SRUN_ARGS} \
32
+ ${RUN_COMMAND}
33
+
Deformable-DETR/util/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
Deformable-DETR/util/box_ops.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Utilities for bounding box manipulation and GIoU.
12
+ """
13
+ import torch
14
+ from torchvision.ops.boxes import box_area
15
+
16
+
17
+ def box_cxcywh_to_xyxy(x):
18
+ x_c, y_c, w, h = x.unbind(-1)
19
+ b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
20
+ (x_c + 0.5 * w), (y_c + 0.5 * h)]
21
+ return torch.stack(b, dim=-1)
22
+
23
+
24
+ def box_xyxy_to_cxcywh(x):
25
+ x0, y0, x1, y1 = x.unbind(-1)
26
+ b = [(x0 + x1) / 2, (y0 + y1) / 2,
27
+ (x1 - x0), (y1 - y0)]
28
+ return torch.stack(b, dim=-1)
29
+
30
+
31
+ # modified from torchvision to also return the union
32
+ def box_iou(boxes1, boxes2):
33
+ area1 = box_area(boxes1)
34
+ area2 = box_area(boxes2)
35
+
36
+ lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
37
+ rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
38
+
39
+ wh = (rb - lt).clamp(min=0) # [N,M,2]
40
+ inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
41
+
42
+ union = area1[:, None] + area2 - inter
43
+
44
+ iou = inter / union
45
+ return iou, union
46
+
47
+
48
+ def generalized_box_iou(boxes1, boxes2):
49
+ """
50
+ Generalized IoU from https://giou.stanford.edu/
51
+
52
+ The boxes should be in [x0, y0, x1, y1] format
53
+
54
+ Returns a [N, M] pairwise matrix, where N = len(boxes1)
55
+ and M = len(boxes2)
56
+ """
57
+ # degenerate boxes gives inf / nan results
58
+ # so do an early check
59
+ assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
60
+ assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
61
+ iou, union = box_iou(boxes1, boxes2)
62
+
63
+ lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
64
+ rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
65
+
66
+ wh = (rb - lt).clamp(min=0) # [N,M,2]
67
+ area = wh[:, :, 0] * wh[:, :, 1]
68
+
69
+ return iou - (area - union) / area
70
+
71
+
72
+ def masks_to_boxes(masks):
73
+ """Compute the bounding boxes around the provided masks
74
+
75
+ The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
76
+
77
+ Returns a [N, 4] tensors, with the boxes in xyxy format
78
+ """
79
+ if masks.numel() == 0:
80
+ return torch.zeros((0, 4), device=masks.device)
81
+
82
+ h, w = masks.shape[-2:]
83
+
84
+ y = torch.arange(0, h, dtype=torch.float)
85
+ x = torch.arange(0, w, dtype=torch.float)
86
+ y, x = torch.meshgrid(y, x)
87
+
88
+ x_mask = (masks * x.unsqueeze(0))
89
+ x_max = x_mask.flatten(1).max(-1)[0]
90
+ x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
91
+
92
+ y_mask = (masks * y.unsqueeze(0))
93
+ y_max = y_mask.flatten(1).max(-1)[0]
94
+ y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
95
+
96
+ return torch.stack([x_min, y_min, x_max, y_max], 1)
Deformable-DETR/util/misc.py ADDED
@@ -0,0 +1,518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Misc functions, including distributed helpers.
12
+
13
+ Mostly copy-paste from torchvision references.
14
+ """
15
+ import os
16
+ import subprocess
17
+ import time
18
+ from collections import defaultdict, deque
19
+ import datetime
20
+ import pickle
21
+ from typing import Optional, List
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ import torch.distributed as dist
26
+ from torch import Tensor
27
+
28
+ # needed due to empty tensor bug in pytorch and torchvision 0.5
29
+ import torchvision
30
+ if float(torchvision.__version__[:3]) < 0.5:
31
+ import math
32
+ from torchvision.ops.misc import _NewEmptyTensorOp
33
+ def _check_size_scale_factor(dim, size, scale_factor):
34
+ # type: (int, Optional[List[int]], Optional[float]) -> None
35
+ if size is None and scale_factor is None:
36
+ raise ValueError("either size or scale_factor should be defined")
37
+ if size is not None and scale_factor is not None:
38
+ raise ValueError("only one of size or scale_factor should be defined")
39
+ if not (scale_factor is not None and len(scale_factor) != dim):
40
+ raise ValueError(
41
+ "scale_factor shape must match input shape. "
42
+ "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
43
+ )
44
+ def _output_size(dim, input, size, scale_factor):
45
+ # type: (int, Tensor, Optional[List[int]], Optional[float]) -> List[int]
46
+ assert dim == 2
47
+ _check_size_scale_factor(dim, size, scale_factor)
48
+ if size is not None:
49
+ return size
50
+ # if dim is not 2 or scale_factor is iterable use _ntuple instead of concat
51
+ assert scale_factor is not None and isinstance(scale_factor, (int, float))
52
+ scale_factors = [scale_factor, scale_factor]
53
+ # math.floor might return float in py2.7
54
+ return [
55
+ int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
56
+ ]
57
+ elif float(torchvision.__version__[:3]) < 0.7:
58
+ from torchvision.ops import _new_empty_tensor
59
+ from torchvision.ops.misc import _output_size
60
+
61
+
62
+ class SmoothedValue(object):
63
+ """Track a series of values and provide access to smoothed values over a
64
+ window or the global series average.
65
+ """
66
+
67
+ def __init__(self, window_size=20, fmt=None):
68
+ if fmt is None:
69
+ fmt = "{median:.4f} ({global_avg:.4f})"
70
+ self.deque = deque(maxlen=window_size)
71
+ self.total = 0.0
72
+ self.count = 0
73
+ self.fmt = fmt
74
+
75
+ def update(self, value, n=1):
76
+ self.deque.append(value)
77
+ self.count += n
78
+ self.total += value * n
79
+
80
+ def synchronize_between_processes(self):
81
+ """
82
+ Warning: does not synchronize the deque!
83
+ """
84
+ if not is_dist_avail_and_initialized():
85
+ return
86
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
87
+ dist.barrier()
88
+ dist.all_reduce(t)
89
+ t = t.tolist()
90
+ self.count = int(t[0])
91
+ self.total = t[1]
92
+
93
+ @property
94
+ def median(self):
95
+ d = torch.tensor(list(self.deque))
96
+ return d.median().item()
97
+
98
+ @property
99
+ def avg(self):
100
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
101
+ return d.mean().item()
102
+
103
+ @property
104
+ def global_avg(self):
105
+ return self.total / self.count
106
+
107
+ @property
108
+ def max(self):
109
+ return max(self.deque)
110
+
111
+ @property
112
+ def value(self):
113
+ return self.deque[-1]
114
+
115
+ def __str__(self):
116
+ return self.fmt.format(
117
+ median=self.median,
118
+ avg=self.avg,
119
+ global_avg=self.global_avg,
120
+ max=self.max,
121
+ value=self.value)
122
+
123
+
124
+ def all_gather(data):
125
+ """
126
+ Run all_gather on arbitrary picklable data (not necessarily tensors)
127
+ Args:
128
+ data: any picklable object
129
+ Returns:
130
+ list[data]: list of data gathered from each rank
131
+ """
132
+ world_size = get_world_size()
133
+ if world_size == 1:
134
+ return [data]
135
+
136
+ # serialized to a Tensor
137
+ buffer = pickle.dumps(data)
138
+ storage = torch.ByteStorage.from_buffer(buffer)
139
+ tensor = torch.ByteTensor(storage).to("cuda")
140
+
141
+ # obtain Tensor size of each rank
142
+ local_size = torch.tensor([tensor.numel()], device="cuda")
143
+ size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
144
+ dist.all_gather(size_list, local_size)
145
+ size_list = [int(size.item()) for size in size_list]
146
+ max_size = max(size_list)
147
+
148
+ # receiving Tensor from all ranks
149
+ # we pad the tensor because torch all_gather does not support
150
+ # gathering tensors of different shapes
151
+ tensor_list = []
152
+ for _ in size_list:
153
+ tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
154
+ if local_size != max_size:
155
+ padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
156
+ tensor = torch.cat((tensor, padding), dim=0)
157
+ dist.all_gather(tensor_list, tensor)
158
+
159
+ data_list = []
160
+ for size, tensor in zip(size_list, tensor_list):
161
+ buffer = tensor.cpu().numpy().tobytes()[:size]
162
+ data_list.append(pickle.loads(buffer))
163
+
164
+ return data_list
165
+
166
+
167
+ def reduce_dict(input_dict, average=True):
168
+ """
169
+ Args:
170
+ input_dict (dict): all the values will be reduced
171
+ average (bool): whether to do average or sum
172
+ Reduce the values in the dictionary from all processes so that all processes
173
+ have the averaged results. Returns a dict with the same fields as
174
+ input_dict, after reduction.
175
+ """
176
+ world_size = get_world_size()
177
+ if world_size < 2:
178
+ return input_dict
179
+ with torch.no_grad():
180
+ names = []
181
+ values = []
182
+ # sort the keys so that they are consistent across processes
183
+ for k in sorted(input_dict.keys()):
184
+ names.append(k)
185
+ values.append(input_dict[k])
186
+ values = torch.stack(values, dim=0)
187
+ dist.all_reduce(values)
188
+ if average:
189
+ values /= world_size
190
+ reduced_dict = {k: v for k, v in zip(names, values)}
191
+ return reduced_dict
192
+
193
+
194
+ class MetricLogger(object):
195
+ def __init__(self, delimiter="\t"):
196
+ self.meters = defaultdict(SmoothedValue)
197
+ self.delimiter = delimiter
198
+
199
+ def update(self, **kwargs):
200
+ for k, v in kwargs.items():
201
+ if isinstance(v, torch.Tensor):
202
+ v = v.item()
203
+ assert isinstance(v, (float, int))
204
+ self.meters[k].update(v)
205
+
206
+ def __getattr__(self, attr):
207
+ if attr in self.meters:
208
+ return self.meters[attr]
209
+ if attr in self.__dict__:
210
+ return self.__dict__[attr]
211
+ raise AttributeError("'{}' object has no attribute '{}'".format(
212
+ type(self).__name__, attr))
213
+
214
+ def __str__(self):
215
+ loss_str = []
216
+ for name, meter in self.meters.items():
217
+ loss_str.append(
218
+ "{}: {}".format(name, str(meter))
219
+ )
220
+ return self.delimiter.join(loss_str)
221
+
222
+ def synchronize_between_processes(self):
223
+ for meter in self.meters.values():
224
+ meter.synchronize_between_processes()
225
+
226
+ def add_meter(self, name, meter):
227
+ self.meters[name] = meter
228
+
229
+ def log_every(self, iterable, print_freq, header=None):
230
+ i = 0
231
+ if not header:
232
+ header = ''
233
+ start_time = time.time()
234
+ end = time.time()
235
+ iter_time = SmoothedValue(fmt='{avg:.4f}')
236
+ data_time = SmoothedValue(fmt='{avg:.4f}')
237
+ space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
238
+ if torch.cuda.is_available():
239
+ log_msg = self.delimiter.join([
240
+ header,
241
+ '[{0' + space_fmt + '}/{1}]',
242
+ 'eta: {eta}',
243
+ '{meters}',
244
+ 'time: {time}',
245
+ 'data: {data}',
246
+ 'max mem: {memory:.0f}'
247
+ ])
248
+ else:
249
+ log_msg = self.delimiter.join([
250
+ header,
251
+ '[{0' + space_fmt + '}/{1}]',
252
+ 'eta: {eta}',
253
+ '{meters}',
254
+ 'time: {time}',
255
+ 'data: {data}'
256
+ ])
257
+ MB = 1024.0 * 1024.0
258
+ for obj in iterable:
259
+ data_time.update(time.time() - end)
260
+ yield obj
261
+ iter_time.update(time.time() - end)
262
+ if i % print_freq == 0 or i == len(iterable) - 1:
263
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
264
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
265
+ if torch.cuda.is_available():
266
+ print(log_msg.format(
267
+ i, len(iterable), eta=eta_string,
268
+ meters=str(self),
269
+ time=str(iter_time), data=str(data_time),
270
+ memory=torch.cuda.max_memory_allocated() / MB))
271
+ else:
272
+ print(log_msg.format(
273
+ i, len(iterable), eta=eta_string,
274
+ meters=str(self),
275
+ time=str(iter_time), data=str(data_time)))
276
+ i += 1
277
+ end = time.time()
278
+ total_time = time.time() - start_time
279
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
280
+ print('{} Total time: {} ({:.4f} s / it)'.format(
281
+ header, total_time_str, total_time / len(iterable)))
282
+
283
+
284
+ def get_sha():
285
+ cwd = os.path.dirname(os.path.abspath(__file__))
286
+
287
+ def _run(command):
288
+ return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
289
+ sha = 'N/A'
290
+ diff = "clean"
291
+ branch = 'N/A'
292
+ try:
293
+ sha = _run(['git', 'rev-parse', 'HEAD'])
294
+ subprocess.check_output(['git', 'diff'], cwd=cwd)
295
+ diff = _run(['git', 'diff-index', 'HEAD'])
296
+ diff = "has uncommited changes" if diff else "clean"
297
+ branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
298
+ except Exception:
299
+ pass
300
+ message = f"sha: {sha}, status: {diff}, branch: {branch}"
301
+ return message
302
+
303
+
304
+ def collate_fn(batch):
305
+ batch = list(zip(*batch))
306
+ batch[0] = nested_tensor_from_tensor_list(batch[0])
307
+ return tuple(batch)
308
+
309
+
310
+ def _max_by_axis(the_list):
311
+ # type: (List[List[int]]) -> List[int]
312
+ maxes = the_list[0]
313
+ for sublist in the_list[1:]:
314
+ for index, item in enumerate(sublist):
315
+ maxes[index] = max(maxes[index], item)
316
+ return maxes
317
+
318
+
319
+ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
320
+ # TODO make this more general
321
+ if tensor_list[0].ndim == 3:
322
+ # TODO make it support different-sized images
323
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
324
+ # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
325
+ batch_shape = [len(tensor_list)] + max_size
326
+ b, c, h, w = batch_shape
327
+ dtype = tensor_list[0].dtype
328
+ device = tensor_list[0].device
329
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
330
+ mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
331
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
332
+ pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
333
+ m[: img.shape[1], :img.shape[2]] = False
334
+ else:
335
+ raise ValueError('not supported')
336
+ return NestedTensor(tensor, mask)
337
+
338
+
339
+ class NestedTensor(object):
340
+ def __init__(self, tensors, mask: Optional[Tensor]):
341
+ self.tensors = tensors
342
+ self.mask = mask
343
+
344
+ def to(self, device, non_blocking=False):
345
+ # type: (Device) -> NestedTensor # noqa
346
+ cast_tensor = self.tensors.to(device, non_blocking=non_blocking)
347
+ mask = self.mask
348
+ if mask is not None:
349
+ assert mask is not None
350
+ cast_mask = mask.to(device, non_blocking=non_blocking)
351
+ else:
352
+ cast_mask = None
353
+ return NestedTensor(cast_tensor, cast_mask)
354
+
355
+ def record_stream(self, *args, **kwargs):
356
+ self.tensors.record_stream(*args, **kwargs)
357
+ if self.mask is not None:
358
+ self.mask.record_stream(*args, **kwargs)
359
+
360
+ def decompose(self):
361
+ return self.tensors, self.mask
362
+
363
+ def __repr__(self):
364
+ return str(self.tensors)
365
+
366
+
367
+ def setup_for_distributed(is_master):
368
+ """
369
+ This function disables printing when not in master process
370
+ """
371
+ import builtins as __builtin__
372
+ builtin_print = __builtin__.print
373
+
374
+ def print(*args, **kwargs):
375
+ force = kwargs.pop('force', False)
376
+ if is_master or force:
377
+ builtin_print(*args, **kwargs)
378
+
379
+ __builtin__.print = print
380
+
381
+
382
+ def is_dist_avail_and_initialized():
383
+ if not dist.is_available():
384
+ return False
385
+ if not dist.is_initialized():
386
+ return False
387
+ return True
388
+
389
+
390
+ def get_world_size():
391
+ if not is_dist_avail_and_initialized():
392
+ return 1
393
+ return dist.get_world_size()
394
+
395
+
396
+ def get_rank():
397
+ if not is_dist_avail_and_initialized():
398
+ return 0
399
+ return dist.get_rank()
400
+
401
+
402
+ def get_local_size():
403
+ if not is_dist_avail_and_initialized():
404
+ return 1
405
+ return int(os.environ['LOCAL_SIZE'])
406
+
407
+
408
+ def get_local_rank():
409
+ if not is_dist_avail_and_initialized():
410
+ return 0
411
+ return int(os.environ['LOCAL_RANK'])
412
+
413
+
414
+ def is_main_process():
415
+ return get_rank() == 0
416
+
417
+
418
+ def save_on_master(*args, **kwargs):
419
+ if is_main_process():
420
+ torch.save(*args, **kwargs)
421
+
422
+
423
+ def init_distributed_mode(args):
424
+ if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
425
+ args.rank = int(os.environ["RANK"])
426
+ args.world_size = int(os.environ['WORLD_SIZE'])
427
+ args.gpu = int(os.environ['LOCAL_RANK'])
428
+ args.dist_url = 'env://'
429
+ os.environ['LOCAL_SIZE'] = str(torch.cuda.device_count())
430
+ elif 'SLURM_PROCID' in os.environ:
431
+ proc_id = int(os.environ['SLURM_PROCID'])
432
+ ntasks = int(os.environ['SLURM_NTASKS'])
433
+ node_list = os.environ['SLURM_NODELIST']
434
+ num_gpus = torch.cuda.device_count()
435
+ addr = subprocess.getoutput(
436
+ 'scontrol show hostname {} | head -n1'.format(node_list))
437
+ os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '29500')
438
+ os.environ['MASTER_ADDR'] = addr
439
+ os.environ['WORLD_SIZE'] = str(ntasks)
440
+ os.environ['RANK'] = str(proc_id)
441
+ os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
442
+ os.environ['LOCAL_SIZE'] = str(num_gpus)
443
+ args.dist_url = 'env://'
444
+ args.world_size = ntasks
445
+ args.rank = proc_id
446
+ args.gpu = proc_id % num_gpus
447
+ else:
448
+ print('Not using distributed mode')
449
+ args.distributed = False
450
+ return
451
+
452
+ args.distributed = True
453
+
454
+ torch.cuda.set_device(args.gpu)
455
+ args.dist_backend = 'nccl'
456
+ print('| distributed init (rank {}): {}'.format(
457
+ args.rank, args.dist_url), flush=True)
458
+ torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
459
+ world_size=args.world_size, rank=args.rank)
460
+ torch.distributed.barrier()
461
+ setup_for_distributed(args.rank == 0)
462
+
463
+
464
+ @torch.no_grad()
465
+ def accuracy(output, target, topk=(1,)):
466
+ """Computes the precision@k for the specified values of k"""
467
+ if target.numel() == 0:
468
+ return [torch.zeros([], device=output.device)]
469
+ maxk = max(topk)
470
+ batch_size = target.size(0)
471
+
472
+ _, pred = output.topk(maxk, 1, True, True)
473
+ pred = pred.t()
474
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
475
+
476
+ res = []
477
+ for k in topk:
478
+ correct_k = correct[:k].view(-1).float().sum(0)
479
+ res.append(correct_k.mul_(100.0 / batch_size))
480
+ return res
481
+
482
+
483
+ def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
484
+ # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
485
+ """
486
+ Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
487
+ This will eventually be supported natively by PyTorch, and this
488
+ class can go away.
489
+ """
490
+ if float(torchvision.__version__[:3]) < 0.7:
491
+ if input.numel() > 0:
492
+ return torch.nn.functional.interpolate(
493
+ input, size, scale_factor, mode, align_corners
494
+ )
495
+
496
+ output_shape = _output_size(2, input, size, scale_factor)
497
+ output_shape = list(input.shape[:-2]) + list(output_shape)
498
+ if float(torchvision.__version__[:3]) < 0.5:
499
+ return _NewEmptyTensorOp.apply(input, output_shape)
500
+ return _new_empty_tensor(input, output_shape)
501
+ else:
502
+ return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
503
+
504
+
505
+ def get_total_grad_norm(parameters, norm_type=2):
506
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
507
+ norm_type = float(norm_type)
508
+ device = parameters[0].grad.device
509
+ total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
510
+ norm_type)
511
+ return total_norm
512
+
513
+ def inverse_sigmoid(x, eps=1e-5):
514
+ x = x.clamp(min=0, max=1)
515
+ x1 = x.clamp(min=eps)
516
+ x2 = (1 - x).clamp(min=eps)
517
+ return torch.log(x1/x2)
518
+
Deformable-DETR/util/plot_utils.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Deformable DETR
3
+ # Copyright (c) 2020 SenseTime. All Rights Reserved.
4
+ # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5
+ # ------------------------------------------------------------------------
6
+ # Modified from DETR (https://github.com/facebookresearch/detr)
7
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
8
+ # ------------------------------------------------------------------------
9
+
10
+ """
11
+ Plotting utilities to visualize training logs.
12
+ """
13
from pathlib import Path, PurePath

import torch
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
19
+
20
+
21
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
    '''
    Function to plot specific fields from training log(s). Plots both training and test results.

    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
              - fields = which results to plot from each log file - plots both training and test for each field.
              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
              - log_name = optional, name of log file if different than default 'log.txt'.

    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
               - solid lines are training results, dashed lines are test results.

    '''
    func_name = "plot_utils.py::plot_logs"

    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
    # convert single Path to list to avoid 'not iterable' error

    if not isinstance(logs, list):
        if isinstance(logs, PurePath):
            logs = [logs]
            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
        else:
            raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
            Expect list[Path] or single Path obj, received {type(logs)}")

    # verify valid dir(s) and that every item in list is Path object
    # (loop variable renamed from `dir` to avoid shadowing the builtin)
    for log_dir in logs:
        if not isinstance(log_dir, PurePath):
            raise ValueError(f"{func_name} - non-Path object in logs argument of {type(log_dir)}: \n{log_dir}")
        if log_dir.exists():
            continue
        raise ValueError(f"{func_name} - invalid directory in logs argument:\n{log_dir}")

    # load log file(s) and plot: one JSON record per line
    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]

    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))

    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
        for j, field in enumerate(fields):
            if field == 'mAP':
                # `pd.np` was deprecated in pandas 1.0 and removed in 2.0;
                # use numpy directly. Column 1 of the stacked coco_eval
                # arrays holds the mAP value plotted here.
                coco_eval = pd.DataFrame(
                    np.stack(df.test_coco_eval.dropna().values)[:, 1]
                ).ewm(com=ewm_col).mean()
                axs[j].plot(coco_eval, c=color)
            else:
                df.interpolate().ewm(com=ewm_col).mean().plot(
                    y=[f'train_{field}', f'test_{field}'],
                    ax=axs[j],
                    color=[color] * 2,
                    style=['-', '--']
                )
    for ax, field in zip(axs, fields):
        ax.legend([Path(p).name for p in logs])
        ax.set_title(field)
75
+
76
+
77
def plot_precision_recall(files, naming_scheme='iter'):
    """Plot precision/recall and score/recall curves from saved COCO-eval dumps.

    :param files: list of Path objects, each a torch-saved eval result.
    :param naming_scheme: 'exp_id' labels each curve by the grandparent
        directory name; 'iter' labels by the file stem; anything else raises
        ValueError.
    :return: (figure, axes) pair with the two subplots.
    """
    if naming_scheme == 'exp_id':
        # name becomes exp_id
        names = [f.parts[-3] for f in files]
    elif naming_scheme == 'iter':
        names = [f.stem for f in files]
    else:
        raise ValueError(f'not supported {naming_scheme}')

    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
    palette = sns.color_palette("Blues", n_colors=len(files))
    for f, color, name in zip(files, palette, names):
        checkpoint = torch.load(f)
        # precision is n_iou, n_points, n_cat, n_area, max_det
        recall = checkpoint['params'].recThrs
        # take precision for all classes, all areas and 100 detections
        precision = checkpoint['precision'][0, :, :, 0, -1].mean(1)
        scores = checkpoint['scores'][0, :, :, 0, -1].mean(1)
        prec = precision.mean()
        rec = checkpoint['recall'][0, :, 0, -1].mean()
        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
              f'score={scores.mean():0.3f}, ' +
              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
              )
        axs[0].plot(recall, precision, c=color)
        axs[1].plot(recall, scores, c=color)

    axs[0].set_title('Precision / Recall')
    axs[0].legend(names)
    axs[1].set_title('Scores / Recall')
    axs[1].legend(names)
    return fig, axs
109
+
110
+
111
+
README.md ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: GeCo2 Gradio Demo
3
+ sdk: gradio
4
+ sdk_version: 4.44.1
5
+ python_version: "3.10"
6
+ app_file: demo_gradio.py
7
+ ---
8
+
9
+
10
+ # GeCo2: Count Anything with Few Examples
11
+
12
+ GeCo2 is a few-shot, category-agnostic detection counter. With only a small number of exemplars, GeCo2 can identify and count all instances of the target object in an image—without predefined classes or retraining.
13
+
14
+ ## Key Features
15
+ - **Count anything**: works for arbitrary object categories
16
+ - **Few-shot**: requires only a handful of exemplars
17
+ - **Robust**: handles scale variation and diverse visual domains
18
+
19
+ ## Demo Description
20
+ Provide a few exemplar annotations for the object of interest, and GeCo2 will propagate this visual concept across the image to detect and count all matching instances. This makes the demo suitable for rapid exploration of novel object categories and domains where labeled data is scarce.
21
+
22
+
23
+
24
+ ------
25
This demo was built using the [GeCo2](https://arxiv.org/pdf/2511.08048) model and is based on the [CountGD demo](https://github.com/niki-amini-naieni/CountGD).
configs/ABC123.yaml ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: -1
2
+ dataset: MCAC
3
+ test_split: val
4
+
5
+ MCAC_crop_size: 672
6
+ MCAC_occ_limit: 70
7
+ MCAC_occ_limit_exemplar: 30
8
+ img_channels: 3
9
+ image_transforms: ref_rot
10
+
11
+ counting_backbone: "vit_dino"
12
+ counting_backbone_unfreeze_layers: -1
13
+
14
+ counting_head: "5_32"
15
+ upsample_padding_mode: 'replicate' # 'zeros', 'reflect', 'replicate'
16
+
17
+
18
+ matching_type: density # no_match, count, density
19
+ matcher_cost_power: 2
20
+ normalize_matching: True
21
+
22
+ counting_loss: pixelwise_mae
23
+ gtd_scale: 400
24
+
25
+
26
+ learning_rate: 3e-5
27
+ scheduler: StepLR
28
+ scheduler_steps: 35
29
+ scheduler_gamma: 0.5
30
+ weight_decay: 0
31
+
32
+ train_batch_size: 2
33
+ eval_batch_size: 2
34
+ val_every: 1
35
+
36
+
37
+ max_epochs: 150
configs/_DEFAULT.yml ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ seed: -1
3
+ dataset: MCAC
4
+ data_path: "PATH/TO/MCAC/"
5
+ log_dir: logs/
6
+ resume_path: ""
7
+ find_unused_parameters: False
8
+
9
+
10
+ MCAC_occ_limit: -1
11
+ MCAC_crop_size: -1
12
+ MCAC_max_num_classes: 5
13
+ MCAC_max_number_per_type: 300
14
MCAC_exclude_imgs_with_counts_over: -1 # exclude images with counts over this amount; -1 means no exclusion
15
MCAC_exclude_imgs_with_num_classes_over: -1 # exclude images with a number of classes over this amount; -1 means no exclusion
16
+
17
+ img_channels: 3
18
+ img_size: [224, 224]
19
+ image_transforms: None
20
+ drop_last: True
21
+
22
counting_backbone_pretrained: True # whether to use pretrained weights (only applies to dino)
23
+ counting_backbone_unfreeze_layers: -1
24
+ counting_head: "5_32" # linearprobe
25
+ upsample_padding_mode: 'replicate' # 'zeros', 'reflect', 'replicate'
26
+
27
+
28
+ matching_type: no_match # no_match, count, density
29
+ normalize_matching: False # ONLY APPLIES TO DENSITY MATCHING
30
+ matcher_cost_p_norm: 1
31
+ matcher_cost_power: 1
32
+
33
+ counting_loss: pixelwise_mae
34
+ gtd_scale: 1
35
+
36
+ learning_rate: 3e-4
37
+ scheduler: None
38
+ weight_decay: 0
39
+ scheduler_steps: 100
40
+ scheduler_gamma: 0.5
41
+ accumulate_grad_batches: 1
42
+
43
+ train_batch_size: 32
44
+ eval_batch_size: 32
45
+ num_workers: 4
46
+
47
+ num_sanity_val_steps: -1
48
+
49
+ val_every: 1
50
+ max_epochs: 10000
51
+ max_steps: -1
configs/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+