Creador301 committed on
Commit
4e96b1d
·
verified ·
1 Parent(s): df4a29b

Upload 466 files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +35 -0
  2. sam2.1HQ/sam-hq-main/LICENSE +201 -0
  3. sam2.1HQ/sam-hq-main/README.md +318 -0
  4. sam2.1HQ/sam-hq-main/build/lib/segment_anything/__init__.py +16 -0
  5. sam2.1HQ/sam-hq-main/build/lib/segment_anything/automatic_mask_generator.py +374 -0
  6. sam2.1HQ/sam-hq-main/build/lib/segment_anything/build_sam.py +169 -0
  7. sam2.1HQ/sam-hq-main/build/lib/segment_anything/build_sam_baseline.py +156 -0
  8. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/__init__.py +13 -0
  9. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/common.py +43 -0
  10. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/image_encoder.py +398 -0
  11. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/mask_decoder.py +178 -0
  12. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/mask_decoder_hq.py +232 -0
  13. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/prompt_encoder.py +214 -0
  14. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/sam.py +177 -0
  15. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/tiny_vit_sam.py +724 -0
  16. sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/transformer.py +240 -0
  17. sam2.1HQ/sam-hq-main/build/lib/segment_anything/predictor.py +276 -0
  18. sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/__init__.py +5 -0
  19. sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/amg.py +346 -0
  20. sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/onnx.py +155 -0
  21. sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/transforms.py +102 -0
  22. sam2.1HQ/sam-hq-main/demo/demo_hqsam.py +147 -0
  23. sam2.1HQ/sam-hq-main/demo/demo_hqsam_light.py +141 -0
  24. sam2.1HQ/sam-hq-main/demo/demo_hqsam_pip_example.py +141 -0
  25. sam2.1HQ/sam-hq-main/demo/demo_sam.py +127 -0
  26. sam2.1HQ/sam-hq-main/demo/input_imgs/dog.jpg +3 -0
  27. sam2.1HQ/sam-hq-main/demo/input_imgs/example0.png +3 -0
  28. sam2.1HQ/sam-hq-main/demo/input_imgs/example1.png +3 -0
  29. sam2.1HQ/sam-hq-main/demo/input_imgs/example2.png +3 -0
  30. sam2.1HQ/sam-hq-main/demo/input_imgs/example3.png +3 -0
  31. sam2.1HQ/sam-hq-main/demo/input_imgs/example4.png +3 -0
  32. sam2.1HQ/sam-hq-main/demo/input_imgs/example5.png +3 -0
  33. sam2.1HQ/sam-hq-main/demo/input_imgs/example6.png +3 -0
  34. sam2.1HQ/sam-hq-main/demo/input_imgs/example7.png +0 -0
  35. sam2.1HQ/sam-hq-main/demo/input_imgs/example8.png +3 -0
  36. sam2.1HQ/sam-hq-main/figs/coco_vis_comp.png +3 -0
  37. sam2.1HQ/sam-hq-main/figs/davis.png +0 -0
  38. sam2.1HQ/sam-hq-main/figs/points_comp.png +3 -0
  39. sam2.1HQ/sam-hq-main/figs/sam-hf-framework.png +3 -0
  40. sam2.1HQ/sam-hq-main/figs/sam_variants_comp.png +3 -0
  41. sam2.1HQ/sam-hq-main/figs/sam_vs_hqsam_backbones.png +3 -0
  42. sam2.1HQ/sam-hq-main/figs/ytvis.png +0 -0
  43. sam2.1HQ/sam-hq-main/sam-hq2/INSTALL.md +189 -0
  44. sam2.1HQ/sam-hq-main/sam-hq2/README.md +252 -0
  45. sam2.1HQ/sam-hq-main/sam-hq2/assets/hq-sam2-results.png +3 -0
  46. sam2.1HQ/sam-hq-main/sam-hq2/checkpoints/download_ckpts.sh +54 -0
  47. sam2.1HQ/sam-hq-main/sam-hq2/demo/demo_hqsam2.py +118 -0
  48. sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example1.png +3 -0
  49. sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example2.png +3 -0
  50. sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example3.png +3 -0
.gitattributes CHANGED
@@ -33,3 +33,38 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ sam2.1HQ/sam-hq-main/demo/input_imgs/dog.jpg filter=lfs diff=lfs merge=lfs -text
37
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example0.png filter=lfs diff=lfs merge=lfs -text
38
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example1.png filter=lfs diff=lfs merge=lfs -text
39
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example2.png filter=lfs diff=lfs merge=lfs -text
40
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example3.png filter=lfs diff=lfs merge=lfs -text
41
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example4.png filter=lfs diff=lfs merge=lfs -text
42
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example5.png filter=lfs diff=lfs merge=lfs -text
43
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example6.png filter=lfs diff=lfs merge=lfs -text
44
+ sam2.1HQ/sam-hq-main/demo/input_imgs/example8.png filter=lfs diff=lfs merge=lfs -text
45
+ sam2.1HQ/sam-hq-main/figs/coco_vis_comp.png filter=lfs diff=lfs merge=lfs -text
46
+ sam2.1HQ/sam-hq-main/figs/points_comp.png filter=lfs diff=lfs merge=lfs -text
47
+ sam2.1HQ/sam-hq-main/figs/sam_variants_comp.png filter=lfs diff=lfs merge=lfs -text
48
+ sam2.1HQ/sam-hq-main/figs/sam_vs_hqsam_backbones.png filter=lfs diff=lfs merge=lfs -text
49
+ sam2.1HQ/sam-hq-main/figs/sam-hf-framework.png filter=lfs diff=lfs merge=lfs -text
50
+ sam2.1HQ/sam-hq-main/sam-hq2/assets/hq-sam2-results.png filter=lfs diff=lfs merge=lfs -text
51
+ sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example1.png filter=lfs diff=lfs merge=lfs -text
52
+ sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example2.png filter=lfs diff=lfs merge=lfs -text
53
+ sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example3.png filter=lfs diff=lfs merge=lfs -text
54
+ sam2.1HQ/sam-hq-main/sam-hq2/notebooks/images/cars.jpg filter=lfs diff=lfs merge=lfs -text
55
+ sam2.1HQ/sam-hq-main/sam-hq2/notebooks/images/groceries.jpg filter=lfs diff=lfs merge=lfs -text
56
+ sam2.1HQ/sam-hq-main/sam-hq2/notebooks/images/truck.jpg filter=lfs diff=lfs merge=lfs -text
57
+ sam2.1HQ/sam-hq-main/sam-hq2/notebooks/videos/bedroom.mp4 filter=lfs diff=lfs merge=lfs -text
58
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/arch.png filter=lfs diff=lfs merge=lfs -text
59
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/cats.png filter=lfs diff=lfs merge=lfs -text
60
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/COCO.png filter=lfs diff=lfs merge=lfs -text
61
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/GD_GLIGEN.png filter=lfs diff=lfs merge=lfs -text
62
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/GD_SD.png filter=lfs diff=lfs merge=lfs -text
63
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/hero_figure.png filter=lfs diff=lfs merge=lfs -text
64
+ sam2.1HQ/sam-hq-main/seginw/GroundingDINO/.asset/ODinW.png filter=lfs diff=lfs merge=lfs -text
65
+ sam2.1HQ/sam-hq-main/visual_demo/1.gif filter=lfs diff=lfs merge=lfs -text
66
+ sam2.1HQ/sam-hq-main/visual_demo/2.gif filter=lfs diff=lfs merge=lfs -text
67
+ sam2.1HQ/sam-hq-main/visual_demo/3.gif filter=lfs diff=lfs merge=lfs -text
68
+ sam2.1HQ/sam-hq-main/visual_demo/4.gif filter=lfs diff=lfs merge=lfs -text
69
+ sam2.1HQ/sam-hq-main/visual_demo/5.gif filter=lfs diff=lfs merge=lfs -text
70
+ sam2.1HQ/sam-hq-main/visual_demo/6.gif filter=lfs diff=lfs merge=lfs -text
sam2.1HQ/sam-hq-main/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
sam2.1HQ/sam-hq-main/README.md ADDED
@@ -0,0 +1,318 @@
1
+ # Segment Anything in High Quality
2
+
3
+ <a href="https://colab.research.google.com/drive/1QwAbn5hsdqKOD5niuBzuqQX4eLCbNKFL?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
4
+ [![Huggingfaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/sam-hq-team/sam-hq)
5
+ [![Open in OpenXLab](https://cdn-static.openxlab.org.cn/app-center/openxlab_app.svg)](https://openxlab.org.cn/apps/detail/keleiwhu/sam-hq)
6
+ [![Downloads](https://static.pepy.tech/badge/segment-anything-hq)](https://pepy.tech/project/segment-anything-hq)
7
+
8
+
9
+ > [**Segment Anything in High Quality**](https://arxiv.org/abs/2306.01567)
10
+ > NeurIPS 2023
11
+ > ETH Zurich & HKUST
12
+
13
+ We propose HQ-SAM to upgrade SAM for high-quality zero-shot segmentation. Refer to our [paper](https://arxiv.org/abs/2306.01567) for more details.
14
+
15
+ ## Latest updates
16
+
17
+ **2025/06** -- :fire::fire: HQ-SAM is supported in the [Huggingface Transformers](https://github.com/huggingface/transformers) library. Please see the detailed usage instructions [here](https://huggingface.co/docs/transformers/main/model_doc/sam_hq). The pretrained model checkpoints can also be downloaded [here](https://huggingface.co/syscv-community).
18
+
19
+ **2024/11/17 -- HQ-SAM 2 is released**
20
+
21
+ - A new suite of improved model checkpoints (denoted as **HQ-SAM 2**, beta version) is released. See [Model Description](sam-hq2/README.md) for details. Change the working directory with `cd sam-hq2`.
22
+
23
+ ![HQ-SAM2 results comparison](sam-hq2/assets/hq-sam2-results.png?raw=true)
24
+
25
+ Updates
26
+ -----------------
27
+ :fire::fire: **SAM for Video Segmentation**: Interested in intersecting SAM and video? HQ-SAM is supported by [DEVA](https://github.com/hkchengrex/Tracking-Anything-with-DEVA) in its text-prompted mode! Also, check out the SAM-based works [MASA](https://github.com/siyuanliii/masa) and [SAM-PT](https://github.com/SysCV/sam-pt).
28
+
29
+ :fire::fire: **SAM in 3D**: Interested in intersecting SAM and 3D Gaussian Splatting? See our new work [Gaussian Grouping](https://github.com/lkeab/gaussian-grouping)! Also, if you are interested in intersecting SAM and NeRF, please see the work [SANeRF-HQ](https://github.com/lyclyc52/SANeRF-HQ)!
30
+
31
+ More: HQ-SAM is adopted in [Osprey](https://arxiv.org/abs/2312.10032), [CaR](https://torrvision.com/clip_as_rnn/), [SpatialRGPT](https://arxiv.org/abs/2406.01584), [GLaMM](https://arxiv.org/abs/2311.03356), [ENIGMA-51](https://iplab.dmi.unict.it/ENIGMA-51/) to provide fine-grained mask annotations.
32
+
33
+
34
+ Platform integration: HQ-SAM is supported in the [OpenMMLab PlayGround](https://github.com/open-mmlab/playground/blob/main/label_anything/readme.md) for annotation with Label-Studio, in [segment-geospatial](https://github.com/opengeos/segment-geospatial) for segmenting geospatial data, in the mask annotation tool [ISAT](https://github.com/yatengLG/ISAT_with_segment_anything), and in [Supervisely](https://supervisely.com/blog/segment-anything-in-high-quality-HQ-SAM/)!
35
+
36
+ 2023/08/11: Support for a [python package](#quick-installation-via-pip) for easier **pip installation**. Light HQ-SAM is in the [EfficientSAM series](https://github.com/IDEA-Research/Grounded-Segment-Anything/tree/main/EfficientSAM), combined with [Grounded SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything/)!
37
+
38
+ <!-- 2023/07/21: HQ-SAM is also in OpenXLab apps, thanks their support! -->
39
+
40
+ :rocket::rocket: 2023/07/17: We released **Light HQ-SAM**, which uses TinyViT as the backbone for fast, high-quality zero-shot segmentation and reaches **41.2 FPS**. Refer to [Light HQ-SAM vs. MobileSAM](#light-hq-sam-vs-mobilesam-on-coco) for more details.
41
+
42
+ :trophy::1st_place_medal: 2023/07/14: Grounded **HQ-SAM** obtains **first place**:1st_place_medal: on the zero-shot track of the [Segmentation in the Wild](https://eval.ai/web/challenges/challenge-page/1931/leaderboard/4567) competition (hosted at the [CVPR 2023 workshop](https://computer-vision-in-the-wild.github.io/cvpr-2023/)), outperforming Grounded SAM. Refer to our [SGinW evaluation](#grounded-hq-sam-vs-grounded-sam-on-seginw) for more details.
43
+
44
+ 2023/07/05: We released [SAM tuning instructions](#hq-sam-tuning-and-hq-seg44k-data) and [HQSeg-44K data](#hq-sam-tuning-and-hq-seg44k-data).
45
+
46
+ 2023/07/04: HQ-SAM is adopted in [SAM-PT](https://github.com/SysCV/sam-pt) to improve the SAM-based zero-shot video segmentation performance. Also, HQ-SAM is used in [Grounded-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything), [Inpaint Anything](https://github.com/Uminosachi/sd-webui-inpaint-anything) and [HQTrack](https://github.com/jiawen-zhu/HQTrack) (2nd in VOTS 2023).
47
+
48
+ 2023/06/28: We released the [ONNX export script](#onnx-export) and [colab notebook](https://colab.research.google.com/drive/11U2La49c2IxahzJkAV-EzPqEH3cz_5hq?usp=sharing) for exporting and using ONNX model.
49
+
50
+ 2023/06/23: Play with HQ-SAM demo at [![Huggingfaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/sam-hq-team/sam-hq), which supports point, box and text prompts.
51
+
52
+ 2023/06/14: We released the [colab demo](https://colab.research.google.com/drive/1QwAbn5hsdqKOD5niuBzuqQX4eLCbNKFL?usp=sharing) <a href="https://colab.research.google.com/drive/1QwAbn5hsdqKOD5niuBzuqQX4eLCbNKFL?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> and [automatic mask generator notebook](https://colab.research.google.com/drive/1dhRq4eR6Fbl-yl1vbQvU9hqyyeOidQaU?usp=sharing).
53
+
54
+ 2023/06/13: We released the [model checkpoints](#model-checkpoints) and [demo visualization codes](#getting-started).
55
+
56
+ Visual comparison between SAM and HQ-SAM
57
+ -----------------
58
+ **SAM vs. HQ-SAM**
59
+ <table>
60
+ <tr>
61
+ <td><img src="visual_demo/1.gif" width="250"></td>
62
+ <td><img src="visual_demo/2.gif" width="250"></td>
63
+ <td><img src="visual_demo/3.gif" width="250"></td>
64
+ </tr>
65
+ <tr>
66
+ <td><img src="visual_demo/4.gif" width="250"></td>
67
+ <td><img src="visual_demo/5.gif" width="250"></td>
68
+ <td><img src="visual_demo/6.gif" width="250"></td>
69
+ </tr>
70
+ </table>
71
+
72
+ <img width="900" alt="image" src='figs/coco_vis_comp.png'>
73
+
74
+ Introduction
75
+ -----------------
76
+ The recent Segment Anything Model (SAM) represents a big leap in scaling up segmentation models, allowing for powerful zero-shot capabilities and flexible prompting. Despite being trained with 1.1 billion masks, SAM's mask prediction quality falls short in many cases, particularly when dealing with objects that have intricate structures. We propose HQ-SAM, equipping SAM with the ability to accurately segment any object while maintaining SAM's original promptable design, efficiency, and zero-shot generalizability. Our careful design reuses and preserves the pre-trained model weights of SAM, while only introducing minimal additional parameters and computation. We design a learnable High-Quality Output Token, which is injected into SAM's mask decoder and is responsible for predicting the high-quality mask. Instead of applying it only on mask-decoder features, we first fuse them with early and final ViT features for improved mask details. To train our introduced learnable parameters, we compose a dataset of 44K fine-grained masks from several sources. HQ-SAM is trained only on this introduced dataset of 44K masks, which takes just 4 hours on 8 GPUs. We show the efficacy of HQ-SAM on a suite of 9 diverse segmentation datasets across different downstream tasks, 7 of which are evaluated in a zero-shot transfer protocol.
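To make the token-and-fusion design above concrete, here is a minimal, illustrative PyTorch sketch. It is **not** the repository's implementation (see `segment_anything/modeling/mask_decoder_hq.py` for that); every class and variable name below is invented for illustration, and the shapes are simplified.

```
import torch
import torch.nn as nn


class HQTokenSketch(nn.Module):
    """Toy illustration of a learnable HQ Output Token plus global-local feature fusion."""

    def __init__(self, dim: int = 256):
        super().__init__()
        # One extra learnable output token; in HQ-SAM it is appended to SAM's existing
        # output tokens and decoded by the (frozen) mask decoder's transformer.
        self.hq_token = nn.Embedding(1, dim)
        # Small MLP mapping the decoded HQ token to per-pixel mask weights.
        self.hq_mlp = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, dim // 8))
        # Fuse mask-decoder features with early and final ViT features.
        self.fuse = nn.Conv2d(3 * (dim // 8), dim // 8, kernel_size=1)

    def forward(self, decoded_hq_token, mask_feat, early_vit_feat, final_vit_feat):
        # All feature maps: (B, dim // 8, H, W), already projected to a common resolution.
        hq_feat = self.fuse(torch.cat([mask_feat, early_vit_feat, final_vit_feat], dim=1))
        weights = self.hq_mlp(decoded_hq_token)                 # (B, dim // 8)
        return torch.einsum("bc,bchw->bhw", weights, hq_feat)   # high-quality mask logits
```

In this sketch only the added modules (the token, its MLP, and the fusion layer) would be trained while SAM's pre-trained weights stay frozen, which is what keeps the extra parameter count and training cost small.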
77
+
78
+ <img width="1096" alt="image" src='figs/sam-hf-framework.png'>
79
+
80
+ Quantitative comparison between SAM and HQ-SAM
81
+ -----------------
82
+ Note: For box-prompting-based evaluation, we feed SAM, MobileSAM and our HQ-SAM with the same image/video bounding boxes and adopt the single mask output mode of SAM.
83
+
84
+ We provide comprehensive performance, model size and speed comparison on SAM variants:
85
+ <img width="1096" alt="image" src='figs/sam_variants_comp.png'>
86
+
87
+ ### Various ViT backbones on COCO:
88
+ ![backbones](figs/sam_vs_hqsam_backbones.png)
89
+ Note: For the COCO dataset, we use a SOTA detector FocalNet-DINO trained on the COCO dataset as our box prompt generator.
90
+
91
+ ### YTVIS and HQ-YTVIS
92
+ Note: Using the ViT-L backbone. We adopt the SOTA detector Mask2Former, trained on the YouTubeVIS 2019 dataset, as our video box prompt generator while reusing its object association prediction.
93
+ ![ytvis](figs/ytvis.png)
94
+
95
+ ### DAVIS
96
+ Note: Using the ViT-L backbone. We adopt the SOTA model XMem as our video box prompt generator while reusing its object association prediction.
97
+ ![davis](figs/davis.png)
98
+
99
+ ### **Quick Installation via pip**
100
+ ```
101
+ pip install segment-anything-hq
102
+ python
103
+ from segment_anything_hq import sam_model_registry
104
+ model_type = "<model_type>" #"vit_l/vit_b/vit_h/vit_tiny"
105
+ sam_checkpoint = "<path/to/checkpoint>"
106
+ sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
107
+ ```
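As a follow-on to the pip snippet above, here is a hedged sketch of whole-image mask generation. It assumes the pip package mirrors this repository's `segment_anything` exports (which include `SamAutomaticMaskGenerator`); the checkpoint filename and image path are placeholders you should adjust.

```
import cv2
from segment_anything_hq import sam_model_registry, SamAutomaticMaskGenerator

# Placeholder paths: point these at your downloaded checkpoint and an image of your choice.
sam = sam_model_registry["vit_l"](checkpoint="pretrained_checkpoint/sam_hq_vit_l.pth")
sam.to("cuda")  # optional; CPU also works, just slower
mask_generator = SamAutomaticMaskGenerator(sam)

image = cv2.cvtColor(cv2.imread("demo/input_imgs/example0.png"), cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)
print(len(masks), masks[0]["bbox"])  # each record has 'segmentation', 'bbox', 'area', ...
```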
108
+
109
+ See a specific usage example (such as vit_l) by running the command below:
110
+ ```
111
+ export PYTHONPATH=$(pwd)
112
+ python demo/demo_hqsam_pip_example.py
113
+ ```
114
+
115
+
116
+ ### **Standard Installation**
117
+ The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended.
118
+
119
+ Clone the repository locally and install with
120
+
121
+ ```
122
+ git clone https://github.com/SysCV/sam-hq.git
123
+ cd sam-hq; pip install -e .
124
+ ```
125
+
126
+ The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks.
127
+
128
+ ```
129
+ pip install opencv-python pycocotools matplotlib onnxruntime onnx timm
130
+ ```
131
+
132
+ ### Example conda environment setup
133
+ ```bash
134
+ conda create --name sam_hq python=3.8 -y
135
+ conda activate sam_hq
136
+ conda install pytorch==1.10.0 torchvision==0.11.0 cudatoolkit=11.1 -c pytorch -c nvidia
137
+ pip install opencv-python pycocotools matplotlib onnxruntime onnx timm
138
+
139
+ # under your working directory
140
+ git clone https://github.com/SysCV/sam-hq.git
141
+ cd sam-hq
142
+ pip install -e .
143
+ export PYTHONPATH=$(pwd)
144
+ ```
145
+
146
+ ### **Model Checkpoints**
147
+
148
+ Three HQ-SAM model versions are available, with different backbone sizes. These models can be instantiated by running
149
+
150
+ ```
151
+ from segment_anything import sam_model_registry
152
+ sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
153
+ ```
154
+
155
+ Download the provided trained models below and put them into the `pretrained_checkpoint` folder:
156
+ ```
157
+ mkdir pretrained_checkpoint
158
+ ```
159
+
160
+ Click the links below to download the checkpoint for the corresponding model type. We also provide **alternative model downloading links** [here](https://github.com/SysCV/sam-hq/issues/5) or at [hugging face](https://huggingface.co/lkeab/hq-sam/tree/main).
161
+ - `vit_b`: [ViT-B HQ-SAM model.](https://drive.google.com/file/d/11yExZLOve38kRZPfRx_MRxfIAKmfMY47/view?usp=sharing)
162
+ - `vit_l`: [ViT-L HQ-SAM model.](https://drive.google.com/file/d/1Uk17tDKX1YAKas5knI4y9ZJCo0lRVL0G/view?usp=sharing)
163
+ - `vit_h`: [ViT-H HQ-SAM model.](https://drive.google.com/file/d/1qobFYrI4eyIANfBSmYcGuWRaSIXfMOQ8/view?usp=sharing)
164
+ - `vit_tiny` (**Light HQ-SAM** for real-time need): [ViT-Tiny HQ-SAM model.](https://huggingface.co/lkeab/hq-sam/resolve/main/sam_hq_vit_tiny.pth)
165
+
166
+ ### **Getting Started**
167
+
168
+ First download a [model checkpoint](#model-checkpoints). Then the model can be used in just a few lines to get masks from a given prompt:
169
+
170
+ ```
171
+ from segment_anything import SamPredictor, sam_model_registry
172
+ sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
173
+ predictor = SamPredictor(sam)
174
+ predictor.set_image(<your_image>)
175
+ masks, _, _ = predictor.predict(<input_prompts>)
176
+ ```
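For a concrete (but still hedged) variant of the snippet above, the sketch below loads one of the demo images and prompts the predictor with a single box; the checkpoint path and box coordinates are placeholder values, and `predict` is called with its standard SAM-style arguments.

```
import cv2
import numpy as np
from segment_anything import SamPredictor, sam_model_registry

sam = sam_model_registry["vit_l"](checkpoint="pretrained_checkpoint/sam_hq_vit_l.pth")  # placeholder path
predictor = SamPredictor(sam)

image = cv2.cvtColor(cv2.imread("demo/input_imgs/example1.png"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

input_box = np.array([100, 100, 600, 500])  # XYXY pixel coordinates (arbitrary example values)
masks, scores, _ = predictor.predict(box=input_box, multimask_output=False)
print(masks.shape, scores)  # (1, H, W) boolean mask and its predicted quality score
```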
177
+
178
+ Additionally, see the usage examples in our [demo](/demo/demo_hqsam.py) , [colab notebook](https://colab.research.google.com/drive/1QwAbn5hsdqKOD5niuBzuqQX4eLCbNKFL?usp=sharing) and [automatic mask generator notebook](https://colab.research.google.com/drive/1dhRq4eR6Fbl-yl1vbQvU9hqyyeOidQaU?usp=sharing).
179
+
180
+ To obtain HQ-SAM's visual result:
181
+ ```
182
+ python demo/demo_hqsam.py
183
+ ```
184
+
185
+ To obtain the baseline SAM's visual result (note that you need to download the original SAM checkpoint from the [baseline-SAM-L model](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth) and put it into the `pretrained_checkpoint` folder):
186
+ ```
187
+ python demo/demo_sam.py
188
+ ```
189
+
190
+ To obtain Light HQ-SAM's visual result:
191
+ ```
192
+ python demo/demo_hqsam_light.py
193
+ ```
194
+
195
+ ### **HQ-SAM Tuning and HQ-Seg44k Data**
196
+ We provide detailed training, evaluation, visualization and data downloading instructions in [HQ-SAM training](train/README.md). You can also replace our training data to obtain your own SAM for a specific application domain (such as medical imaging, OCR, or remote sensing).
197
+
198
+ Please change the current folder path to:
199
+ ```
200
+ cd train
201
+ ```
202
+ and then refer to detailed [readme instruction](train/README.md).
203
+
204
+ ### **Grounded HQ-SAM vs Grounded SAM on [SegInW](https://eval.ai/web/challenges/challenge-page/1931/overview?ref=blog.roboflow.com)**
205
+
206
+ Grounded HQ-SAM wins **first place**:1st_place_medal: on the SegInW benchmark (consisting of 25 public zero-shot in-the-wild segmentation datasets), outperforming Grounded SAM with the same Grounding-DINO detector.
207
+
208
+ <table><tbody>
209
+ <!-- START TABLE -->
210
+ <!-- TABLE HEADER -->
211
+ <th valign="bottom">Model Name</th>
212
+ <th valign="bottom">Encoder</th>
213
+ <th valign="bottom">GroundingDINO</th>
214
+ <th valign="bottom">Mean AP</th>
215
+ <th valign="bottom">Evaluation Script</th>
216
+ <th valign="bottom">Log</th>
217
+ <th valign="bottom">Output Json</th>
218
+ <!-- TABLE BODY -->
219
+ <!-- ROW: maskformer2_R50_bs16_50ep -->
220
+ <tr><td align="left">Grounded SAM</td>
221
+ <td align="center">vit-h</td>
222
+ <td align="center">swin-b</td>
223
+ <td align="center">48.7</td>
224
+ <td align="center"><a href="seginw/test_seginw.sh">script</a></td>
225
+ <td align="center"><a href="seginw/logs/grounded_sam.log">log</a></td>
226
+ <td align="center"><a href="https://huggingface.co/sam-hq-team/SegInW/resolve/main/result/grounded_sam.zip">result</a></td>
227
+ </tr>
228
+ <!-- ROW: maskformer2_R101_bs16_50ep -->
229
+ <tr><td align="left">Grounded HQ-SAM</td>
230
+ <td align="center">vit-h</td>
231
+ <td align="center">swin-b</td>
232
+ <td align="center"><b>49.6</b></td>
233
+ <td align="center"><a href="seginw/test_seginw_hq.sh">script</a></td>
234
+ <td align="center"><a href="seginw/logs/grounded_hqsam.log">log</a></td>
235
+ <td align="center"><a href="https://huggingface.co/sam-hq-team/SegInW/resolve/main/result/grounded_hqsam.zip">result</a></td>
236
+ </tr>
237
+ </tbody></table>
238
+
239
+ Please change the current folder path to:
240
+ ```
241
+ cd seginw
242
+ ```
243
+ We provide detailed evaluation instructions and metrics on SegInW in [Grounded-HQ-SAM evaluation](seginw/README.md).
244
+
245
+ ### **Light HQ-SAM vs MobileSAM on COCO**
246
+ We propose [Light HQ-SAM](#model-checkpoints) based on the TinyViT image encoder provided by MobileSAM. We provide a quantitative comparison of zero-shot COCO performance, speed, and memory below. Try Light HQ-SAM [here](#getting-started).
247
+
248
+ <table><tbody>
249
+ <!-- START TABLE -->
250
+ <!-- TABLE HEADER -->
251
+ <th valign="bottom">Model</th>
252
+ <th valign="bottom">Encoder</th>
253
+ <th valign="bottom">AP</th>
254
+ <th valign="bottom">AP@L</th>
255
+ <th valign="bottom">AP@M</th>
256
+ <th valign="bottom">AP@S</th>
257
+ <th valign="bottom">Model Params (MB)</th>
258
+ <th valign="bottom">FPS</th>
259
+ <th valign="bottom">Memory (GB)</th>
260
+ <!-- TABLE BODY -->
261
+ <!-- ROW: maskformer2_R50_bs16_50ep -->
262
+ <tr><td align="left">MobileSAM</td>
263
+ <td align="center">TinyViT</td>
264
+ <td align="center">44.3</td>
265
+ <td align="center">61.8</td>
266
+ <td align="center">48.1</td>
267
+ <td align="center">28.8</td>
268
+ <td align="center">38.6</td>
269
+ <td align="center">44.8</td>
270
+ <td align="center">3.7</td>
271
+ </tr>
272
+ <!-- ROW: maskformer2_R101_bs16_50ep -->
273
+ <tr><td align="left"><b>Light HQ-SAM</b></td>
274
+ <td align="center">TinyViT</td>
275
+ <td align="center"><b>45.0</b></td>
276
+ <td align="center">62.8</td>
277
+ <td align="center">48.8</td>
278
+ <td align="center">29.2</td>
279
+ <td align="center">40.3</td>
280
+ <td align="center">41.2</td>
281
+ <td align="center">3.7</td>
282
+ </tr>
283
+ </tbody></table>
284
+
285
+ Note: For the COCO dataset, we use the same SOTA detector FocalNet-DINO, trained on COCO, as the box prompt generator for both our model and MobileSAM.
286
+
287
+
288
+ ### **ONNX export**
289
+ HQ-SAM's lightweight mask decoder can be exported to ONNX format so that it can be run in any environment that supports ONNX runtime. Export the model with
290
+ ```
291
+ python scripts/export_onnx_model.py --checkpoint <path/to/checkpoint> --model-type <model_type> --output <path/to/output>
292
+ ```
293
+ See the [example notebook](https://colab.research.google.com/drive/11U2La49c2IxahzJkAV-EzPqEH3cz_5hq?usp=sharing) for details on how to combine image preprocessing via HQ-SAM's backbone with mask prediction using the ONNX model. It is recommended to use the latest stable version of PyTorch for ONNX export.
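As a quick sanity check after export (assuming `onnxruntime` is installed and the placeholder path below matches the `--output` you passed), you can load the exported model and list the input tensors it expects; the full pre-/post-processing pipeline is shown in the linked notebook.

```
import onnxruntime

session = onnxruntime.InferenceSession("sam_hq_decoder.onnx")  # placeholder output path
for inp in session.get_inputs():
    print(inp.name, inp.shape, inp.type)
```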
294
+
295
+
296
+ Citation
297
+ ---------------
298
+ If you find HQ-SAM useful in your research or refer to the provided baseline results, please star :star: this repository and consider citing :pencil::
299
+ ```
300
+ @inproceedings{sam_hq,
301
+ title={Segment Anything in High Quality},
302
+ author={Ke, Lei and Ye, Mingqiao and Danelljan, Martin and Liu, Yifan and Tai, Yu-Wing and Tang, Chi-Keung and Yu, Fisher},
303
+ booktitle={NeurIPS},
304
+ year={2023}
305
+ }
306
+ ```
307
+ Related high-quality instance segmentation work:
308
+ ```
309
+ @inproceedings{transfiner,
310
+ title={Mask Transfiner for High-Quality Instance Segmentation},
311
+ author={Ke, Lei and Danelljan, Martin and Li, Xia and Tai, Yu-Wing and Tang, Chi-Keung and Yu, Fisher},
312
+ booktitle={CVPR},
313
+ year={2022}
314
+ }
315
+ ```
316
+
317
+ ## Acknowledgments
318
+ - Thanks to [SAM](https://github.com/facebookresearch/segment-anything), [Grounded SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything) and [MobileSAM](https://github.com/ChaoningZhang/MobileSAM) for their public code and released models.
sam2.1HQ/sam-hq-main/build/lib/segment_anything/__init__.py ADDED
@@ -0,0 +1,16 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from .build_sam import (
8
+ build_sam,
9
+ build_sam_vit_h,
10
+ build_sam_vit_l,
11
+ build_sam_vit_b,
12
+ sam_model_registry,
13
+ )
14
+ from .build_sam_baseline import sam_model_registry_baseline
15
+ from .predictor import SamPredictor
16
+ from .automatic_mask_generator import SamAutomaticMaskGenerator
sam2.1HQ/sam-hq-main/build/lib/segment_anything/automatic_mask_generator.py ADDED
@@ -0,0 +1,374 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torchvision.ops.boxes import batched_nms, box_area # type: ignore
10
+
11
+ from typing import Any, Dict, List, Optional, Tuple
12
+
13
+ from .modeling import Sam
14
+ from .predictor import SamPredictor
15
+ from .utils.amg import (
16
+ MaskData,
17
+ area_from_rle,
18
+ batch_iterator,
19
+ batched_mask_to_box,
20
+ box_xyxy_to_xywh,
21
+ build_all_layer_point_grids,
22
+ calculate_stability_score,
23
+ coco_encode_rle,
24
+ generate_crop_boxes,
25
+ is_box_near_crop_edge,
26
+ mask_to_rle_pytorch,
27
+ remove_small_regions,
28
+ rle_to_mask,
29
+ uncrop_boxes_xyxy,
30
+ uncrop_masks,
31
+ uncrop_points,
32
+ )
33
+
34
+
35
+ class SamAutomaticMaskGenerator:
36
+ def __init__(
37
+ self,
38
+ model: Sam,
39
+ points_per_side: Optional[int] = 32,
40
+ points_per_batch: int = 64,
41
+ pred_iou_thresh: float = 0.88,
42
+ stability_score_thresh: float = 0.95,
43
+ stability_score_offset: float = 1.0,
44
+ box_nms_thresh: float = 0.7,
45
+ crop_n_layers: int = 0,
46
+ crop_nms_thresh: float = 0.7,
47
+ crop_overlap_ratio: float = 512 / 1500,
48
+ crop_n_points_downscale_factor: int = 1,
49
+ point_grids: Optional[List[np.ndarray]] = None,
50
+ min_mask_region_area: int = 0,
51
+ output_mode: str = "binary_mask",
52
+ ) -> None:
53
+ """
54
+ Using a SAM model, generates masks for the entire image.
55
+ Generates a grid of point prompts over the image, then filters
56
+ low quality and duplicate masks. The default settings are chosen
57
+ for SAM with a ViT-H backbone.
58
+
59
+ Arguments:
60
+ model (Sam): The SAM model to use for mask prediction.
61
+ points_per_side (int or None): The number of points to be sampled
62
+ along one side of the image. The total number of points is
63
+ points_per_side**2. If None, 'point_grids' must provide explicit
64
+ point sampling.
65
+ points_per_batch (int): Sets the number of points run simultaneously
66
+ by the model. Higher numbers may be faster but use more GPU memory.
67
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
68
+ model's predicted mask quality.
69
+ stability_score_thresh (float): A filtering threshold in [0,1], using
70
+ the stability of the mask under changes to the cutoff used to binarize
71
+ the model's mask predictions.
72
+ stability_score_offset (float): The amount to shift the cutoff when
73
+ calculated the stability score.
74
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
75
+ suppression to filter duplicate masks.
76
+ crop_n_layers (int): If >0, mask prediction will be run again on
77
+ crops of the image. Sets the number of layers to run, where each
78
+ layer has 2**i_layer number of image crops.
79
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
80
+ suppression to filter duplicate masks between different crops.
81
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
82
+ In the first crop layer, crops will overlap by this fraction of
83
+ the image length. Later layers with more crops scale down this overlap.
84
+ crop_n_points_downscale_factor (int): The number of points-per-side
85
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
86
+ point_grids (list(np.ndarray) or None): A list over explicit grids
87
+ of points used for sampling, normalized to [0,1]. The nth grid in the
88
+ list is used in the nth crop layer. Exclusive with points_per_side.
89
+ min_mask_region_area (int): If >0, postprocessing will be applied
90
+ to remove disconnected regions and holes in masks with area smaller
91
+ than min_mask_region_area. Requires opencv.
92
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
93
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
94
+ For large resolutions, 'binary_mask' may consume large amounts of
95
+ memory.
96
+ """
97
+
98
+ assert (points_per_side is None) != (
99
+ point_grids is None
100
+ ), "Exactly one of points_per_side or point_grid must be provided."
101
+ if points_per_side is not None:
102
+ self.point_grids = build_all_layer_point_grids(
103
+ points_per_side,
104
+ crop_n_layers,
105
+ crop_n_points_downscale_factor,
106
+ )
107
+ elif point_grids is not None:
108
+ self.point_grids = point_grids
109
+ else:
110
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
111
+
112
+ assert output_mode in [
113
+ "binary_mask",
114
+ "uncompressed_rle",
115
+ "coco_rle",
116
+ ], f"Unknown output_mode {output_mode}."
117
+ if output_mode == "coco_rle":
118
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
119
+
120
+ if min_mask_region_area > 0:
121
+ import cv2 # type: ignore # noqa: F401
122
+
123
+ self.predictor = SamPredictor(model)
124
+ self.points_per_batch = points_per_batch
125
+ self.pred_iou_thresh = pred_iou_thresh
126
+ self.stability_score_thresh = stability_score_thresh
127
+ self.stability_score_offset = stability_score_offset
128
+ self.box_nms_thresh = box_nms_thresh
129
+ self.crop_n_layers = crop_n_layers
130
+ self.crop_nms_thresh = crop_nms_thresh
131
+ self.crop_overlap_ratio = crop_overlap_ratio
132
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
133
+ self.min_mask_region_area = min_mask_region_area
134
+ self.output_mode = output_mode
135
+
136
+ @torch.no_grad()
137
+ def generate(self, image: np.ndarray, multimask_output: bool = True) -> List[Dict[str, Any]]:
138
+ """
139
+ Generates masks for the given image.
140
+
141
+ Arguments:
142
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
143
+
144
+ Returns:
145
+ list(dict(str, any)): A list over records for masks. Each record is
146
+ a dict containing the following keys:
147
+ segmentation (dict(str, any) or np.ndarray): The mask. If
148
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
149
+ is a dictionary containing the RLE.
150
+ bbox (list(float)): The box around the mask, in XYWH format.
151
+ area (int): The area in pixels of the mask.
152
+ predicted_iou (float): The model's own prediction of the mask's
153
+ quality. This is filtered by the pred_iou_thresh parameter.
154
+ point_coords (list(list(float))): The point coordinates input
155
+ to the model to generate this mask.
156
+ stability_score (float): A measure of the mask's quality. This
157
+ is filtered on using the stability_score_thresh parameter.
158
+ crop_box (list(float)): The crop of the image used to generate
159
+ the mask, given in XYWH format.
160
+ """
161
+
162
+ # Generate masks
163
+ mask_data = self._generate_masks(image, multimask_output)
164
+
165
+ # Filter small disconnected regions and holes in masks
166
+ if self.min_mask_region_area > 0:
167
+ mask_data = self.postprocess_small_regions(
168
+ mask_data,
169
+ self.min_mask_region_area,
170
+ max(self.box_nms_thresh, self.crop_nms_thresh),
171
+ )
172
+
173
+ # Encode masks
174
+ if self.output_mode == "coco_rle":
175
+ mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
176
+ elif self.output_mode == "binary_mask":
177
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
178
+ else:
179
+ mask_data["segmentations"] = mask_data["rles"]
180
+
181
+ # Write mask records
182
+ curr_anns = []
183
+ for idx in range(len(mask_data["segmentations"])):
184
+ ann = {
185
+ "segmentation": mask_data["segmentations"][idx],
186
+ "area": area_from_rle(mask_data["rles"][idx]),
187
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
188
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
189
+ "point_coords": [mask_data["points"][idx].tolist()],
190
+ "stability_score": mask_data["stability_score"][idx].item(),
191
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
192
+ }
193
+ curr_anns.append(ann)
194
+
195
+ return curr_anns
196
+
197
+ def _generate_masks(self, image: np.ndarray, multimask_output: bool = True) -> MaskData:
198
+ orig_size = image.shape[:2]
199
+ crop_boxes, layer_idxs = generate_crop_boxes(
200
+ orig_size, self.crop_n_layers, self.crop_overlap_ratio
201
+ )
202
+
203
+ # Iterate over image crops
204
+ data = MaskData()
205
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
206
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size, multimask_output)
207
+ data.cat(crop_data)
208
+
209
+ # Remove duplicate masks between crops
210
+ if len(crop_boxes) > 1:
211
+ # Prefer masks from smaller crops
212
+ scores = 1 / box_area(data["crop_boxes"])
213
+ scores = scores.to(data["boxes"].device)
214
+ keep_by_nms = batched_nms(
215
+ data["boxes"].float(),
216
+ scores,
217
+ torch.zeros_like(data["boxes"][:, 0]), # categories
218
+ iou_threshold=self.crop_nms_thresh,
219
+ )
220
+ data.filter(keep_by_nms)
221
+
222
+ data.to_numpy()
223
+ return data
224
+
225
+ def _process_crop(
226
+ self,
227
+ image: np.ndarray,
228
+ crop_box: List[int],
229
+ crop_layer_idx: int,
230
+ orig_size: Tuple[int, ...],
231
+ multimask_output: bool = True,
232
+ ) -> MaskData:
233
+ # Crop the image and calculate embeddings
234
+ x0, y0, x1, y1 = crop_box
235
+ cropped_im = image[y0:y1, x0:x1, :]
236
+ cropped_im_size = cropped_im.shape[:2]
237
+ self.predictor.set_image(cropped_im)
238
+
239
+ # Get points for this crop
240
+ points_scale = np.array(cropped_im_size)[None, ::-1]
241
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
242
+
243
+ # Generate masks for this crop in batches
244
+ data = MaskData()
245
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
246
+ batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, multimask_output)
247
+ data.cat(batch_data)
248
+ del batch_data
249
+ self.predictor.reset_image()
250
+
251
+ # Remove duplicates within this crop.
252
+ keep_by_nms = batched_nms(
253
+ data["boxes"].float(),
254
+ data["iou_preds"],
255
+ torch.zeros_like(data["boxes"][:, 0]), # categories
256
+ iou_threshold=self.box_nms_thresh,
257
+ )
258
+ data.filter(keep_by_nms)
259
+
260
+ # Return to the original image frame
261
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
262
+ data["points"] = uncrop_points(data["points"], crop_box)
263
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
264
+
265
+ return data
266
+
267
+ def _process_batch(
268
+ self,
269
+ points: np.ndarray,
270
+ im_size: Tuple[int, ...],
271
+ crop_box: List[int],
272
+ orig_size: Tuple[int, ...],
273
+ multimask_output: bool = True,
274
+ ) -> MaskData:
275
+ orig_h, orig_w = orig_size
276
+
277
+ # Run model on this batch
278
+ transformed_points = self.predictor.transform.apply_coords(points, im_size)
279
+ in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
280
+ in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
281
+ masks, iou_preds, _ = self.predictor.predict_torch(
282
+ in_points[:, None, :],
283
+ in_labels[:, None],
284
+ multimask_output=multimask_output,
285
+ return_logits=True,
286
+ )
287
+
288
+ # Serialize predictions and store in MaskData
289
+ data = MaskData(
290
+ masks=masks.flatten(0, 1),
291
+ iou_preds=iou_preds.flatten(0, 1),
292
+ points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
293
+ )
294
+ del masks
295
+
296
+ # Filter by predicted IoU
297
+ if self.pred_iou_thresh > 0.0:
298
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
299
+ data.filter(keep_mask)
300
+
301
+ # Calculate stability score
302
+ data["stability_score"] = calculate_stability_score(
303
+ data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
304
+ )
305
+ if self.stability_score_thresh > 0.0:
306
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
307
+ data.filter(keep_mask)
308
+
309
+ # Threshold masks and calculate boxes
310
+ data["masks"] = data["masks"] > self.predictor.model.mask_threshold
311
+ data["boxes"] = batched_mask_to_box(data["masks"])
312
+
313
+ # Filter boxes that touch crop boundaries
314
+ keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
315
+ if not torch.all(keep_mask):
316
+ data.filter(keep_mask)
317
+
318
+ # Compress to RLE
319
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
320
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
321
+ del data["masks"]
322
+
323
+ return data
324
+
325
+ @staticmethod
326
+ def postprocess_small_regions(
327
+ mask_data: MaskData, min_area: int, nms_thresh: float
328
+ ) -> MaskData:
329
+ """
330
+ Removes small disconnected regions and holes in masks, then reruns
331
+ box NMS to remove any new duplicates.
332
+
333
+ Edits mask_data in place.
334
+
335
+ Requires open-cv as a dependency.
336
+ """
337
+ if len(mask_data["rles"]) == 0:
338
+ return mask_data
339
+
340
+ # Filter small disconnected regions and holes
341
+ new_masks = []
342
+ scores = []
343
+ for rle in mask_data["rles"]:
344
+ mask = rle_to_mask(rle)
345
+
346
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
347
+ unchanged = not changed
348
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
349
+ unchanged = unchanged and not changed
350
+
351
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
352
+ # Give score=0 to changed masks and score=1 to unchanged masks
353
+ # so NMS will prefer ones that didn't need postprocessing
354
+ scores.append(float(unchanged))
355
+
356
+ # Recalculate boxes and remove any new duplicates
357
+ masks = torch.cat(new_masks, dim=0)
358
+ boxes = batched_mask_to_box(masks)
359
+ keep_by_nms = batched_nms(
360
+ boxes.float(),
361
+ torch.as_tensor(scores),
362
+ torch.zeros_like(boxes[:, 0]), # categories
363
+ iou_threshold=nms_thresh,
364
+ )
365
+
366
+ # Only recalculate RLEs for masks that have changed
367
+ for i_mask in keep_by_nms:
368
+ if scores[i_mask] == 0.0:
369
+ mask_torch = masks[i_mask].unsqueeze(0)
370
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
371
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
372
+ mask_data.filter(keep_by_nms)
373
+
374
+ return mask_data
sam2.1HQ/sam-hq-main/build/lib/segment_anything/build_sam.py ADDED
@@ -0,0 +1,169 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+
9
+ from functools import partial
10
+
11
+ from .modeling import ImageEncoderViT, MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer, TinyViT
12
+
13
+
14
+ def build_sam_vit_h(checkpoint=None):
15
+ return _build_sam(
16
+ encoder_embed_dim=1280,
17
+ encoder_depth=32,
18
+ encoder_num_heads=16,
19
+ encoder_global_attn_indexes=[7, 15, 23, 31],
20
+ checkpoint=checkpoint,
21
+ )
22
+
23
+
24
+ build_sam = build_sam_vit_h
25
+
26
+
27
+ def build_sam_vit_l(checkpoint=None):
28
+ return _build_sam(
29
+ encoder_embed_dim=1024,
30
+ encoder_depth=24,
31
+ encoder_num_heads=16,
32
+ encoder_global_attn_indexes=[5, 11, 17, 23],
33
+ checkpoint=checkpoint,
34
+ )
35
+
36
+
37
+ def build_sam_vit_b(checkpoint=None):
38
+ return _build_sam(
39
+ encoder_embed_dim=768,
40
+ encoder_depth=12,
41
+ encoder_num_heads=12,
42
+ encoder_global_attn_indexes=[2, 5, 8, 11],
43
+ checkpoint=checkpoint,
44
+ )
45
+
46
+
47
+ def build_sam_vit_t(checkpoint=None):
48
+ prompt_embed_dim = 256
49
+ image_size = 1024
50
+ vit_patch_size = 16
51
+ image_embedding_size = image_size // vit_patch_size
52
+ mobile_sam = Sam(
53
+ image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
54
+ embed_dims=[64, 128, 160, 320],
55
+ depths=[2, 2, 6, 2],
56
+ num_heads=[2, 4, 5, 10],
57
+ window_sizes=[7, 7, 14, 7],
58
+ mlp_ratio=4.,
59
+ drop_rate=0.,
60
+ drop_path_rate=0.0,
61
+ use_checkpoint=False,
62
+ mbconv_expand_ratio=4.0,
63
+ local_conv_size=3,
64
+ layer_lr_decay=0.8
65
+ ),
66
+ prompt_encoder=PromptEncoder(
67
+ embed_dim=prompt_embed_dim,
68
+ image_embedding_size=(image_embedding_size, image_embedding_size),
69
+ input_image_size=(image_size, image_size),
70
+ mask_in_chans=16,
71
+ ),
72
+ mask_decoder=MaskDecoderHQ(
73
+ num_multimask_outputs=3,
74
+ transformer=TwoWayTransformer(
75
+ depth=2,
76
+ embedding_dim=prompt_embed_dim,
77
+ mlp_dim=2048,
78
+ num_heads=8,
79
+ ),
80
+ transformer_dim=prompt_embed_dim,
81
+ iou_head_depth=3,
82
+ iou_head_hidden_dim=256,
83
+ vit_dim=160,
84
+ ),
85
+ pixel_mean=[123.675, 116.28, 103.53],
86
+ pixel_std=[58.395, 57.12, 57.375],
87
+ )
88
+
89
+ mobile_sam.eval()
90
+ if checkpoint is not None:
91
+ with open(checkpoint, "rb") as f:
92
+ device = "cuda" if torch.cuda.is_available() else "cpu"
93
+ state_dict = torch.load(f, map_location=device)
94
+ info = mobile_sam.load_state_dict(state_dict, strict=False)
95
+ print(info)
96
+ for n, p in mobile_sam.named_parameters():
97
+ if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n:
98
+ p.requires_grad = False
99
+ return mobile_sam
100
+
101
+ sam_model_registry = {
102
+ "default": build_sam_vit_h,
103
+ "vit_h": build_sam_vit_h,
104
+ "vit_l": build_sam_vit_l,
105
+ "vit_b": build_sam_vit_b,
106
+ "vit_tiny": build_sam_vit_t
107
+ }
108
+
109
+
110
+ def _build_sam(
111
+ encoder_embed_dim,
112
+ encoder_depth,
113
+ encoder_num_heads,
114
+ encoder_global_attn_indexes,
115
+ checkpoint=None,
116
+ ):
117
+ prompt_embed_dim = 256
118
+ image_size = 1024
119
+ vit_patch_size = 16
120
+ image_embedding_size = image_size // vit_patch_size
121
+ sam = Sam(
122
+ image_encoder=ImageEncoderViT(
123
+ depth=encoder_depth,
124
+ embed_dim=encoder_embed_dim,
125
+ img_size=image_size,
126
+ mlp_ratio=4,
127
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
128
+ num_heads=encoder_num_heads,
129
+ patch_size=vit_patch_size,
130
+ qkv_bias=True,
131
+ use_rel_pos=True,
132
+ global_attn_indexes=encoder_global_attn_indexes,
133
+ window_size=14,
134
+ out_chans=prompt_embed_dim,
135
+ ),
136
+ prompt_encoder=PromptEncoder(
137
+ embed_dim=prompt_embed_dim,
138
+ image_embedding_size=(image_embedding_size, image_embedding_size),
139
+ input_image_size=(image_size, image_size),
140
+ mask_in_chans=16,
141
+ ),
142
+ mask_decoder=MaskDecoderHQ(
143
+ num_multimask_outputs=3,
144
+ transformer=TwoWayTransformer(
145
+ depth=2,
146
+ embedding_dim=prompt_embed_dim,
147
+ mlp_dim=2048,
148
+ num_heads=8,
149
+ ),
150
+ transformer_dim=prompt_embed_dim,
151
+ iou_head_depth=3,
152
+ iou_head_hidden_dim=256,
153
+ vit_dim=encoder_embed_dim,
154
+ ),
155
+ pixel_mean=[123.675, 116.28, 103.53],
156
+ pixel_std=[58.395, 57.12, 57.375],
157
+ )
158
+ sam.eval()
159
+ if checkpoint is not None:
160
+ with open(checkpoint, "rb") as f:
161
+ device = "cuda" if torch.cuda.is_available() else "cpu"
162
+ state_dict = torch.load(f, map_location=device)
163
+ info = sam.load_state_dict(state_dict, strict=False)
164
+ print(info)
165
+ for n, p in sam.named_parameters():
166
+ if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n:
167
+ p.requires_grad = False
168
+
169
+ return sam
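As a usage sketch (checkpoint path and model type below are placeholders), these builders are normally reached through the registry; note that after loading, every parameter except the HQ-specific modules (hf_token, hf_mlp, compress_vit_feat, embedding_encoder, embedding_maskfeature) is frozen:

    import torch
    from segment_anything import sam_model_registry, SamPredictor

    device = "cuda" if torch.cuda.is_available() else "cpu"
    sam = sam_model_registry["vit_l"](checkpoint="sam_hq_vit_l.pth")  # placeholder path
    sam.to(device)
    predictor = SamPredictor(sam)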
sam2.1HQ/sam-hq-main/build/lib/segment_anything/build_sam_baseline.py ADDED
@@ -0,0 +1,156 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+
9
+ from functools import partial
10
+
11
+ from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer, TinyViT
12
+
13
+
14
+ def build_sam_vit_h(checkpoint=None):
15
+ return _build_sam(
16
+ encoder_embed_dim=1280,
17
+ encoder_depth=32,
18
+ encoder_num_heads=16,
19
+ encoder_global_attn_indexes=[7, 15, 23, 31],
20
+ checkpoint=checkpoint,
21
+ )
22
+
23
+
24
+ build_sam = build_sam_vit_h
25
+
26
+
27
+ def build_sam_vit_l(checkpoint=None):
28
+ return _build_sam(
29
+ encoder_embed_dim=1024,
30
+ encoder_depth=24,
31
+ encoder_num_heads=16,
32
+ encoder_global_attn_indexes=[5, 11, 17, 23],
33
+ checkpoint=checkpoint,
34
+ )
35
+
36
+
37
+ def build_sam_vit_b(checkpoint=None):
38
+ return _build_sam(
39
+ encoder_embed_dim=768,
40
+ encoder_depth=12,
41
+ encoder_num_heads=12,
42
+ encoder_global_attn_indexes=[2, 5, 8, 11],
43
+ checkpoint=checkpoint,
44
+ )
45
+
46
+
47
+ def build_sam_vit_t(checkpoint=None):
48
+ prompt_embed_dim = 256
49
+ image_size = 1024
50
+ vit_patch_size = 16
51
+ image_embedding_size = image_size // vit_patch_size
52
+ mobile_sam = Sam(
53
+ image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000,
54
+ embed_dims=[64, 128, 160, 320],
55
+ depths=[2, 2, 6, 2],
56
+ num_heads=[2, 4, 5, 10],
57
+ window_sizes=[7, 7, 14, 7],
58
+ mlp_ratio=4.,
59
+ drop_rate=0.,
60
+ drop_path_rate=0.0,
61
+ use_checkpoint=False,
62
+ mbconv_expand_ratio=4.0,
63
+ local_conv_size=3,
64
+ layer_lr_decay=0.8
65
+ ),
66
+ prompt_encoder=PromptEncoder(
67
+ embed_dim=prompt_embed_dim,
68
+ image_embedding_size=(image_embedding_size, image_embedding_size),
69
+ input_image_size=(image_size, image_size),
70
+ mask_in_chans=16,
71
+ ),
72
+ mask_decoder=MaskDecoder(
73
+ num_multimask_outputs=3,
74
+ transformer=TwoWayTransformer(
75
+ depth=2,
76
+ embedding_dim=prompt_embed_dim,
77
+ mlp_dim=2048,
78
+ num_heads=8,
79
+ ),
80
+ transformer_dim=prompt_embed_dim,
81
+ iou_head_depth=3,
82
+ iou_head_hidden_dim=256,
83
+ ),
84
+ pixel_mean=[123.675, 116.28, 103.53],
85
+ pixel_std=[58.395, 57.12, 57.375],
86
+ )
87
+
88
+ mobile_sam.eval()
89
+ if checkpoint is not None:
90
+ with open(checkpoint, "rb") as f:
91
+ state_dict = torch.load(f)
92
+ mobile_sam.load_state_dict(state_dict)
93
+ return mobile_sam
94
+
95
+ sam_model_registry_baseline = {
96
+ "default": build_sam_vit_h,
97
+ "vit_h": build_sam_vit_h,
98
+ "vit_l": build_sam_vit_l,
99
+ "vit_b": build_sam_vit_b,
100
+ "vit_tiny": build_sam_vit_t
101
+ }
102
+
103
+
104
+ def _build_sam(
105
+ encoder_embed_dim,
106
+ encoder_depth,
107
+ encoder_num_heads,
108
+ encoder_global_attn_indexes,
109
+ checkpoint=None,
110
+ ):
111
+ prompt_embed_dim = 256
112
+ image_size = 1024
113
+ vit_patch_size = 16
114
+ image_embedding_size = image_size // vit_patch_size
115
+ sam = Sam(
116
+ image_encoder=ImageEncoderViT(
117
+ depth=encoder_depth,
118
+ embed_dim=encoder_embed_dim,
119
+ img_size=image_size,
120
+ mlp_ratio=4,
121
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
122
+ num_heads=encoder_num_heads,
123
+ patch_size=vit_patch_size,
124
+ qkv_bias=True,
125
+ use_rel_pos=True,
126
+ global_attn_indexes=encoder_global_attn_indexes,
127
+ window_size=14,
128
+ out_chans=prompt_embed_dim,
129
+ ),
130
+ prompt_encoder=PromptEncoder(
131
+ embed_dim=prompt_embed_dim,
132
+ image_embedding_size=(image_embedding_size, image_embedding_size),
133
+ input_image_size=(image_size, image_size),
134
+ mask_in_chans=16,
135
+ ),
136
+ mask_decoder=MaskDecoder(
137
+ num_multimask_outputs=3,
138
+ transformer=TwoWayTransformer(
139
+ depth=2,
140
+ embedding_dim=prompt_embed_dim,
141
+ mlp_dim=2048,
142
+ num_heads=8,
143
+ ),
144
+ transformer_dim=prompt_embed_dim,
145
+ iou_head_depth=3,
146
+ iou_head_hidden_dim=256,
147
+ ),
148
+ pixel_mean=[123.675, 116.28, 103.53],
149
+ pixel_std=[58.395, 57.12, 57.375],
150
+ )
151
+ sam.eval()
152
+ if checkpoint is not None:
153
+ with open(checkpoint, "rb") as f:
154
+ state_dict = torch.load(f)
155
+ sam.load_state_dict(state_dict)
156
+ return sam
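This baseline registry rebuilds the original SAM (plain MaskDecoder, checkpoint loaded strictly), which makes side-by-side comparisons with the HQ variant straightforward. A brief sketch with placeholder checkpoint paths:

    from segment_anything.build_sam import sam_model_registry
    from segment_anything.build_sam_baseline import sam_model_registry_baseline

    sam_hq = sam_model_registry["vit_b"](checkpoint="sam_hq_vit_b.pth")                   # HQ decoder
    sam_base = sam_model_registry_baseline["vit_b"](checkpoint="sam_vit_b_01ec64.pth")    # original decoder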
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from .sam import Sam
8
+ from .image_encoder import ImageEncoderViT
9
+ from .mask_decoder_hq import MaskDecoderHQ
10
+ from .mask_decoder import MaskDecoder
11
+ from .prompt_encoder import PromptEncoder
12
+ from .transformer import TwoWayTransformer
13
+ from .tiny_vit_sam import TinyViT
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/common.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from typing import Type
11
+
12
+
13
+ class MLPBlock(nn.Module):
14
+ def __init__(
15
+ self,
16
+ embedding_dim: int,
17
+ mlp_dim: int,
18
+ act: Type[nn.Module] = nn.GELU,
19
+ ) -> None:
20
+ super().__init__()
21
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
22
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
23
+ self.act = act()
24
+
25
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
26
+ return self.lin2(self.act(self.lin1(x)))
27
+
28
+
29
+ # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
30
+ # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
31
+ class LayerNorm2d(nn.Module):
32
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
33
+ super().__init__()
34
+ self.weight = nn.Parameter(torch.ones(num_channels))
35
+ self.bias = nn.Parameter(torch.zeros(num_channels))
36
+ self.eps = eps
37
+
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ u = x.mean(1, keepdim=True)
40
+ s = (x - u).pow(2).mean(1, keepdim=True)
41
+ x = (x - u) / torch.sqrt(s + self.eps)
42
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
43
+ return x
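LayerNorm2d here is the channels-first counterpart of nn.LayerNorm: it normalizes each spatial position over its channel vector of an NCHW tensor. A quick sanity-check sketch (values are illustrative):

    import torch
    from segment_anything.modeling.common import LayerNorm2d

    ln = LayerNorm2d(num_channels=256)
    x = torch.randn(2, 256, 64, 64)   # NCHW feature map
    y = ln(x)
    # Per-position channel statistics are ~zero mean / unit variance before the
    # learnable affine parameters (initialized to weight=1, bias=0) shift them.
    print(y.mean(dim=1).abs().max(), y.var(dim=1, unbiased=False).mean())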
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/image_encoder.py ADDED
@@ -0,0 +1,398 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from typing import Optional, Tuple, Type
12
+
13
+ from .common import LayerNorm2d, MLPBlock
14
+
15
+
16
+ # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
17
+ class ImageEncoderViT(nn.Module):
18
+ def __init__(
19
+ self,
20
+ img_size: int = 1024,
21
+ patch_size: int = 16,
22
+ in_chans: int = 3,
23
+ embed_dim: int = 768,
24
+ depth: int = 12,
25
+ num_heads: int = 12,
26
+ mlp_ratio: float = 4.0,
27
+ out_chans: int = 256,
28
+ qkv_bias: bool = True,
29
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
30
+ act_layer: Type[nn.Module] = nn.GELU,
31
+ use_abs_pos: bool = True,
32
+ use_rel_pos: bool = False,
33
+ rel_pos_zero_init: bool = True,
34
+ window_size: int = 0,
35
+ global_attn_indexes: Tuple[int, ...] = (),
36
+ ) -> None:
37
+ """
38
+ Args:
39
+ img_size (int): Input image size.
40
+ patch_size (int): Patch size.
41
+ in_chans (int): Number of input image channels.
42
+ embed_dim (int): Patch embedding dimension.
43
+ depth (int): Depth of ViT.
44
+ num_heads (int): Number of attention heads in each ViT block.
45
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
46
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
47
+ norm_layer (nn.Module): Normalization layer.
48
+ act_layer (nn.Module): Activation layer.
49
+ use_abs_pos (bool): If True, use absolute positional embeddings.
50
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
51
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
52
+ window_size (int): Window size for window attention blocks.
53
+ global_attn_indexes (list): Indexes for blocks using global attention.
54
+ """
55
+ super().__init__()
56
+ self.img_size = img_size
57
+
58
+ self.patch_embed = PatchEmbed(
59
+ kernel_size=(patch_size, patch_size),
60
+ stride=(patch_size, patch_size),
61
+ in_chans=in_chans,
62
+ embed_dim=embed_dim,
63
+ )
64
+
65
+ self.pos_embed: Optional[nn.Parameter] = None
66
+ if use_abs_pos:
67
+ # Initialize absolute positional embedding with pretrain image size.
68
+ self.pos_embed = nn.Parameter(
69
+ torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
70
+ )
71
+
72
+ self.blocks = nn.ModuleList()
73
+ for i in range(depth):
74
+ block = Block(
75
+ dim=embed_dim,
76
+ num_heads=num_heads,
77
+ mlp_ratio=mlp_ratio,
78
+ qkv_bias=qkv_bias,
79
+ norm_layer=norm_layer,
80
+ act_layer=act_layer,
81
+ use_rel_pos=use_rel_pos,
82
+ rel_pos_zero_init=rel_pos_zero_init,
83
+ window_size=window_size if i not in global_attn_indexes else 0,
84
+ input_size=(img_size // patch_size, img_size // patch_size),
85
+ )
86
+ self.blocks.append(block)
87
+
88
+ self.neck = nn.Sequential(
89
+ nn.Conv2d(
90
+ embed_dim,
91
+ out_chans,
92
+ kernel_size=1,
93
+ bias=False,
94
+ ),
95
+ LayerNorm2d(out_chans),
96
+ nn.Conv2d(
97
+ out_chans,
98
+ out_chans,
99
+ kernel_size=3,
100
+ padding=1,
101
+ bias=False,
102
+ ),
103
+ LayerNorm2d(out_chans),
104
+ )
105
+
106
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
107
+ x = self.patch_embed(x)
108
+ if self.pos_embed is not None:
109
+ x = x + self.pos_embed
110
+
111
+ interm_embeddings = []
112
+ for blk in self.blocks:
113
+ x = blk(x)
114
+ if blk.window_size == 0:
115
+ interm_embeddings.append(x)
116
+
117
+ x = self.neck(x.permute(0, 3, 1, 2))
118
+
119
+ return x, interm_embeddings
120
+
121
+
122
+ class Block(nn.Module):
123
+ """Transformer blocks with support of window attention and residual propagation blocks"""
124
+
125
+ def __init__(
126
+ self,
127
+ dim: int,
128
+ num_heads: int,
129
+ mlp_ratio: float = 4.0,
130
+ qkv_bias: bool = True,
131
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
132
+ act_layer: Type[nn.Module] = nn.GELU,
133
+ use_rel_pos: bool = False,
134
+ rel_pos_zero_init: bool = True,
135
+ window_size: int = 0,
136
+ input_size: Optional[Tuple[int, int]] = None,
137
+ ) -> None:
138
+ """
139
+ Args:
140
+ dim (int): Number of input channels.
141
+ num_heads (int): Number of attention heads in each ViT block.
142
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
143
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
144
+ norm_layer (nn.Module): Normalization layer.
145
+ act_layer (nn.Module): Activation layer.
146
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
147
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
148
+ window_size (int): Window size for window attention blocks. If it equals 0, then
149
+ use global attention.
150
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
151
+ positional parameter size.
152
+ """
153
+ super().__init__()
154
+ self.norm1 = norm_layer(dim)
155
+ self.attn = Attention(
156
+ dim,
157
+ num_heads=num_heads,
158
+ qkv_bias=qkv_bias,
159
+ use_rel_pos=use_rel_pos,
160
+ rel_pos_zero_init=rel_pos_zero_init,
161
+ input_size=input_size if window_size == 0 else (window_size, window_size),
162
+ )
163
+
164
+ self.norm2 = norm_layer(dim)
165
+ self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
166
+
167
+ self.window_size = window_size
168
+
169
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
170
+ shortcut = x
171
+ x = self.norm1(x)
172
+ # Window partition
173
+ if self.window_size > 0:
174
+ H, W = x.shape[1], x.shape[2]
175
+ x, pad_hw = window_partition(x, self.window_size)
176
+
177
+ x = self.attn(x)
178
+ # Reverse window partition
179
+ if self.window_size > 0:
180
+ x = window_unpartition(x, self.window_size, pad_hw, (H, W))
181
+
182
+ x = shortcut + x
183
+ x = x + self.mlp(self.norm2(x))
184
+
185
+ return x
186
+
187
+
188
+ class Attention(nn.Module):
189
+ """Multi-head Attention block with relative position embeddings."""
190
+
191
+ def __init__(
192
+ self,
193
+ dim: int,
194
+ num_heads: int = 8,
195
+ qkv_bias: bool = True,
196
+ use_rel_pos: bool = False,
197
+ rel_pos_zero_init: bool = True,
198
+ input_size: Optional[Tuple[int, int]] = None,
199
+ ) -> None:
200
+ """
201
+ Args:
202
+ dim (int): Number of input channels.
203
+ num_heads (int): Number of attention heads.
204
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
205
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
206
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
207
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
208
+ positional parameter size.
209
+ """
210
+ super().__init__()
211
+ self.num_heads = num_heads
212
+ head_dim = dim // num_heads
213
+ self.scale = head_dim**-0.5
214
+
215
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
216
+ self.proj = nn.Linear(dim, dim)
217
+
218
+ self.use_rel_pos = use_rel_pos
219
+ if self.use_rel_pos:
220
+ assert (
221
+ input_size is not None
222
+ ), "Input size must be provided if using relative positional encoding."
223
+ # initialize relative positional embeddings
224
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
225
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
226
+
227
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
228
+ B, H, W, _ = x.shape
229
+ # qkv with shape (3, B, nHead, H * W, C)
230
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
231
+ # q, k, v with shape (B * nHead, H * W, C)
232
+ q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
233
+
234
+ attn = (q * self.scale) @ k.transpose(-2, -1)
235
+
236
+ if self.use_rel_pos:
237
+ attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
238
+
239
+ attn = attn.softmax(dim=-1)
240
+ x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
241
+ x = self.proj(x)
242
+
243
+ return x
244
+
245
+
246
+ def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
247
+ """
248
+ Partition into non-overlapping windows with padding if needed.
249
+ Args:
250
+ x (tensor): input tokens with [B, H, W, C].
251
+ window_size (int): window size.
252
+
253
+ Returns:
254
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
255
+ (Hp, Wp): padded height and width before partition
256
+ """
257
+ B, H, W, C = x.shape
258
+
259
+ pad_h = (window_size - H % window_size) % window_size
260
+ pad_w = (window_size - W % window_size) % window_size
261
+ if pad_h > 0 or pad_w > 0:
262
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
263
+ Hp, Wp = H + pad_h, W + pad_w
264
+
265
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
266
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
267
+ return windows, (Hp, Wp)
268
+
269
+
270
+ def window_unpartition(
271
+ windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
272
+ ) -> torch.Tensor:
273
+ """
274
+ Window unpartition into original sequences and removing padding.
275
+ Args:
276
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
277
+ window_size (int): window size.
278
+ pad_hw (Tuple): padded height and width (Hp, Wp).
279
+ hw (Tuple): original height and width (H, W) before padding.
280
+
281
+ Returns:
282
+ x: unpartitioned sequences with [B, H, W, C].
283
+ """
284
+ Hp, Wp = pad_hw
285
+ H, W = hw
286
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
287
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
288
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
289
+
290
+ if Hp > H or Wp > W:
291
+ x = x[:, :H, :W, :].contiguous()
292
+ return x
293
+
294
+
295
+ def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
296
+ """
297
+ Get relative positional embeddings according to the relative positions of
298
+ query and key sizes.
299
+ Args:
300
+ q_size (int): size of query q.
301
+ k_size (int): size of key k.
302
+ rel_pos (Tensor): relative position embeddings (L, C).
303
+
304
+ Returns:
305
+ Extracted positional embeddings according to relative positions.
306
+ """
307
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
308
+ # Interpolate rel pos if needed.
309
+ if rel_pos.shape[0] != max_rel_dist:
310
+ # Interpolate rel pos.
311
+ rel_pos_resized = F.interpolate(
312
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
313
+ size=max_rel_dist,
314
+ mode="linear",
315
+ )
316
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
317
+ else:
318
+ rel_pos_resized = rel_pos
319
+
320
+ # Scale the coords with short length if shapes for q and k are different.
321
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
322
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
323
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
324
+
325
+ return rel_pos_resized[relative_coords.long()]
326
+
327
+
328
+ def add_decomposed_rel_pos(
329
+ attn: torch.Tensor,
330
+ q: torch.Tensor,
331
+ rel_pos_h: torch.Tensor,
332
+ rel_pos_w: torch.Tensor,
333
+ q_size: Tuple[int, int],
334
+ k_size: Tuple[int, int],
335
+ ) -> torch.Tensor:
336
+ """
337
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
338
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
339
+ Args:
340
+ attn (Tensor): attention map.
341
+ q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
342
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
343
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
344
+ q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
345
+ k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
346
+
347
+ Returns:
348
+ attn (Tensor): attention map with added relative positional embeddings.
349
+ """
350
+ q_h, q_w = q_size
351
+ k_h, k_w = k_size
352
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
353
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
354
+
355
+ B, _, dim = q.shape
356
+ r_q = q.reshape(B, q_h, q_w, dim)
357
+ rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
358
+ rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
359
+
360
+ attn = (
361
+ attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
362
+ ).view(B, q_h * q_w, k_h * k_w)
363
+
364
+ return attn
365
+
366
+
367
+ class PatchEmbed(nn.Module):
368
+ """
369
+ Image to Patch Embedding.
370
+ """
371
+
372
+ def __init__(
373
+ self,
374
+ kernel_size: Tuple[int, int] = (16, 16),
375
+ stride: Tuple[int, int] = (16, 16),
376
+ padding: Tuple[int, int] = (0, 0),
377
+ in_chans: int = 3,
378
+ embed_dim: int = 768,
379
+ ) -> None:
380
+ """
381
+ Args:
382
+ kernel_size (Tuple): kernel size of the projection layer.
383
+ stride (Tuple): stride of the projection layer.
384
+ padding (Tuple): padding size of the projection layer.
385
+ in_chans (int): Number of input image channels.
386
+ embed_dim (int): Patch embedding dimension.
387
+ """
388
+ super().__init__()
389
+
390
+ self.proj = nn.Conv2d(
391
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
392
+ )
393
+
394
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
395
+ x = self.proj(x)
396
+ # B C H W -> B H W C
397
+ x = x.permute(0, 2, 3, 1)
398
+ return x
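For orientation, a shape sketch (ViT-B settings assumed, random weights): the encoder returns the necked 256-channel embedding plus the intermediate features captured after every global-attention block, which MaskDecoderHQ later consumes:

    import torch
    from functools import partial
    from segment_anything.modeling import ImageEncoderViT

    enc = ImageEncoderViT(
        depth=12, embed_dim=768, num_heads=12, img_size=1024, patch_size=16,
        mlp_ratio=4, norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
        qkv_bias=True, use_rel_pos=True, global_attn_indexes=[2, 5, 8, 11],
        window_size=14, out_chans=256,
    )
    feats, interm = enc(torch.zeros(1, 3, 1024, 1024))
    print(feats.shape)                   # torch.Size([1, 256, 64, 64])
    print(len(interm), interm[0].shape)  # 4 torch.Size([1, 64, 64, 768]); one per global block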
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/mask_decoder.py ADDED
@@ -0,0 +1,178 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import List, Tuple, Type
12
+
13
+ from .common import LayerNorm2d
14
+
15
+
16
+ class MaskDecoder(nn.Module):
17
+ def __init__(
18
+ self,
19
+ *,
20
+ transformer_dim: int,
21
+ transformer: nn.Module,
22
+ num_multimask_outputs: int = 3,
23
+ activation: Type[nn.Module] = nn.GELU,
24
+ iou_head_depth: int = 3,
25
+ iou_head_hidden_dim: int = 256,
26
+ ) -> None:
27
+ """
28
+ Predicts masks given an image and prompt embeddings, using a
29
+ transformer architecture.
30
+
31
+ Arguments:
32
+ transformer_dim (int): the channel dimension of the transformer
33
+ transformer (nn.Module): the transformer used to predict masks
34
+ num_multimask_outputs (int): the number of masks to predict
35
+ when disambiguating masks
36
+ activation (nn.Module): the type of activation to use when
37
+ upscaling masks
38
+ iou_head_depth (int): the depth of the MLP used to predict
39
+ mask quality
40
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
41
+ used to predict mask quality
42
+ """
43
+ super().__init__()
44
+ self.transformer_dim = transformer_dim
45
+ self.transformer = transformer
46
+
47
+ self.num_multimask_outputs = num_multimask_outputs
48
+
49
+ self.iou_token = nn.Embedding(1, transformer_dim)
50
+ self.num_mask_tokens = num_multimask_outputs + 1
51
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
52
+
53
+ self.output_upscaling = nn.Sequential(
54
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
55
+ LayerNorm2d(transformer_dim // 4),
56
+ activation(),
57
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
58
+ activation(),
59
+ )
60
+ self.output_hypernetworks_mlps = nn.ModuleList(
61
+ [
62
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
63
+ for i in range(self.num_mask_tokens)
64
+ ]
65
+ )
66
+
67
+ self.iou_prediction_head = MLP(
68
+ transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
69
+ )
70
+
71
+ def forward(
72
+ self,
73
+ image_embeddings: torch.Tensor,
74
+ image_pe: torch.Tensor,
75
+ sparse_prompt_embeddings: torch.Tensor,
76
+ dense_prompt_embeddings: torch.Tensor,
77
+ multimask_output: bool,
78
+ hq_token_only: bool,
79
+ interm_embeddings: torch.Tensor,
80
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
81
+ """
82
+ Predict masks given image and prompt embeddings.
83
+
84
+ Arguments:
85
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
86
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
87
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
88
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
89
+ multimask_output (bool): Whether to return multiple masks or a single
90
+ mask.
91
+
92
+ Returns:
93
+ torch.Tensor: batched predicted masks
94
+ torch.Tensor: batched predictions of mask quality
95
+ """
96
+ masks, iou_pred = self.predict_masks(
97
+ image_embeddings=image_embeddings,
98
+ image_pe=image_pe,
99
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
100
+ dense_prompt_embeddings=dense_prompt_embeddings,
101
+ )
102
+
103
+ # Select the correct mask or masks for output
104
+ if multimask_output:
105
+ mask_slice = slice(1, None)
106
+ else:
107
+ mask_slice = slice(0, 1)
108
+ masks = masks[:, mask_slice, :, :]
109
+ iou_pred = iou_pred[:, mask_slice]
110
+
111
+ # Prepare output
112
+ return masks, iou_pred
113
+
114
+ def predict_masks(
115
+ self,
116
+ image_embeddings: torch.Tensor,
117
+ image_pe: torch.Tensor,
118
+ sparse_prompt_embeddings: torch.Tensor,
119
+ dense_prompt_embeddings: torch.Tensor,
120
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
121
+ """Predicts masks. See 'forward' for more details."""
122
+ # Concatenate output tokens
123
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
124
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
125
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
126
+
127
+ # Expand per-image data in batch direction to be per-mask
128
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
129
+ src = src + dense_prompt_embeddings
130
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
131
+ b, c, h, w = src.shape
132
+
133
+ # Run the transformer
134
+ hs, src = self.transformer(src, pos_src, tokens)
135
+ iou_token_out = hs[:, 0, :]
136
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
137
+
138
+ # Upscale mask embeddings and predict masks using the mask tokens
139
+ src = src.transpose(1, 2).view(b, c, h, w)
140
+ upscaled_embedding = self.output_upscaling(src)
141
+ hyper_in_list: List[torch.Tensor] = []
142
+ for i in range(self.num_mask_tokens):
143
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
144
+ hyper_in = torch.stack(hyper_in_list, dim=1)
145
+ b, c, h, w = upscaled_embedding.shape
146
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
147
+
148
+ # Generate mask quality predictions
149
+ iou_pred = self.iou_prediction_head(iou_token_out)
150
+
151
+ return masks, iou_pred
152
+
153
+
154
+ # Lightly adapted from
155
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
156
+ class MLP(nn.Module):
157
+ def __init__(
158
+ self,
159
+ input_dim: int,
160
+ hidden_dim: int,
161
+ output_dim: int,
162
+ num_layers: int,
163
+ sigmoid_output: bool = False,
164
+ ) -> None:
165
+ super().__init__()
166
+ self.num_layers = num_layers
167
+ h = [hidden_dim] * (num_layers - 1)
168
+ self.layers = nn.ModuleList(
169
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
170
+ )
171
+ self.sigmoid_output = sigmoid_output
172
+
173
+ def forward(self, x):
174
+ for i, layer in enumerate(self.layers):
175
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
176
+ if self.sigmoid_output:
177
+ x = F.sigmoid(x)
178
+ return x
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/mask_decoder_hq.py ADDED
@@ -0,0 +1,232 @@
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # Modified by HQ-SAM team
3
+ # All rights reserved.
4
+
5
+ # This source code is licensed under the license found in the
6
+ # LICENSE file in the root directory of this source tree.
7
+
8
+ import torch
9
+ from torch import nn
10
+ from torch.nn import functional as F
11
+
12
+ from typing import List, Tuple, Type
13
+
14
+ from .common import LayerNorm2d
15
+
16
+
17
+ class MaskDecoderHQ(nn.Module):
18
+ def __init__(
19
+ self,
20
+ *,
21
+ transformer_dim: int,
22
+ transformer: nn.Module,
23
+ num_multimask_outputs: int = 3,
24
+ activation: Type[nn.Module] = nn.GELU,
25
+ iou_head_depth: int = 3,
26
+ iou_head_hidden_dim: int = 256,
27
+ vit_dim: int = 1024,
28
+ ) -> None:
29
+ """
30
+ Predicts masks given an image and prompt embeddings, using a
31
+ transformer architecture.
32
+
33
+ Arguments:
34
+ transformer_dim (int): the channel dimension of the transformer
35
+ transformer (nn.Module): the transformer used to predict masks
36
+ num_multimask_outputs (int): the number of masks to predict
37
+ when disambiguating masks
38
+ activation (nn.Module): the type of activation to use when
39
+ upscaling masks
40
+ iou_head_depth (int): the depth of the MLP used to predict
41
+ mask quality
42
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
43
+ used to predict mask quality
44
+ """
45
+ super().__init__()
46
+ self.transformer_dim = transformer_dim
47
+ self.transformer = transformer
48
+
49
+ self.num_multimask_outputs = num_multimask_outputs
50
+
51
+ self.iou_token = nn.Embedding(1, transformer_dim)
52
+ self.num_mask_tokens = num_multimask_outputs + 1
53
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
54
+
55
+ self.output_upscaling = nn.Sequential(
56
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
57
+ LayerNorm2d(transformer_dim // 4),
58
+ activation(),
59
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
60
+ activation(),
61
+ )
62
+ self.output_hypernetworks_mlps = nn.ModuleList(
63
+ [
64
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
65
+ for i in range(self.num_mask_tokens)
66
+ ]
67
+ )
68
+
69
+ self.iou_prediction_head = MLP(
70
+ transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
71
+ )
72
+
73
+ # HQ-SAM parameters
74
+ self.hf_token = nn.Embedding(1, transformer_dim) # HQ-Output-Token
75
+ self.hf_mlp = MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) # corresponding new MLP layer for HQ-Output-Token
76
+ self.num_mask_tokens = self.num_mask_tokens + 1
77
+
78
+ # three conv fusion layers for obtaining HQ-Feature
79
+ self.compress_vit_feat = nn.Sequential(
80
+ nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2),
81
+ LayerNorm2d(transformer_dim),
82
+ nn.GELU(),
83
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 8, kernel_size=2, stride=2))
84
+
85
+ self.embedding_encoder = nn.Sequential(
86
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
87
+ LayerNorm2d(transformer_dim // 4),
88
+ nn.GELU(),
89
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
90
+ )
91
+ self.embedding_maskfeature = nn.Sequential(
92
+ nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1),
93
+ LayerNorm2d(transformer_dim // 4),
94
+ nn.GELU(),
95
+ nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1))
96
+
97
+
98
+
99
+ def forward(
100
+ self,
101
+ image_embeddings: torch.Tensor,
102
+ image_pe: torch.Tensor,
103
+ sparse_prompt_embeddings: torch.Tensor,
104
+ dense_prompt_embeddings: torch.Tensor,
105
+ multimask_output: bool,
106
+ hq_token_only: bool,
107
+ interm_embeddings: torch.Tensor,
108
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
109
+ """
110
+ Predict masks given image and prompt embeddings.
111
+
112
+ Arguments:
113
+ image_embeddings (torch.Tensor): the embeddings from the ViT image encoder
114
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
115
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
116
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
117
+ multimask_output (bool): Whether to return multiple masks or a single
118
+ mask.
119
+
120
+ Returns:
121
+ torch.Tensor: batched predicted masks
122
+ torch.Tensor: batched predictions of mask quality
123
+ """
124
+ vit_features = interm_embeddings[0].permute(0, 3, 1, 2) # early-layer ViT feature, after 1st global attention block in ViT
125
+ hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat(vit_features)
126
+
127
+ masks, iou_pred = self.predict_masks(
128
+ image_embeddings=image_embeddings,
129
+ image_pe=image_pe,
130
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
131
+ dense_prompt_embeddings=dense_prompt_embeddings,
132
+ hq_features=hq_features,
133
+ )
134
+
135
+ # Select the correct mask or masks for output
136
+ if multimask_output:
137
+ # mask with highest score
138
+ mask_slice = slice(1,self.num_mask_tokens-1)
139
+ iou_pred = iou_pred[:, mask_slice]
140
+ iou_pred, max_iou_idx = torch.max(iou_pred,dim=1)
141
+ iou_pred = iou_pred.unsqueeze(1)
142
+ masks_multi = masks[:, mask_slice, :, :]
143
+ masks_sam = masks_multi[torch.arange(masks_multi.size(0)),max_iou_idx].unsqueeze(1)
144
+ else:
145
+ # single mask output (default)
146
+ mask_slice = slice(0, 1)
147
+ iou_pred = iou_pred[:,mask_slice]
148
+ masks_sam = masks[:,mask_slice]
149
+
150
+ masks_hq = masks[:,slice(self.num_mask_tokens-1, self.num_mask_tokens)]
151
+ if hq_token_only:
152
+ masks = masks_hq
153
+ else:
154
+ masks = masks_sam + masks_hq
155
+ # Prepare output
156
+ return masks, iou_pred
157
+
158
+ def predict_masks(
159
+ self,
160
+ image_embeddings: torch.Tensor,
161
+ image_pe: torch.Tensor,
162
+ sparse_prompt_embeddings: torch.Tensor,
163
+ dense_prompt_embeddings: torch.Tensor,
164
+ hq_features: torch.Tensor,
165
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
166
+ """Predicts masks. See 'forward' for more details."""
167
+ # Concatenate output tokens
168
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], dim=0)
169
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
170
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
171
+
172
+ # Expand per-image data in batch direction to be per-mask
173
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
174
+ src = src + dense_prompt_embeddings
175
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
176
+ b, c, h, w = src.shape
177
+
178
+ # Run the transformer
179
+ hs, src = self.transformer(src, pos_src, tokens)
180
+ iou_token_out = hs[:, 0, :]
181
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
182
+
183
+ # Upscale mask embeddings and predict masks using the mask tokens
184
+ src = src.transpose(1, 2).view(b, c, h, w)
185
+
186
+ upscaled_embedding_sam = self.output_upscaling(src)
187
+ upscaled_embedding_hq = self.embedding_maskfeature(upscaled_embedding_sam) + hq_features.repeat(b,1,1,1)
188
+
189
+ hyper_in_list: List[torch.Tensor] = []
190
+ for i in range(self.num_mask_tokens):
191
+ if i < self.num_mask_tokens - 1:
192
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
193
+ else:
194
+ hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :]))
195
+
196
+ hyper_in = torch.stack(hyper_in_list, dim=1)
197
+ b, c, h, w = upscaled_embedding_sam.shape
198
+
199
+ masks_sam = (hyper_in[:,:self.num_mask_tokens-1] @ upscaled_embedding_sam.view(b, c, h * w)).view(b, -1, h, w)
200
+ masks_sam_hq = (hyper_in[:,self.num_mask_tokens-1:] @ upscaled_embedding_hq.view(b, c, h * w)).view(b, -1, h, w)
201
+ masks = torch.cat([masks_sam,masks_sam_hq],dim=1)
202
+ # Generate mask quality predictions
203
+ iou_pred = self.iou_prediction_head(iou_token_out)
204
+
205
+ return masks, iou_pred
206
+
207
+
208
+ # Lightly adapted from
209
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
210
+ class MLP(nn.Module):
211
+ def __init__(
212
+ self,
213
+ input_dim: int,
214
+ hidden_dim: int,
215
+ output_dim: int,
216
+ num_layers: int,
217
+ sigmoid_output: bool = False,
218
+ ) -> None:
219
+ super().__init__()
220
+ self.num_layers = num_layers
221
+ h = [hidden_dim] * (num_layers - 1)
222
+ self.layers = nn.ModuleList(
223
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
224
+ )
225
+ self.sigmoid_output = sigmoid_output
226
+
227
+ def forward(self, x):
228
+ for i, layer in enumerate(self.layers):
229
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
230
+ if self.sigmoid_output:
231
+ x = F.sigmoid(x)
232
+ return x
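A small end-to-end sketch of the decoder on dummy tensors (ViT-B style vit_dim=768 assumed): with hq_token_only=True the returned mask comes from the HQ token alone, otherwise the HQ correction is added to the selected SAM mask:

    import torch
    from segment_anything.modeling import MaskDecoderHQ, TwoWayTransformer

    dec = MaskDecoderHQ(
        transformer_dim=256,
        transformer=TwoWayTransformer(depth=2, embedding_dim=256, mlp_dim=2048, num_heads=8),
        num_multimask_outputs=3,
        vit_dim=768,
    )
    masks, iou_pred = dec(
        image_embeddings=torch.zeros(1, 256, 64, 64),
        image_pe=torch.zeros(1, 256, 64, 64),
        sparse_prompt_embeddings=torch.zeros(1, 2, 256),    # e.g. one encoded box prompt
        dense_prompt_embeddings=torch.zeros(1, 256, 64, 64),
        multimask_output=False,
        hq_token_only=True,
        interm_embeddings=[torch.zeros(1, 64, 64, 768)],    # early-layer ViT feature
    )
    print(masks.shape, iou_pred.shape)  # torch.Size([1, 1, 256, 256]) torch.Size([1, 1])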
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/prompt_encoder.py ADDED
@@ -0,0 +1,214 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch import nn
10
+
11
+ from typing import Any, Optional, Tuple, Type
12
+
13
+ from .common import LayerNorm2d
14
+
15
+
16
+ class PromptEncoder(nn.Module):
17
+ def __init__(
18
+ self,
19
+ embed_dim: int,
20
+ image_embedding_size: Tuple[int, int],
21
+ input_image_size: Tuple[int, int],
22
+ mask_in_chans: int,
23
+ activation: Type[nn.Module] = nn.GELU,
24
+ ) -> None:
25
+ """
26
+ Encodes prompts for input to SAM's mask decoder.
27
+
28
+ Arguments:
29
+ embed_dim (int): The prompts' embedding dimension
30
+ image_embedding_size (tuple(int, int)): The spatial size of the
31
+ image embedding, as (H, W).
32
+ input_image_size (int): The padded size of the image as input
33
+ to the image encoder, as (H, W).
34
+ mask_in_chans (int): The number of hidden channels used for
35
+ encoding input masks.
36
+ activation (nn.Module): The activation to use when encoding
37
+ input masks.
38
+ """
39
+ super().__init__()
40
+ self.embed_dim = embed_dim
41
+ self.input_image_size = input_image_size
42
+ self.image_embedding_size = image_embedding_size
43
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
44
+
45
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
46
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
47
+ self.point_embeddings = nn.ModuleList(point_embeddings)
48
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
49
+
50
+ self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
51
+ self.mask_downscaling = nn.Sequential(
52
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
53
+ LayerNorm2d(mask_in_chans // 4),
54
+ activation(),
55
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
56
+ LayerNorm2d(mask_in_chans),
57
+ activation(),
58
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
59
+ )
60
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
61
+
62
+ def get_dense_pe(self) -> torch.Tensor:
63
+ """
64
+ Returns the positional encoding used to encode point prompts,
65
+ applied to a dense set of points the shape of the image encoding.
66
+
67
+ Returns:
68
+ torch.Tensor: Positional encoding with shape
69
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
70
+ """
71
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
72
+
73
+ def _embed_points(
74
+ self,
75
+ points: torch.Tensor,
76
+ labels: torch.Tensor,
77
+ pad: bool,
78
+ ) -> torch.Tensor:
79
+ """Embeds point prompts."""
80
+ points = points + 0.5 # Shift to center of pixel
81
+ if pad:
82
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
83
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
84
+ points = torch.cat([points, padding_point], dim=1)
85
+ labels = torch.cat([labels, padding_label], dim=1)
86
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
87
+ point_embedding[labels == -1] = 0.0
88
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
89
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
90
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
91
+ return point_embedding
92
+
93
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
94
+ """Embeds box prompts."""
95
+ boxes = boxes + 0.5 # Shift to center of pixel
96
+ coords = boxes.reshape(-1, 2, 2)
97
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
98
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
99
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
100
+ return corner_embedding
101
+
102
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
103
+ """Embeds mask inputs."""
104
+ mask_embedding = self.mask_downscaling(masks)
105
+ return mask_embedding
106
+
107
+ def _get_batch_size(
108
+ self,
109
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
110
+ boxes: Optional[torch.Tensor],
111
+ masks: Optional[torch.Tensor],
112
+ ) -> int:
113
+ """
114
+ Gets the batch size of the output given the batch size of the input prompts.
115
+ """
116
+ if points is not None:
117
+ return points[0].shape[0]
118
+ elif boxes is not None:
119
+ return boxes.shape[0]
120
+ elif masks is not None:
121
+ return masks.shape[0]
122
+ else:
123
+ return 1
124
+
125
+ def _get_device(self) -> torch.device:
126
+ return self.point_embeddings[0].weight.device
127
+
128
+ def forward(
129
+ self,
130
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
131
+ boxes: Optional[torch.Tensor],
132
+ masks: Optional[torch.Tensor],
133
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
134
+ """
135
+ Embeds different types of prompts, returning both sparse and dense
136
+ embeddings.
137
+
138
+ Arguments:
139
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
140
+ and labels to embed.
141
+ boxes (torch.Tensor or none): boxes to embed
142
+ masks (torch.Tensor or none): masks to embed
143
+
144
+ Returns:
145
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
146
+ BxNx(embed_dim), where N is determined by the number of input points
147
+ and boxes.
148
+ torch.Tensor: dense embeddings for the masks, in the shape
149
+ Bx(embed_dim)x(embed_H)x(embed_W)
150
+ """
151
+ bs = self._get_batch_size(points, boxes, masks)
152
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
153
+ if points is not None:
154
+ coords, labels = points
155
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
156
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
157
+ if boxes is not None:
158
+ box_embeddings = self._embed_boxes(boxes)
159
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
160
+
161
+ if masks is not None:
162
+ dense_embeddings = self._embed_masks(masks)
163
+ else:
164
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
165
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
166
+ )
167
+
168
+ return sparse_embeddings, dense_embeddings
169
+
170
+
171
+ class PositionEmbeddingRandom(nn.Module):
172
+ """
173
+ Positional encoding using random spatial frequencies.
174
+ """
175
+
176
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
177
+ super().__init__()
178
+ if scale is None or scale <= 0.0:
179
+ scale = 1.0
180
+ self.register_buffer(
181
+ "positional_encoding_gaussian_matrix",
182
+ scale * torch.randn((2, num_pos_feats)),
183
+ )
184
+
185
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
186
+ """Positionally encode points that are normalized to [0,1]."""
187
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
188
+ coords = 2 * coords - 1
189
+ coords = coords @ self.positional_encoding_gaussian_matrix
190
+ coords = 2 * np.pi * coords
191
+ # outputs d_1 x ... x d_n x C shape
192
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
193
+
194
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
195
+ """Generate positional encoding for a grid of the specified size."""
196
+ h, w = size
197
+ device: Any = self.positional_encoding_gaussian_matrix.device
198
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
199
+ y_embed = grid.cumsum(dim=0) - 0.5
200
+ x_embed = grid.cumsum(dim=1) - 0.5
201
+ y_embed = y_embed / h
202
+ x_embed = x_embed / w
203
+
204
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
205
+ return pe.permute(2, 0, 1) # C x H x W
206
+
207
+ def forward_with_coords(
208
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int]
209
+ ) -> torch.Tensor:
210
+ """Positionally encode points that are not normalized to [0,1]."""
211
+ coords = coords_input.clone()
212
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
213
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
214
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
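A short sketch of encoding a single positive click for a 1024x1024 model input (values are illustrative): the sparse embedding gets a padding point appended because no box is given, and the dense embedding falls back to the learned no-mask token:

    import torch
    from segment_anything.modeling import PromptEncoder

    pe = PromptEncoder(
        embed_dim=256,
        image_embedding_size=(64, 64),
        input_image_size=(1024, 1024),
        mask_in_chans=16,
    )
    coords = torch.tensor([[[512.0, 384.0]]])   # (B=1, N=1, 2), pixel coords in the input frame
    labels = torch.tensor([[1]])                # 1 = foreground click, 0 = background
    sparse, dense = pe(points=(coords, labels), boxes=None, masks=None)
    print(sparse.shape, dense.shape)  # torch.Size([1, 2, 256]) torch.Size([1, 256, 64, 64])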
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/sam.py ADDED
@@ -0,0 +1,177 @@
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import Any, Dict, List, Tuple
12
+
13
+ from .image_encoder import ImageEncoderViT
14
+ from .mask_decoder import MaskDecoder
15
+ from .prompt_encoder import PromptEncoder
16
+
17
+
18
+ class Sam(nn.Module):
19
+ mask_threshold: float = 0.0
20
+ image_format: str = "RGB"
21
+
22
+ def __init__(
23
+ self,
24
+ image_encoder: ImageEncoderViT,
25
+ prompt_encoder: PromptEncoder,
26
+ mask_decoder: MaskDecoder,
27
+ pixel_mean: List[float] = [123.675, 116.28, 103.53],
28
+ pixel_std: List[float] = [58.395, 57.12, 57.375],
29
+ ) -> None:
30
+ """
31
+ SAM predicts object masks from an image and input prompts.
32
+
33
+ Arguments:
34
+ image_encoder (ImageEncoderViT): The backbone used to encode the
35
+ image into image embeddings that allow for efficient mask prediction.
36
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
37
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings
38
+ and encoded prompts.
39
+ pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
40
+ pixel_std (list(float)): Std values for normalizing pixels in the input image.
41
+ """
42
+ super().__init__()
43
+ self.image_encoder = image_encoder
44
+ self.prompt_encoder = prompt_encoder
45
+ self.mask_decoder = mask_decoder
46
+ self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
47
+ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
48
+
49
+ @property
50
+ def device(self) -> Any:
51
+ return self.pixel_mean.device
52
+
53
+ def forward(
54
+ self,
55
+ batched_input: List[Dict[str, Any]],
56
+ multimask_output: bool,
57
+ hq_token_only: bool =False,
58
+ ) -> List[Dict[str, torch.Tensor]]:
59
+ """
60
+ Predicts masks end-to-end from provided images and prompts.
61
+ If prompts are not known in advance, using SamPredictor is
62
+ recommended over calling the model directly.
63
+
64
+ Arguments:
65
+ batched_input (list(dict)): A list over input images, each a
66
+ dictionary with the following keys. A prompt key can be
67
+ excluded if it is not present.
68
+ 'image': The image as a torch tensor in 3xHxW format,
69
+ already transformed for input to the model.
70
+ 'original_size': (tuple(int, int)) The original size of
71
+ the image before transformation, as (H, W).
72
+ 'point_coords': (torch.Tensor) Batched point prompts for
73
+ this image, with shape BxNx2. Already transformed to the
74
+ input frame of the model.
75
+ 'point_labels': (torch.Tensor) Batched labels for point prompts,
76
+ with shape BxN.
77
+ 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
78
+ Already transformed to the input frame of the model.
79
+ 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
80
+ in the form Bx1xHxW.
81
+ multimask_output (bool): Whether the model should predict multiple
82
+ disambiguating masks, or return a single mask.
83
+
84
+ Returns:
85
+ (list(dict)): A list over input images, where each element is
86
+ a dictionary with the following keys.
87
+ 'masks': (torch.Tensor) Batched binary mask predictions,
88
+ with shape BxCxHxW, where B is the number of input prompts,
89
+ C is determined by multimask_output, and (H, W) is the
90
+ original size of the image.
91
+ 'iou_predictions': (torch.Tensor) The model's predictions
92
+ of mask quality, in shape BxC.
93
+ 'low_res_logits': (torch.Tensor) Low resolution logits with
94
+ shape BxCxHxW, where H=W=256. Can be passed as mask input
95
+ to subsequent iterations of prediction.
96
+ """
97
+ input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
98
+ image_embeddings, interm_embeddings = self.image_encoder(input_images)
99
+ interm_embeddings = interm_embeddings[0] # early layer
100
+
101
+ outputs = []
102
+ for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings):
103
+ if "point_coords" in image_record:
104
+ points = (image_record["point_coords"], image_record["point_labels"])
105
+ else:
106
+ points = None
107
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
108
+ points=points,
109
+ boxes=image_record.get("boxes", None),
110
+ masks=image_record.get("mask_inputs", None),
111
+ )
112
+ low_res_masks, iou_predictions = self.mask_decoder(
113
+ image_embeddings=curr_embedding.unsqueeze(0),
114
+ image_pe=self.prompt_encoder.get_dense_pe(),
115
+ sparse_prompt_embeddings=sparse_embeddings,
116
+ dense_prompt_embeddings=dense_embeddings,
117
+ multimask_output=multimask_output,
118
+ hq_token_only=hq_token_only,
119
+ interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0),
120
+ )
121
+ masks = self.postprocess_masks(
122
+ low_res_masks,
123
+ input_size=image_record["image"].shape[-2:],
124
+ original_size=image_record["original_size"],
125
+ )
126
+ masks = masks > self.mask_threshold
127
+ outputs.append(
128
+ {
129
+ "masks": masks,
130
+ "iou_predictions": iou_predictions,
131
+ "low_res_logits": low_res_masks,
132
+ }
133
+ )
134
+ return outputs
135
+
136
+ def postprocess_masks(
137
+ self,
138
+ masks: torch.Tensor,
139
+ input_size: Tuple[int, ...],
140
+ original_size: Tuple[int, ...],
141
+ ) -> torch.Tensor:
142
+ """
143
+ Remove padding and upscale masks to the original image size.
144
+
145
+ Arguments:
146
+ masks (torch.Tensor): Batched masks from the mask_decoder,
147
+ in BxCxHxW format.
148
+ input_size (tuple(int, int)): The size of the image input to the
149
+ model, in (H, W) format. Used to remove padding.
150
+ original_size (tuple(int, int)): The original size of the image
151
+ before resizing for input to the model, in (H, W) format.
152
+
153
+ Returns:
154
+ (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
155
+ is given by original_size.
156
+ """
157
+ masks = F.interpolate(
158
+ masks,
159
+ (self.image_encoder.img_size, self.image_encoder.img_size),
160
+ mode="bilinear",
161
+ align_corners=False,
162
+ )
163
+ masks = masks[..., : input_size[0], : input_size[1]]
164
+ masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
165
+ return masks
166
+
167
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
168
+ """Normalize pixel values and pad to a square input."""
169
+ # Normalize colors
170
+ x = (x - self.pixel_mean) / self.pixel_std
171
+
172
+ # Pad
173
+ h, w = x.shape[-2:]
174
+ padh = self.image_encoder.img_size - h
175
+ padw = self.image_encoder.img_size - w
176
+ x = F.pad(x, (0, padw, 0, padh))
177
+ return x
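
For reference, the batched interface documented above can be exercised directly; the following is a minimal sketch that is not part of this diff. It assumes the package's `sam_model_registry` entry point and a locally downloaded HQ-SAM ViT-L checkpoint (the filename is a placeholder), and it passes the `hq_token_only` flag that the decoder call above forwards.

```python
import numpy as np
import torch
from segment_anything import sam_model_registry
from segment_anything.utils.transforms import ResizeLongestSide

# Placeholder checkpoint path: point this at a downloaded HQ-SAM ViT-L weight file.
sam = sam_model_registry["vit_l"](checkpoint="sam_hq_vit_l.pth")
transform = ResizeLongestSide(sam.image_encoder.img_size)

image = np.zeros((768, 1024, 3), dtype=np.uint8)  # stand-in for a real HWC uint8 RGB image
input_image = torch.as_tensor(transform.apply_image(image)).permute(2, 0, 1).float()
coords = transform.apply_coords(np.array([[[500.0, 375.0]]]), image.shape[:2])

batched_input = [{
    "image": input_image,                                         # 3xHxW, resized to the model's input frame
    "original_size": image.shape[:2],                             # (H, W) before transformation
    "point_coords": torch.as_tensor(coords, dtype=torch.float),   # BxNx2, already in the input frame
    "point_labels": torch.tensor([[1]]),                          # BxN, 1 = foreground click
}]
with torch.no_grad():
    outputs = sam(batched_input, multimask_output=False, hq_token_only=False)
print(outputs[0]["masks"].shape)  # BxCx768x1024 boolean masks at the original resolution
```

As the docstring notes, `SamPredictor` is the easier path when prompts arrive interactively; a predictor sketch appears after the `predictor.py` section below.
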
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/tiny_vit_sam.py ADDED
@@ -0,0 +1,724 @@
1
+ # --------------------------------------------------------
2
+ # TinyViT Model Architecture
3
+ # Copyright (c) 2022 Microsoft
4
+ # Adapted from LeViT and Swin Transformer
5
+ # LeViT: (https://github.com/facebookresearch/levit)
6
+ # Swin: (https://github.com/microsoft/swin-transformer)
7
+ # Build the TinyViT Model
8
+ # --------------------------------------------------------
9
+
10
+ import itertools
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ import torch.utils.checkpoint as checkpoint
15
+ from timm.models.layers import DropPath as TimmDropPath,\
16
+ to_2tuple, trunc_normal_
17
+ from timm.models.registry import register_model
18
+ from typing import Tuple
19
+
20
+
21
+ class Conv2d_BN(torch.nn.Sequential):
22
+ def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
23
+ groups=1, bn_weight_init=1):
24
+ super().__init__()
25
+ self.add_module('c', torch.nn.Conv2d(
26
+ a, b, ks, stride, pad, dilation, groups, bias=False))
27
+ bn = torch.nn.BatchNorm2d(b)
28
+ torch.nn.init.constant_(bn.weight, bn_weight_init)
29
+ torch.nn.init.constant_(bn.bias, 0)
30
+ self.add_module('bn', bn)
31
+
32
+ @torch.no_grad()
33
+ def fuse(self):
34
+ c, bn = self._modules.values()
35
+ w = bn.weight / (bn.running_var + bn.eps)**0.5
36
+ w = c.weight * w[:, None, None, None]
37
+ b = bn.bias - bn.running_mean * bn.weight / \
38
+ (bn.running_var + bn.eps)**0.5
39
+ m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(
40
+ 0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
41
+ m.weight.data.copy_(w)
42
+ m.bias.data.copy_(b)
43
+ return m
44
+
45
+
46
+ class DropPath(TimmDropPath):
47
+ def __init__(self, drop_prob=None):
48
+ super().__init__(drop_prob=drop_prob)
49
+ self.drop_prob = drop_prob
50
+
51
+ def __repr__(self):
52
+ msg = super().__repr__()
53
+ msg += f'(drop_prob={self.drop_prob})'
54
+ return msg
55
+
56
+
57
+ class PatchEmbed(nn.Module):
58
+ def __init__(self, in_chans, embed_dim, resolution, activation):
59
+ super().__init__()
60
+ img_size: Tuple[int, int] = to_2tuple(resolution)
61
+ self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
62
+ self.num_patches = self.patches_resolution[0] * \
63
+ self.patches_resolution[1]
64
+ self.in_chans = in_chans
65
+ self.embed_dim = embed_dim
66
+ n = embed_dim
67
+ self.seq = nn.Sequential(
68
+ Conv2d_BN(in_chans, n // 2, 3, 2, 1),
69
+ activation(),
70
+ Conv2d_BN(n // 2, n, 3, 2, 1),
71
+ )
72
+
73
+ def forward(self, x):
74
+ return self.seq(x)
75
+
76
+
77
+ class MBConv(nn.Module):
78
+ def __init__(self, in_chans, out_chans, expand_ratio,
79
+ activation, drop_path):
80
+ super().__init__()
81
+ self.in_chans = in_chans
82
+ self.hidden_chans = int(in_chans * expand_ratio)
83
+ self.out_chans = out_chans
84
+
85
+ self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
86
+ self.act1 = activation()
87
+
88
+ self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans,
89
+ ks=3, stride=1, pad=1, groups=self.hidden_chans)
90
+ self.act2 = activation()
91
+
92
+ self.conv3 = Conv2d_BN(
93
+ self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
94
+ self.act3 = activation()
95
+
96
+ self.drop_path = DropPath(
97
+ drop_path) if drop_path > 0. else nn.Identity()
98
+
99
+ def forward(self, x):
100
+ shortcut = x
101
+
102
+ x = self.conv1(x)
103
+ x = self.act1(x)
104
+
105
+ x = self.conv2(x)
106
+ x = self.act2(x)
107
+
108
+ x = self.conv3(x)
109
+
110
+ x = self.drop_path(x)
111
+
112
+ x += shortcut
113
+ x = self.act3(x)
114
+
115
+ return x
116
+
117
+
118
+ class PatchMerging(nn.Module):
119
+ def __init__(self, input_resolution, dim, out_dim, activation):
120
+ super().__init__()
121
+
122
+ self.input_resolution = input_resolution
123
+ self.dim = dim
124
+ self.out_dim = out_dim
125
+ self.act = activation()
126
+ self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
127
+ stride_c=2
128
+ if(out_dim==320 or out_dim==448 or out_dim==576):
129
+ stride_c=1
130
+ self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
131
+ self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)
132
+
133
+ def forward(self, x):
134
+ if x.ndim == 3:
135
+ H, W = self.input_resolution
136
+ B = len(x)
137
+ # (B, C, H, W)
138
+ x = x.view(B, H, W, -1).permute(0, 3, 1, 2)
139
+
140
+ x = self.conv1(x)
141
+ x = self.act(x)
142
+
143
+ x = self.conv2(x)
144
+ x = self.act(x)
145
+ x = self.conv3(x)
146
+ x = x.flatten(2).transpose(1, 2)
147
+ return x
148
+
149
+
150
+ class ConvLayer(nn.Module):
151
+ def __init__(self, dim, input_resolution, depth,
152
+ activation,
153
+ drop_path=0., downsample=None, use_checkpoint=False,
154
+ out_dim=None,
155
+ conv_expand_ratio=4.,
156
+ ):
157
+
158
+ super().__init__()
159
+ self.dim = dim
160
+ self.input_resolution = input_resolution
161
+ self.depth = depth
162
+ self.use_checkpoint = use_checkpoint
163
+
164
+ # build blocks
165
+ self.blocks = nn.ModuleList([
166
+ MBConv(dim, dim, conv_expand_ratio, activation,
167
+ drop_path[i] if isinstance(drop_path, list) else drop_path,
168
+ )
169
+ for i in range(depth)])
170
+
171
+ # patch merging layer
172
+ if downsample is not None:
173
+ self.downsample = downsample(
174
+ input_resolution, dim=dim, out_dim=out_dim, activation=activation)
175
+ else:
176
+ self.downsample = None
177
+
178
+ def forward(self, x):
179
+ for blk in self.blocks:
180
+ if self.use_checkpoint:
181
+ x = checkpoint.checkpoint(blk, x)
182
+ else:
183
+ x = blk(x)
184
+ if self.downsample is not None:
185
+ x = self.downsample(x)
186
+ return x
187
+
188
+
189
+ class Mlp(nn.Module):
190
+ def __init__(self, in_features, hidden_features=None,
191
+ out_features=None, act_layer=nn.GELU, drop=0.):
192
+ super().__init__()
193
+ out_features = out_features or in_features
194
+ hidden_features = hidden_features or in_features
195
+ self.norm = nn.LayerNorm(in_features)
196
+ self.fc1 = nn.Linear(in_features, hidden_features)
197
+ self.fc2 = nn.Linear(hidden_features, out_features)
198
+ self.act = act_layer()
199
+ self.drop = nn.Dropout(drop)
200
+
201
+ def forward(self, x):
202
+ x = self.norm(x)
203
+
204
+ x = self.fc1(x)
205
+ x = self.act(x)
206
+ x = self.drop(x)
207
+ x = self.fc2(x)
208
+ x = self.drop(x)
209
+ return x
210
+
211
+
212
+ class Attention(torch.nn.Module):
213
+ def __init__(self, dim, key_dim, num_heads=8,
214
+ attn_ratio=4,
215
+ resolution=(14, 14),
216
+ ):
217
+ super().__init__()
218
+ # (h, w)
219
+ assert isinstance(resolution, tuple) and len(resolution) == 2
220
+ self.num_heads = num_heads
221
+ self.scale = key_dim ** -0.5
222
+ self.key_dim = key_dim
223
+ self.nh_kd = nh_kd = key_dim * num_heads
224
+ self.d = int(attn_ratio * key_dim)
225
+ self.dh = int(attn_ratio * key_dim) * num_heads
226
+ self.attn_ratio = attn_ratio
227
+ h = self.dh + nh_kd * 2
228
+
229
+ self.norm = nn.LayerNorm(dim)
230
+ self.qkv = nn.Linear(dim, h)
231
+ self.proj = nn.Linear(self.dh, dim)
232
+
233
+ points = list(itertools.product(
234
+ range(resolution[0]), range(resolution[1])))
235
+ N = len(points)
236
+ attention_offsets = {}
237
+ idxs = []
238
+ for p1 in points:
239
+ for p2 in points:
240
+ offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
241
+ if offset not in attention_offsets:
242
+ attention_offsets[offset] = len(attention_offsets)
243
+ idxs.append(attention_offsets[offset])
244
+ self.attention_biases = torch.nn.Parameter(
245
+ torch.zeros(num_heads, len(attention_offsets)))
246
+ self.register_buffer('attention_bias_idxs',
247
+ torch.LongTensor(idxs).view(N, N),
248
+ persistent=False)
249
+
250
+ @torch.no_grad()
251
+ def train(self, mode=True):
252
+ super().train(mode)
253
+ if mode and hasattr(self, 'ab'):
254
+ del self.ab
255
+ else:
256
+ self.register_buffer('ab',
257
+ self.attention_biases[:, self.attention_bias_idxs],
258
+ persistent=False)
259
+
260
+ def forward(self, x): # x (B,N,C)
261
+ B, N, _ = x.shape
262
+
263
+ # Normalization
264
+ x = self.norm(x)
265
+
266
+ qkv = self.qkv(x)
267
+ # (B, N, num_heads, d)
268
+ q, k, v = qkv.view(B, N, self.num_heads, -
269
+ 1).split([self.key_dim, self.key_dim, self.d], dim=3)
270
+ # (B, num_heads, N, d)
271
+ q = q.permute(0, 2, 1, 3)
272
+ k = k.permute(0, 2, 1, 3)
273
+ v = v.permute(0, 2, 1, 3)
274
+
275
+ attn = (
276
+ (q @ k.transpose(-2, -1)) * self.scale
277
+ +
278
+ (self.attention_biases[:, self.attention_bias_idxs]
279
+ if self.training else self.ab)
280
+ )
281
+ attn = attn.softmax(dim=-1)
282
+ x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
283
+ x = self.proj(x)
284
+ return x
285
+
286
+
287
+ class TinyViTBlock(nn.Module):
288
+ r""" TinyViT Block.
289
+
290
+ Args:
291
+ dim (int): Number of input channels.
292
+ input_resolution (tuple[int, int]): Input resolution.
293
+ num_heads (int): Number of attention heads.
294
+ window_size (int): Window size.
295
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
296
+ drop (float, optional): Dropout rate. Default: 0.0
297
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
298
+ local_conv_size (int): the kernel size of the convolution between
299
+ Attention and MLP. Default: 3
300
+ activation: the activation function. Default: nn.GELU
301
+ """
302
+
303
+ def __init__(self, dim, input_resolution, num_heads, window_size=7,
304
+ mlp_ratio=4., drop=0., drop_path=0.,
305
+ local_conv_size=3,
306
+ activation=nn.GELU,
307
+ ):
308
+ super().__init__()
309
+ self.dim = dim
310
+ self.input_resolution = input_resolution
311
+ self.num_heads = num_heads
312
+ assert window_size > 0, 'window_size must be greater than 0'
313
+ self.window_size = window_size
314
+ self.mlp_ratio = mlp_ratio
315
+
316
+ self.drop_path = DropPath(
317
+ drop_path) if drop_path > 0. else nn.Identity()
318
+
319
+ assert dim % num_heads == 0, 'dim must be divisible by num_heads'
320
+ head_dim = dim // num_heads
321
+
322
+ window_resolution = (window_size, window_size)
323
+ self.attn = Attention(dim, head_dim, num_heads,
324
+ attn_ratio=1, resolution=window_resolution)
325
+
326
+ mlp_hidden_dim = int(dim * mlp_ratio)
327
+ mlp_activation = activation
328
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
329
+ act_layer=mlp_activation, drop=drop)
330
+
331
+ pad = local_conv_size // 2
332
+ self.local_conv = Conv2d_BN(
333
+ dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
334
+
335
+ def forward(self, x):
336
+ H, W = self.input_resolution
337
+ B, L, C = x.shape
338
+ assert L == H * W, "input feature has wrong size"
339
+ res_x = x
340
+ if H == self.window_size and W == self.window_size:
341
+ x = self.attn(x)
342
+ else:
343
+ x = x.view(B, H, W, C)
344
+ pad_b = (self.window_size - H %
345
+ self.window_size) % self.window_size
346
+ pad_r = (self.window_size - W %
347
+ self.window_size) % self.window_size
348
+ padding = pad_b > 0 or pad_r > 0
349
+
350
+ if padding:
351
+ x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
352
+
353
+ pH, pW = H + pad_b, W + pad_r
354
+ nH = pH // self.window_size
355
+ nW = pW // self.window_size
356
+ # window partition
357
+ x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
358
+ B * nH * nW, self.window_size * self.window_size, C)
359
+ x = self.attn(x)
360
+ # window reverse
361
+ x = x.view(B, nH, nW, self.window_size, self.window_size,
362
+ C).transpose(2, 3).reshape(B, pH, pW, C)
363
+
364
+ if padding:
365
+ x = x[:, :H, :W].contiguous()
366
+
367
+ x = x.view(B, L, C)
368
+
369
+ x = res_x + self.drop_path(x)
370
+
371
+ x = x.transpose(1, 2).reshape(B, C, H, W)
372
+ x = self.local_conv(x)
373
+ x = x.view(B, C, L).transpose(1, 2)
374
+
375
+ x = x + self.drop_path(self.mlp(x))
376
+ return x
377
+
378
+ def extra_repr(self) -> str:
379
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
380
+ f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
381
+
382
+
383
+ class BasicLayer(nn.Module):
384
+ """ A basic TinyViT layer for one stage.
385
+
386
+ Args:
387
+ dim (int): Number of input channels.
388
+ input_resolution (tuple[int]): Input resolution.
389
+ depth (int): Number of blocks.
390
+ num_heads (int): Number of attention heads.
391
+ window_size (int): Local window size.
392
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
393
+ drop (float, optional): Dropout rate. Default: 0.0
394
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
395
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
396
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
397
+ local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
398
+ activation: the activation function. Default: nn.GELU
399
+ out_dim: the output dimension of the layer. Default: dim
400
+ """
401
+
402
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
403
+ mlp_ratio=4., drop=0.,
404
+ drop_path=0., downsample=None, use_checkpoint=False,
405
+ local_conv_size=3,
406
+ activation=nn.GELU,
407
+ out_dim=None,
408
+ ):
409
+
410
+ super().__init__()
411
+ self.dim = dim
412
+ self.input_resolution = input_resolution
413
+ self.depth = depth
414
+ self.use_checkpoint = use_checkpoint
415
+
416
+ # build blocks
417
+ self.blocks = nn.ModuleList([
418
+ TinyViTBlock(dim=dim, input_resolution=input_resolution,
419
+ num_heads=num_heads, window_size=window_size,
420
+ mlp_ratio=mlp_ratio,
421
+ drop=drop,
422
+ drop_path=drop_path[i] if isinstance(
423
+ drop_path, list) else drop_path,
424
+ local_conv_size=local_conv_size,
425
+ activation=activation,
426
+ )
427
+ for i in range(depth)])
428
+
429
+ # patch merging layer
430
+ if downsample is not None:
431
+ self.downsample = downsample(
432
+ input_resolution, dim=dim, out_dim=out_dim, activation=activation)
433
+ else:
434
+ self.downsample = None
435
+
436
+ def forward(self, x):
437
+ for blk in self.blocks:
438
+ if self.use_checkpoint:
439
+ x = checkpoint.checkpoint(blk, x)
440
+ else:
441
+ x = blk(x)
442
+ if self.downsample is not None:
443
+ x = self.downsample(x)
444
+ return x
445
+
446
+ def extra_repr(self) -> str:
447
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
448
+
449
+ class LayerNorm2d(nn.Module):
450
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
451
+ super().__init__()
452
+ self.weight = nn.Parameter(torch.ones(num_channels))
453
+ self.bias = nn.Parameter(torch.zeros(num_channels))
454
+ self.eps = eps
455
+
456
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
457
+ u = x.mean(1, keepdim=True)
458
+ s = (x - u).pow(2).mean(1, keepdim=True)
459
+ x = (x - u) / torch.sqrt(s + self.eps)
460
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
461
+ return x
462
+ class TinyViT(nn.Module):
463
+ def __init__(self, img_size=224, in_chans=3, num_classes=1000,
464
+ embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],
465
+ num_heads=[3, 6, 12, 24],
466
+ window_sizes=[7, 7, 14, 7],
467
+ mlp_ratio=4.,
468
+ drop_rate=0.,
469
+ drop_path_rate=0.1,
470
+ use_checkpoint=False,
471
+ mbconv_expand_ratio=4.0,
472
+ local_conv_size=3,
473
+ layer_lr_decay=1.0,
474
+ ):
475
+ super().__init__()
476
+ self.img_size=img_size
477
+ self.num_classes = num_classes
478
+ self.depths = depths
479
+ self.num_layers = len(depths)
480
+ self.mlp_ratio = mlp_ratio
481
+
482
+ activation = nn.GELU
483
+
484
+ self.patch_embed = PatchEmbed(in_chans=in_chans,
485
+ embed_dim=embed_dims[0],
486
+ resolution=img_size,
487
+ activation=activation)
488
+
489
+ patches_resolution = self.patch_embed.patches_resolution
490
+ self.patches_resolution = patches_resolution
491
+
492
+ # stochastic depth
493
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
494
+ sum(depths))] # stochastic depth decay rule
495
+
496
+ # build layers
497
+ self.layers = nn.ModuleList()
498
+ for i_layer in range(self.num_layers):
499
+ kwargs = dict(dim=embed_dims[i_layer],
500
+ input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),
501
+ patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),
502
+ # input_resolution=(patches_resolution[0] // (2 ** i_layer),
503
+ # patches_resolution[1] // (2 ** i_layer)),
504
+ depth=depths[i_layer],
505
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
506
+ downsample=PatchMerging if (
507
+ i_layer < self.num_layers - 1) else None,
508
+ use_checkpoint=use_checkpoint,
509
+ out_dim=embed_dims[min(
510
+ i_layer + 1, len(embed_dims) - 1)],
511
+ activation=activation,
512
+ )
513
+ if i_layer == 0:
514
+ layer = ConvLayer(
515
+ conv_expand_ratio=mbconv_expand_ratio,
516
+ **kwargs,
517
+ )
518
+ else:
519
+ layer = BasicLayer(
520
+ num_heads=num_heads[i_layer],
521
+ window_size=window_sizes[i_layer],
522
+ mlp_ratio=self.mlp_ratio,
523
+ drop=drop_rate,
524
+ local_conv_size=local_conv_size,
525
+ **kwargs)
526
+ self.layers.append(layer)
527
+
528
+ # Classifier head
529
+ self.norm_head = nn.LayerNorm(embed_dims[-1])
530
+ self.head = nn.Linear(
531
+ embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
532
+
533
+ # init weights
534
+ self.apply(self._init_weights)
535
+ self.set_layer_lr_decay(layer_lr_decay)
536
+ self.neck = nn.Sequential(
537
+ nn.Conv2d(
538
+ embed_dims[-1],
539
+ 256,
540
+ kernel_size=1,
541
+ bias=False,
542
+ ),
543
+ LayerNorm2d(256),
544
+ nn.Conv2d(
545
+ 256,
546
+ 256,
547
+ kernel_size=3,
548
+ padding=1,
549
+ bias=False,
550
+ ),
551
+ LayerNorm2d(256),
552
+ )
553
+ def set_layer_lr_decay(self, layer_lr_decay):
554
+ decay_rate = layer_lr_decay
555
+
556
+ # layers -> blocks (depth)
557
+ depth = sum(self.depths)
558
+ lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
559
+ #print("LR SCALES:", lr_scales)
560
+
561
+ def _set_lr_scale(m, scale):
562
+ for p in m.parameters():
563
+ p.lr_scale = scale
564
+
565
+ self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
566
+ i = 0
567
+ for layer in self.layers:
568
+ for block in layer.blocks:
569
+ block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
570
+ i += 1
571
+ if layer.downsample is not None:
572
+ layer.downsample.apply(
573
+ lambda x: _set_lr_scale(x, lr_scales[i - 1]))
574
+ assert i == depth
575
+ for m in [self.norm_head, self.head]:
576
+ m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
577
+
578
+ for k, p in self.named_parameters():
579
+ p.param_name = k
580
+
581
+ def _check_lr_scale(m):
582
+ for p in m.parameters():
583
+ assert hasattr(p, 'lr_scale'), p.param_name
584
+
585
+ self.apply(_check_lr_scale)
586
+
587
+ def _init_weights(self, m):
588
+ if isinstance(m, nn.Linear):
589
+ trunc_normal_(m.weight, std=.02)
590
+ if isinstance(m, nn.Linear) and m.bias is not None:
591
+ nn.init.constant_(m.bias, 0)
592
+ elif isinstance(m, nn.LayerNorm):
593
+ nn.init.constant_(m.bias, 0)
594
+ nn.init.constant_(m.weight, 1.0)
595
+
596
+ @torch.jit.ignore
597
+ def no_weight_decay_keywords(self):
598
+ return {'attention_biases'}
599
+
600
+ def forward_features(self, x):
601
+ # x: (N, C, H, W)
602
+ x = self.patch_embed(x)
603
+
604
+ x = self.layers[0](x)
605
+ start_i = 1
606
+
607
+ interm_embeddings=[]
608
+ for i in range(start_i, len(self.layers)):
609
+ layer = self.layers[i]
610
+ x = layer(x)
611
+ # print('x shape:', x.shape, '---i:', i)
612
+ if i == 1:
613
+ interm_embeddings.append(x.view(x.shape[0], 64, 64, -1))
614
+
615
+ B,_,C=x.size()
616
+ x = x.view(B, 64, 64, C)
617
+ x=x.permute(0, 3, 1, 2)
618
+ x=self.neck(x)
619
+ return x, interm_embeddings
620
+
621
+ def forward(self, x):
622
+ x, interm_embeddings = self.forward_features(x)
623
+ #x = self.norm_head(x)
624
+ #x = self.head(x)
625
+ # print('come to here is correct'* 3)
626
+ return x, interm_embeddings
627
+
628
+
629
+ _checkpoint_url_format = \
630
+ 'https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/{}.pth'
631
+ _provided_checkpoints = {
632
+ 'tiny_vit_5m_224': 'tiny_vit_5m_22kto1k_distill',
633
+ 'tiny_vit_11m_224': 'tiny_vit_11m_22kto1k_distill',
634
+ 'tiny_vit_21m_224': 'tiny_vit_21m_22kto1k_distill',
635
+ 'tiny_vit_21m_384': 'tiny_vit_21m_22kto1k_384_distill',
636
+ 'tiny_vit_21m_512': 'tiny_vit_21m_22kto1k_512_distill',
637
+ }
638
+
639
+
640
+ def register_tiny_vit_model(fn):
641
+ '''Register a TinyViT model
642
+ It is a wrapper of `register_model` that also loads the pretrained checkpoint.
643
+ '''
644
+ def fn_wrapper(pretrained=False, **kwargs):
645
+ model = fn()
646
+ if pretrained:
647
+ model_name = fn.__name__
648
+ assert model_name in _provided_checkpoints, \
649
+ f'Sorry that the checkpoint `{model_name}` is not provided yet.'
650
+ url = _checkpoint_url_format.format(
651
+ _provided_checkpoints[model_name])
652
+ checkpoint = torch.hub.load_state_dict_from_url(
653
+ url=url,
654
+ map_location='cpu', check_hash=False,
655
+ )
656
+ model.load_state_dict(checkpoint['model'])
657
+
658
+ return model
659
+
660
+ # rename the name of fn_wrapper
661
+ fn_wrapper.__name__ = fn.__name__
662
+ return register_model(fn_wrapper)
663
+
664
+
665
+ @register_tiny_vit_model
666
+ def tiny_vit_5m_224(pretrained=False, num_classes=1000, drop_path_rate=0.0):
667
+ return TinyViT(
668
+ num_classes=num_classes,
669
+ embed_dims=[64, 128, 160, 320],
670
+ depths=[2, 2, 6, 2],
671
+ num_heads=[2, 4, 5, 10],
672
+ window_sizes=[7, 7, 14, 7],
673
+ drop_path_rate=drop_path_rate,
674
+ )
675
+
676
+
677
+ @register_tiny_vit_model
678
+ def tiny_vit_11m_224(pretrained=False, num_classes=1000, drop_path_rate=0.1):
679
+ return TinyViT(
680
+ num_classes=num_classes,
681
+ embed_dims=[64, 128, 256, 448],
682
+ depths=[2, 2, 6, 2],
683
+ num_heads=[2, 4, 8, 14],
684
+ window_sizes=[7, 7, 14, 7],
685
+ drop_path_rate=drop_path_rate,
686
+ )
687
+
688
+
689
+ @register_tiny_vit_model
690
+ def tiny_vit_21m_224(pretrained=False, num_classes=1000, drop_path_rate=0.2):
691
+ return TinyViT(
692
+ num_classes=num_classes,
693
+ embed_dims=[96, 192, 384, 576],
694
+ depths=[2, 2, 6, 2],
695
+ num_heads=[3, 6, 12, 18],
696
+ window_sizes=[7, 7, 14, 7],
697
+ drop_path_rate=drop_path_rate,
698
+ )
699
+
700
+
701
+ @register_tiny_vit_model
702
+ def tiny_vit_21m_384(pretrained=False, num_classes=1000, drop_path_rate=0.1):
703
+ return TinyViT(
704
+ img_size=384,
705
+ num_classes=num_classes,
706
+ embed_dims=[96, 192, 384, 576],
707
+ depths=[2, 2, 6, 2],
708
+ num_heads=[3, 6, 12, 18],
709
+ window_sizes=[12, 12, 24, 12],
710
+ drop_path_rate=drop_path_rate,
711
+ )
712
+
713
+
714
+ @register_tiny_vit_model
715
+ def tiny_vit_21m_512(pretrained=False, num_classes=1000, drop_path_rate=0.1):
716
+ return TinyViT(
717
+ img_size=512,
718
+ num_classes=num_classes,
719
+ embed_dims=[96, 192, 384, 576],
720
+ depths=[2, 2, 6, 2],
721
+ num_heads=[3, 6, 12, 18],
722
+ window_sizes=[16, 16, 32, 16],
723
+ drop_path_rate=drop_path_rate,
724
+ )
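
The TinyViT backbone above is used as the lightweight image encoder in Light HQ-SAM. The following construction sketch is not part of this diff; the hyper-parameters mirror the MobileSAM-style 5M configuration and the import path follows this package layout, so treat both as assumptions rather than the repository's exact build code.

```python
import torch
from segment_anything.modeling.tiny_vit_sam import TinyViT  # import path per this package layout

encoder = TinyViT(
    img_size=1024, in_chans=3, num_classes=1000,
    embed_dims=[64, 128, 160, 320],   # TinyViT-5M style widths
    depths=[2, 2, 6, 2],
    num_heads=[2, 4, 5, 10],
    window_sizes=[7, 7, 14, 7],
    mlp_ratio=4.0,
    drop_rate=0.0,
    drop_path_rate=0.0,
)

x = torch.randn(1, 3, 1024, 1024)  # forward_features assumes a 1024x1024 input (64x64 token grid)
feats, interm = encoder(x)
print(feats.shape)     # (1, 256, 64, 64): neck output used as the SAM image embedding
print(interm[0].shape) # (1, 64, 64, 160): stage-2 features kept for the HQ mask decoder
```

Note that `forward_features` reshapes tokens with a hard-coded 64x64 grid, which is why the sketch feeds a 1024x1024 input.
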
sam2.1HQ/sam-hq-main/build/lib/segment_anything/modeling/transformer.py ADDED
@@ -0,0 +1,240 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import Tensor, nn
9
+
10
+ import math
11
+ from typing import Tuple, Type
12
+
13
+ from .common import MLPBlock
14
+
15
+
16
+ class TwoWayTransformer(nn.Module):
17
+ def __init__(
18
+ self,
19
+ depth: int,
20
+ embedding_dim: int,
21
+ num_heads: int,
22
+ mlp_dim: int,
23
+ activation: Type[nn.Module] = nn.ReLU,
24
+ attention_downsample_rate: int = 2,
25
+ ) -> None:
26
+ """
27
+ A transformer decoder that attends to an input image using
28
+ queries whose positional embedding is supplied.
29
+
30
+ Args:
31
+ depth (int): number of layers in the transformer
32
+ embedding_dim (int): the channel dimension for the input embeddings
33
+ num_heads (int): the number of heads for multihead attention. Must
34
+ divide embedding_dim
35
+ mlp_dim (int): the channel dimension internal to the MLP block
36
+ activation (nn.Module): the activation to use in the MLP block
37
+ """
38
+ super().__init__()
39
+ self.depth = depth
40
+ self.embedding_dim = embedding_dim
41
+ self.num_heads = num_heads
42
+ self.mlp_dim = mlp_dim
43
+ self.layers = nn.ModuleList()
44
+
45
+ for i in range(depth):
46
+ self.layers.append(
47
+ TwoWayAttentionBlock(
48
+ embedding_dim=embedding_dim,
49
+ num_heads=num_heads,
50
+ mlp_dim=mlp_dim,
51
+ activation=activation,
52
+ attention_downsample_rate=attention_downsample_rate,
53
+ skip_first_layer_pe=(i == 0),
54
+ )
55
+ )
56
+
57
+ self.final_attn_token_to_image = Attention(
58
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
59
+ )
60
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
61
+
62
+ def forward(
63
+ self,
64
+ image_embedding: Tensor,
65
+ image_pe: Tensor,
66
+ point_embedding: Tensor,
67
+ ) -> Tuple[Tensor, Tensor]:
68
+ """
69
+ Args:
70
+ image_embedding (torch.Tensor): image to attend to. Should be shape
71
+ B x embedding_dim x h x w for any h and w.
72
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
73
+ have the same shape as image_embedding.
74
+ point_embedding (torch.Tensor): the embedding to add to the query points.
75
+ Must have shape B x N_points x embedding_dim for any N_points.
76
+
77
+ Returns:
78
+ torch.Tensor: the processed point_embedding
79
+ torch.Tensor: the processed image_embedding
80
+ """
81
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
82
+ bs, c, h, w = image_embedding.shape
83
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
84
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
85
+
86
+ # Prepare queries
87
+ queries = point_embedding
88
+ keys = image_embedding
89
+
90
+ # Apply transformer blocks and final layernorm
91
+ for layer in self.layers:
92
+ queries, keys = layer(
93
+ queries=queries,
94
+ keys=keys,
95
+ query_pe=point_embedding,
96
+ key_pe=image_pe,
97
+ )
98
+
99
+ # Apply the final attention layer from the points to the image
100
+ q = queries + point_embedding
101
+ k = keys + image_pe
102
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
103
+ queries = queries + attn_out
104
+ queries = self.norm_final_attn(queries)
105
+
106
+ return queries, keys
107
+
108
+
109
+ class TwoWayAttentionBlock(nn.Module):
110
+ def __init__(
111
+ self,
112
+ embedding_dim: int,
113
+ num_heads: int,
114
+ mlp_dim: int = 2048,
115
+ activation: Type[nn.Module] = nn.ReLU,
116
+ attention_downsample_rate: int = 2,
117
+ skip_first_layer_pe: bool = False,
118
+ ) -> None:
119
+ """
120
+ A transformer block with four layers: (1) self-attention of sparse
121
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
122
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
123
+ inputs.
124
+
125
+ Arguments:
126
+ embedding_dim (int): the channel dimension of the embeddings
127
+ num_heads (int): the number of heads in the attention layers
128
+ mlp_dim (int): the hidden dimension of the mlp block
129
+ activation (nn.Module): the activation of the mlp block
130
+ skip_first_layer_pe (bool): skip the PE on the first layer
131
+ """
132
+ super().__init__()
133
+ self.self_attn = Attention(embedding_dim, num_heads)
134
+ self.norm1 = nn.LayerNorm(embedding_dim)
135
+
136
+ self.cross_attn_token_to_image = Attention(
137
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
138
+ )
139
+ self.norm2 = nn.LayerNorm(embedding_dim)
140
+
141
+ self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
142
+ self.norm3 = nn.LayerNorm(embedding_dim)
143
+
144
+ self.norm4 = nn.LayerNorm(embedding_dim)
145
+ self.cross_attn_image_to_token = Attention(
146
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
147
+ )
148
+
149
+ self.skip_first_layer_pe = skip_first_layer_pe
150
+
151
+ def forward(
152
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
153
+ ) -> Tuple[Tensor, Tensor]:
154
+ # Self attention block
155
+ if self.skip_first_layer_pe:
156
+ queries = self.self_attn(q=queries, k=queries, v=queries)
157
+ else:
158
+ q = queries + query_pe
159
+ attn_out = self.self_attn(q=q, k=q, v=queries)
160
+ queries = queries + attn_out
161
+ queries = self.norm1(queries)
162
+
163
+ # Cross attention block, tokens attending to image embedding
164
+ q = queries + query_pe
165
+ k = keys + key_pe
166
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
167
+ queries = queries + attn_out
168
+ queries = self.norm2(queries)
169
+
170
+ # MLP block
171
+ mlp_out = self.mlp(queries)
172
+ queries = queries + mlp_out
173
+ queries = self.norm3(queries)
174
+
175
+ # Cross attention block, image embedding attending to tokens
176
+ q = queries + query_pe
177
+ k = keys + key_pe
178
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
179
+ keys = keys + attn_out
180
+ keys = self.norm4(keys)
181
+
182
+ return queries, keys
183
+
184
+
185
+ class Attention(nn.Module):
186
+ """
187
+ An attention layer that allows for downscaling the size of the embedding
188
+ after projection to queries, keys, and values.
189
+ """
190
+
191
+ def __init__(
192
+ self,
193
+ embedding_dim: int,
194
+ num_heads: int,
195
+ downsample_rate: int = 1,
196
+ ) -> None:
197
+ super().__init__()
198
+ self.embedding_dim = embedding_dim
199
+ self.internal_dim = embedding_dim // downsample_rate
200
+ self.num_heads = num_heads
201
+ assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
202
+
203
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
204
+ self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
205
+ self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
206
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
207
+
208
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
209
+ b, n, c = x.shape
210
+ x = x.reshape(b, n, num_heads, c // num_heads)
211
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
212
+
213
+ def _recombine_heads(self, x: Tensor) -> Tensor:
214
+ b, n_heads, n_tokens, c_per_head = x.shape
215
+ x = x.transpose(1, 2)
216
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
217
+
218
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
219
+ # Input projections
220
+ q = self.q_proj(q)
221
+ k = self.k_proj(k)
222
+ v = self.v_proj(v)
223
+
224
+ # Separate into heads
225
+ q = self._separate_heads(q, self.num_heads)
226
+ k = self._separate_heads(k, self.num_heads)
227
+ v = self._separate_heads(v, self.num_heads)
228
+
229
+ # Attention
230
+ _, _, _, c_per_head = q.shape
231
+ attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
232
+ attn = attn / math.sqrt(c_per_head)
233
+ attn = torch.softmax(attn, dim=-1)
234
+
235
+ # Get output
236
+ out = attn @ v
237
+ out = self._recombine_heads(out)
238
+ out = self.out_proj(out)
239
+
240
+ return out
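
A small shape sketch (not part of this diff) may help with the two-way attention flow above: prompt tokens act as the queries, the flattened image embedding acts as the keys, and both are refined and returned. The constructor arguments below match the values typically passed by the SAM mask decoder, but they are illustrative only.

```python
import torch
from segment_anything.modeling.transformer import TwoWayTransformer  # path per this package layout

transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)

image_embedding = torch.randn(1, 256, 64, 64)  # B x C x H x W image features
image_pe = torch.randn(1, 256, 64, 64)         # positional encoding, same shape
point_embedding = torch.randn(1, 7, 256)       # B x N_tokens x C prompt tokens

queries, keys = transformer(image_embedding, image_pe, point_embedding)
print(queries.shape)  # (1, 7, 256): refined prompt tokens
print(keys.shape)     # (1, 4096, 256): refined image tokens (64*64 flattened)
```
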
sam2.1HQ/sam-hq-main/build/lib/segment_anything/predictor.py ADDED
@@ -0,0 +1,276 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ from .modeling import Sam
11
+
12
+ from typing import Optional, Tuple
13
+
14
+ from .utils.transforms import ResizeLongestSide
15
+
16
+
17
+ class SamPredictor:
18
+ def __init__(
19
+ self,
20
+ sam_model: Sam,
21
+ ) -> None:
22
+ """
23
+ Uses SAM to calculate the image embedding for an image, and then
24
+ allows repeated, efficient mask prediction given prompts.
25
+
26
+ Arguments:
27
+ sam_model (Sam): The model to use for mask prediction.
28
+ """
29
+ super().__init__()
30
+ self.model = sam_model
31
+ self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
32
+ self.reset_image()
33
+
34
+ def set_image(
35
+ self,
36
+ image: np.ndarray,
37
+ image_format: str = "RGB",
38
+ ) -> None:
39
+ """
40
+ Calculates the image embeddings for the provided image, allowing
41
+ masks to be predicted with the 'predict' method.
42
+
43
+ Arguments:
44
+ image (np.ndarray): The image for calculating masks. Expects an
45
+ image in HWC uint8 format, with pixel values in [0, 255].
46
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
47
+ """
48
+ assert image_format in [
49
+ "RGB",
50
+ "BGR",
51
+ ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
52
+ # import pdb;pdb.set_trace()
53
+ if image_format != self.model.image_format:
54
+ image = image[..., ::-1]
55
+
56
+ # Transform the image to the form expected by the model
57
+ # import pdb;pdb.set_trace()
58
+ input_image = self.transform.apply_image(image)
59
+ input_image_torch = torch.as_tensor(input_image, device=self.device)
60
+ input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
61
+
62
+ self.set_torch_image(input_image_torch, image.shape[:2])
63
+
64
+ @torch.no_grad()
65
+ def set_torch_image(
66
+ self,
67
+ transformed_image: torch.Tensor,
68
+ original_image_size: Tuple[int, ...],
69
+ ) -> None:
70
+ """
71
+ Calculates the image embeddings for the provided image, allowing
72
+ masks to be predicted with the 'predict' method. Expects the input
73
+ image to be already transformed to the format expected by the model.
74
+
75
+ Arguments:
76
+ transformed_image (torch.Tensor): The input image, with shape
77
+ 1x3xHxW, which has been transformed with ResizeLongestSide.
78
+ original_image_size (tuple(int, int)): The size of the image
79
+ before transformation, in (H, W) format.
80
+ """
81
+ assert (
82
+ len(transformed_image.shape) == 4
83
+ and transformed_image.shape[1] == 3
84
+ and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
85
+ ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
86
+ self.reset_image()
87
+
88
+ self.original_size = original_image_size
89
+ self.input_size = tuple(transformed_image.shape[-2:])
90
+ input_image = self.model.preprocess(transformed_image)
91
+ self.features, self.interm_features = self.model.image_encoder(input_image)
92
+ self.is_image_set = True
93
+
94
+ def predict(
95
+ self,
96
+ point_coords: Optional[np.ndarray] = None,
97
+ point_labels: Optional[np.ndarray] = None,
98
+ box: Optional[np.ndarray] = None,
99
+ mask_input: Optional[np.ndarray] = None,
100
+ multimask_output: bool = True,
101
+ return_logits: bool = False,
102
+ hq_token_only: bool =False,
103
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
104
+ """
105
+ Predict masks for the given input prompts, using the currently set image.
106
+
107
+ Arguments:
108
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
109
+ model. Each point is in (X,Y) in pixels.
110
+ point_labels (np.ndarray or None): A length N array of labels for the
111
+ point prompts. 1 indicates a foreground point and 0 indicates a
112
+ background point.
113
+ box (np.ndarray or None): A length 4 array giving a box prompt to the
114
+ model, in XYXY format.
115
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
116
+ coming from a previous prediction iteration. Has form 1xHxW, where
117
+ for SAM, H=W=256.
118
+ multimask_output (bool): If true, the model will return three masks.
119
+ For ambiguous input prompts (such as a single click), this will often
120
+ produce better masks than a single prediction. If only a single
121
+ mask is needed, the model's predicted quality score can be used
122
+ to select the best mask. For non-ambiguous prompts, such as multiple
123
+ input prompts, multimask_output=False can give better results.
124
+ return_logits (bool): If true, returns un-thresholded masks logits
125
+ instead of a binary mask.
126
+
127
+ Returns:
128
+ (np.ndarray): The output masks in CxHxW format, where C is the
129
+ number of masks, and (H, W) is the original image size.
130
+ (np.ndarray): An array of length C containing the model's
131
+ predictions for the quality of each mask.
132
+ (np.ndarray): An array of shape CxHxW, where C is the number
133
+ of masks and H=W=256. These low resolution logits can be passed to
134
+ a subsequent iteration as mask input.
135
+ """
136
+ if not self.is_image_set:
137
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
138
+
139
+ # Transform input prompts
140
+ coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
141
+ if point_coords is not None:
142
+ assert (
143
+ point_labels is not None
144
+ ), "point_labels must be supplied if point_coords is supplied."
145
+ point_coords = self.transform.apply_coords(point_coords, self.original_size)
146
+ coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
147
+ labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
148
+ coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
149
+ if box is not None:
150
+ box = self.transform.apply_boxes(box, self.original_size)
151
+ box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
152
+ box_torch = box_torch[None, :]
153
+ if mask_input is not None:
154
+ mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
155
+ mask_input_torch = mask_input_torch[None, :, :, :]
156
+
157
+ masks, iou_predictions, low_res_masks = self.predict_torch(
158
+ coords_torch,
159
+ labels_torch,
160
+ box_torch,
161
+ mask_input_torch,
162
+ multimask_output,
163
+ return_logits=return_logits,
164
+ hq_token_only=hq_token_only,
165
+ )
166
+
167
+ masks_np = masks[0].detach().cpu().numpy()
168
+ iou_predictions_np = iou_predictions[0].detach().cpu().numpy()
169
+ low_res_masks_np = low_res_masks[0].detach().cpu().numpy()
170
+ return masks_np, iou_predictions_np, low_res_masks_np
171
+
172
+ @torch.no_grad()
173
+ def predict_torch(
174
+ self,
175
+ point_coords: Optional[torch.Tensor],
176
+ point_labels: Optional[torch.Tensor],
177
+ boxes: Optional[torch.Tensor] = None,
178
+ mask_input: Optional[torch.Tensor] = None,
179
+ multimask_output: bool = True,
180
+ return_logits: bool = False,
181
+ hq_token_only: bool =False,
182
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
183
+ """
184
+ Predict masks for the given input prompts, using the currently set image.
185
+ Input prompts are batched torch tensors and are expected to already be
186
+ transformed to the input frame using ResizeLongestSide.
187
+
188
+ Arguments:
189
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
190
+ model. Each point is in (X,Y) in pixels.
191
+ point_labels (torch.Tensor or None): A BxN array of labels for the
192
+ point prompts. 1 indicates a foreground point and 0 indicates a
193
+ background point.
194
+ boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
195
+ model, in XYXY format.
196
+ mask_input (torch.Tensor or None): A low resolution mask input to the model, typically
197
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
198
+ for SAM, H=W=256. Masks returned by a previous iteration of the
199
+ predict method do not need further transformation.
200
+ multimask_output (bool): If true, the model will return three masks.
201
+ For ambiguous input prompts (such as a single click), this will often
202
+ produce better masks than a single prediction. If only a single
203
+ mask is needed, the model's predicted quality score can be used
204
+ to select the best mask. For non-ambiguous prompts, such as multiple
205
+ input prompts, multimask_output=False can give better results.
206
+ return_logits (bool): If true, returns un-thresholded masks logits
207
+ instead of a binary mask.
208
+
209
+ Returns:
210
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
211
+ number of masks, and (H, W) is the original image size.
212
+ (torch.Tensor): An array of shape BxC containing the model's
213
+ predictions for the quality of each mask.
214
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
215
+ of masks and H=W=256. These low res logits can be passed to
216
+ a subsequent iteration as mask input.
217
+ """
218
+ if not self.is_image_set:
219
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
220
+
221
+ if point_coords is not None:
222
+ points = (point_coords, point_labels)
223
+ else:
224
+ points = None
225
+
226
+ # Embed prompts
227
+ sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
228
+ points=points,
229
+ boxes=boxes,
230
+ masks=mask_input,
231
+ )
232
+
233
+ # Predict masks
234
+ low_res_masks, iou_predictions = self.model.mask_decoder(
235
+ image_embeddings=self.features,
236
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
237
+ sparse_prompt_embeddings=sparse_embeddings,
238
+ dense_prompt_embeddings=dense_embeddings,
239
+ multimask_output=multimask_output,
240
+ hq_token_only=hq_token_only,
241
+ interm_embeddings=self.interm_features,
242
+ )
243
+
244
+ # Upscale the masks to the original image resolution
245
+ masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
246
+
247
+ if not return_logits:
248
+ masks = masks > self.model.mask_threshold
249
+
250
+ return masks, iou_predictions, low_res_masks
251
+
252
+ def get_image_embedding(self) -> torch.Tensor:
253
+ """
254
+ Returns the image embeddings for the currently set image, with
255
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
256
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
257
+ """
258
+ if not self.is_image_set:
259
+ raise RuntimeError(
260
+ "An image must be set with .set_image(...) to generate an embedding."
261
+ )
262
+ assert self.features is not None, "Features must exist if an image has been set."
263
+ return self.features
264
+
265
+ @property
266
+ def device(self) -> torch.device:
267
+ return self.model.device
268
+
269
+ def reset_image(self) -> None:
270
+ """Resets the currently set image."""
271
+ self.is_image_set = False
272
+ self.features = None
273
+ self.orig_h = None
274
+ self.orig_w = None
275
+ self.input_h = None
276
+ self.input_w = None
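
The typical interactive workflow for the predictor above looks roughly as follows. This sketch is not part of the diff; the checkpoint filename and model type are placeholders, and the imports assume the package's top-level exports.

```python
import numpy as np
from segment_anything import sam_model_registry, SamPredictor

# Placeholder checkpoint path and model type; substitute a real HQ-SAM checkpoint.
sam = sam_model_registry["vit_l"](checkpoint="sam_hq_vit_l.pth")
predictor = SamPredictor(sam)

image = np.zeros((768, 1024, 3), dtype=np.uint8)   # stand-in for a real HWC uint8 RGB image
predictor.set_image(image)                          # computes and caches the image embedding once

masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[500, 375]]),            # one (X, Y) foreground click in pixels
    point_labels=np.array([1]),
    multimask_output=False,
    hq_token_only=True,                             # keep only the HQ output token's mask
)
print(masks.shape, scores.shape)                    # (1, 768, 1024) boolean mask, (1,) quality score
```

Because `set_image` caches `self.features` and `self.interm_features`, repeated `predict` calls with new prompts reuse the same image embedding.
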
sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/amg.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ import math
11
+ from copy import deepcopy
12
+ from itertools import product
13
+ from typing import Any, Dict, Generator, ItemsView, List, Tuple
14
+
15
+
16
+ class MaskData:
17
+ """
18
+ A structure for storing masks and their related data in batched format.
19
+ Implements basic filtering and concatenation.
20
+ """
21
+
22
+ def __init__(self, **kwargs) -> None:
23
+ for v in kwargs.values():
24
+ assert isinstance(
25
+ v, (list, np.ndarray, torch.Tensor)
26
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
27
+ self._stats = dict(**kwargs)
28
+
29
+ def __setitem__(self, key: str, item: Any) -> None:
30
+ assert isinstance(
31
+ item, (list, np.ndarray, torch.Tensor)
32
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
33
+ self._stats[key] = item
34
+
35
+ def __delitem__(self, key: str) -> None:
36
+ del self._stats[key]
37
+
38
+ def __getitem__(self, key: str) -> Any:
39
+ return self._stats[key]
40
+
41
+ def items(self) -> ItemsView[str, Any]:
42
+ return self._stats.items()
43
+
44
+ def filter(self, keep: torch.Tensor) -> None:
45
+ for k, v in self._stats.items():
46
+ if v is None:
47
+ self._stats[k] = None
48
+ elif isinstance(v, torch.Tensor):
49
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
50
+ elif isinstance(v, np.ndarray):
51
+ self._stats[k] = v[keep.detach().cpu().numpy()]
52
+ elif isinstance(v, list) and keep.dtype == torch.bool:
53
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
54
+ elif isinstance(v, list):
55
+ self._stats[k] = [v[i] for i in keep]
56
+ else:
57
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
58
+
59
+ def cat(self, new_stats: "MaskData") -> None:
60
+ for k, v in new_stats.items():
61
+ if k not in self._stats or self._stats[k] is None:
62
+ self._stats[k] = deepcopy(v)
63
+ elif isinstance(v, torch.Tensor):
64
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
65
+ elif isinstance(v, np.ndarray):
66
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
67
+ elif isinstance(v, list):
68
+ self._stats[k] = self._stats[k] + deepcopy(v)
69
+ else:
70
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
71
+
72
+ def to_numpy(self) -> None:
73
+ for k, v in self._stats.items():
74
+ if isinstance(v, torch.Tensor):
75
+ self._stats[k] = v.detach().cpu().numpy()
76
+
77
+
78
+ def is_box_near_crop_edge(
79
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
80
+ ) -> torch.Tensor:
81
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
82
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
83
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
84
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
85
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
86
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
87
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
88
+ return torch.any(near_crop_edge, dim=1)
89
+
90
+
91
+ def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
92
+ box_xywh = deepcopy(box_xyxy)
93
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
94
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
95
+ return box_xywh
96
+
97
+
98
+ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
99
+ assert len(args) > 0 and all(
100
+ len(a) == len(args[0]) for a in args
101
+ ), "Batched iteration must have inputs of all the same size."
102
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
103
+ for b in range(n_batches):
104
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
105
+
106
+
107
+ def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
108
+ """
109
+ Encodes masks to an uncompressed RLE, in the format expected by
110
+ pycoco tools.
111
+ """
112
+ # Put in fortran order and flatten h,w
113
+ b, h, w = tensor.shape
114
+ tensor = tensor.permute(0, 2, 1).flatten(1)
115
+
116
+ # Compute change indices
117
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
118
+ change_indices = diff.nonzero()
119
+
120
+ # Encode run length
121
+ out = []
122
+ for i in range(b):
123
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
124
+ cur_idxs = torch.cat(
125
+ [
126
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
127
+ cur_idxs + 1,
128
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
129
+ ]
130
+ )
131
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
132
+ counts = [] if tensor[i, 0] == 0 else [0]
133
+ counts.extend(btw_idxs.detach().cpu().tolist())
134
+ out.append({"size": [h, w], "counts": counts})
135
+ return out
136
+
137
+
138
+ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
139
+ """Compute a binary mask from an uncompressed RLE."""
140
+ h, w = rle["size"]
141
+ mask = np.empty(h * w, dtype=bool)
142
+ idx = 0
143
+ parity = False
144
+ for count in rle["counts"]:
145
+ mask[idx : idx + count] = parity
146
+ idx += count
147
+ parity ^= True
148
+ mask = mask.reshape(w, h)
149
+ return mask.transpose() # Put in C order
150
+
151
+
152
+ def area_from_rle(rle: Dict[str, Any]) -> int:
153
+ return sum(rle["counts"][1::2])
154
+
155
+
156
+ def calculate_stability_score(
157
+ masks: torch.Tensor, mask_threshold: float, threshold_offset: float
158
+ ) -> torch.Tensor:
159
+ """
160
+ Computes the stability score for a batch of masks. The stability
161
+ score is the IoU between the binary masks obtained by thresholding
162
+ the predicted mask logits at high and low values.
163
+ """
164
+ # One mask is always contained inside the other.
165
+ # Save memory by preventing unnecessary cast to torch.int64
166
+ intersections = (
167
+ (masks > (mask_threshold + threshold_offset))
168
+ .sum(-1, dtype=torch.int16)
169
+ .sum(-1, dtype=torch.int32)
170
+ )
171
+ unions = (
172
+ (masks > (mask_threshold - threshold_offset))
173
+ .sum(-1, dtype=torch.int16)
174
+ .sum(-1, dtype=torch.int32)
175
+ )
176
+ return intersections / unions
177
+
178
+
179
+ def build_point_grid(n_per_side: int) -> np.ndarray:
180
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
181
+ offset = 1 / (2 * n_per_side)
182
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
183
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
184
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
185
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
186
+ return points
187
+
188
+
189
+ def build_all_layer_point_grids(
190
+ n_per_side: int, n_layers: int, scale_per_layer: int
191
+ ) -> List[np.ndarray]:
192
+ """Generates point grids for all crop layers."""
193
+ points_by_layer = []
194
+ for i in range(n_layers + 1):
195
+ n_points = int(n_per_side / (scale_per_layer**i))
196
+ points_by_layer.append(build_point_grid(n_points))
197
+ return points_by_layer
198
+
199
+
200
+ def generate_crop_boxes(
201
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
202
+ ) -> Tuple[List[List[int]], List[int]]:
203
+ """
204
+ Generates a list of crop boxes of different sizes. Each layer
205
+ has (2**i)**2 boxes for the ith layer.
206
+ """
207
+ crop_boxes, layer_idxs = [], []
208
+ im_h, im_w = im_size
209
+ short_side = min(im_h, im_w)
210
+
211
+ # Original image
212
+ crop_boxes.append([0, 0, im_w, im_h])
213
+ layer_idxs.append(0)
214
+
215
+ def crop_len(orig_len, n_crops, overlap):
216
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
217
+
218
+ for i_layer in range(n_layers):
219
+ n_crops_per_side = 2 ** (i_layer + 1)
220
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
221
+
222
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
223
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
224
+
225
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
226
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
227
+
228
+ # Crops in XYWH format
229
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
230
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
231
+ crop_boxes.append(box)
232
+ layer_idxs.append(i_layer + 1)
233
+
234
+ return crop_boxes, layer_idxs
235
+
236
+
237
+ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
238
+ x0, y0, _, _ = crop_box
239
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
240
+ # Check if boxes has a channel dimension
241
+ if len(boxes.shape) == 3:
242
+ offset = offset.unsqueeze(1)
243
+ return boxes + offset
244
+
245
+
246
+ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
247
+ x0, y0, _, _ = crop_box
248
+ offset = torch.tensor([[x0, y0]], device=points.device)
249
+ # Check if points has a channel dimension
250
+ if len(points.shape) == 3:
251
+ offset = offset.unsqueeze(1)
252
+ return points + offset
253
+
254
+
255
+ def uncrop_masks(
256
+ masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
257
+ ) -> torch.Tensor:
258
+ x0, y0, x1, y1 = crop_box
259
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
260
+ return masks
261
+ # Coordinate transform masks
262
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
263
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
264
+ return torch.nn.functional.pad(masks, pad, value=0)
265
+
266
+
267
+ def remove_small_regions(
268
+ mask: np.ndarray, area_thresh: float, mode: str
269
+ ) -> Tuple[np.ndarray, bool]:
270
+ """
271
+ Removes small disconnected regions and holes in a mask. Returns the
272
+ mask and an indicator of if the mask has been modified.
273
+ """
274
+ import cv2 # type: ignore
275
+
276
+ assert mode in ["holes", "islands"]
277
+ correct_holes = mode == "holes"
278
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
279
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
280
+ sizes = stats[:, -1][1:] # Row 0 is background label
281
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
282
+ if len(small_regions) == 0:
283
+ return mask, False
284
+ fill_labels = [0] + small_regions
285
+ if not correct_holes:
286
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
287
+ # If every region is below threshold, keep largest
288
+ if len(fill_labels) == 0:
289
+ fill_labels = [int(np.argmax(sizes)) + 1]
290
+ mask = np.isin(regions, fill_labels)
291
+ return mask, True
292
+
293
+
294
+ def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
295
+ from pycocotools import mask as mask_utils # type: ignore
296
+
297
+ h, w = uncompressed_rle["size"]
298
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
299
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
300
+ return rle
301
+
302
+
303
+ def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
304
+ """
305
+ Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
306
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
307
+ """
308
+ # torch.max below raises an error on empty inputs, just skip in this case
309
+ if torch.numel(masks) == 0:
310
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
311
+
312
+ # Normalize shape to CxHxW
313
+ shape = masks.shape
314
+ h, w = shape[-2:]
315
+ if len(shape) > 2:
316
+ masks = masks.flatten(0, -3)
317
+ else:
318
+ masks = masks.unsqueeze(0)
319
+
320
+ # Get top and bottom edges
321
+ in_height, _ = torch.max(masks, dim=-1)
322
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
323
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
324
+ in_height_coords = in_height_coords + h * (~in_height)
325
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
326
+
327
+ # Get left and right edges
328
+ in_width, _ = torch.max(masks, dim=-2)
329
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
330
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
331
+ in_width_coords = in_width_coords + w * (~in_width)
332
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
333
+
334
+ # If the mask is empty the right edge will be to the left of the left edge.
335
+ # Replace these boxes with [0, 0, 0, 0]
336
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
337
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
338
+ out = out * (~empty_filter).unsqueeze(-1)
339
+
340
+ # Return to original shape
341
+ if len(shape) > 2:
342
+ out = out.reshape(*shape[:-2], 4)
343
+ else:
344
+ out = out[0]
345
+
346
+ return out
sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/onnx.py ADDED
@@ -0,0 +1,155 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import Tuple
12
+
13
+ from ..modeling import Sam
14
+ from .amg import calculate_stability_score
15
+
16
+
17
+ class SamOnnxModel(nn.Module):
18
+ """
19
+ This model should not be called directly, but is used in ONNX export.
20
+ It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
21
+ with some functions modified to enable model tracing. Also supports extra
22
+ options controlling what information is returned. See the ONNX export script for details.
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ model: Sam,
28
+ hq_token_only: bool = False,
29
+ multimask_output: bool = False,
30
+ use_stability_score: bool = False,
31
+ return_extra_metrics: bool = False,
32
+ ) -> None:
33
+ super().__init__()
34
+ self.mask_decoder = model.mask_decoder
35
+ self.model = model
36
+ self.img_size = model.image_encoder.img_size
37
+ self.hq_token_only = hq_token_only
38
+ self.multimask_output = multimask_output
39
+ self.use_stability_score = use_stability_score
40
+ self.stability_score_offset = 1.0
41
+ self.return_extra_metrics = return_extra_metrics
42
+
43
+ @staticmethod
44
+ def resize_longest_image_size(
45
+ input_image_size: torch.Tensor, longest_side: int
46
+ ) -> torch.Tensor:
47
+ input_image_size = input_image_size.to(torch.float32)
48
+ scale = longest_side / torch.max(input_image_size)
49
+ transformed_size = scale * input_image_size
50
+ transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
51
+ return transformed_size
52
+
53
+ def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
54
+ point_coords = point_coords + 0.5
55
+ point_coords = point_coords / self.img_size
56
+ point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
57
+ point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
58
+
59
+ point_embedding = point_embedding * (point_labels != -1)
60
+ point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
61
+ point_labels == -1
62
+ )
63
+
64
+ for i in range(self.model.prompt_encoder.num_point_embeddings):
65
+ point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
66
+ i
67
+ ].weight * (point_labels == i)
68
+
69
+ return point_embedding
70
+
71
+ def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
72
+ mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
73
+ mask_embedding = mask_embedding + (
74
+ 1 - has_mask_input
75
+ ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
76
+ return mask_embedding
77
+
78
+ def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
79
+ masks = F.interpolate(
80
+ masks,
81
+ size=(self.img_size, self.img_size),
82
+ mode="bilinear",
83
+ align_corners=False,
84
+ )
85
+
86
+ prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64)
87
+ masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
88
+
89
+ orig_im_size = orig_im_size.to(torch.int64)
90
+ h, w = orig_im_size[0], orig_im_size[1]
91
+ masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
92
+ return masks
93
+
94
+
95
+ @torch.no_grad()
96
+ def forward(
97
+ self,
98
+ image_embeddings: torch.Tensor,
99
+ interm_embeddings: torch.Tensor,
100
+ point_coords: torch.Tensor,
101
+ point_labels: torch.Tensor,
102
+ mask_input: torch.Tensor,
103
+ has_mask_input: torch.Tensor,
104
+ orig_im_size: torch.Tensor,
105
+ ):
106
+ sparse_embedding = self._embed_points(point_coords, point_labels)
107
+ dense_embedding = self._embed_masks(mask_input, has_mask_input)
108
+
109
+ vit_features = interm_embeddings[0].permute(0, 3, 1, 2) # early-layer ViT feature, after 1st global attention block in ViT
110
+ hq_features = self.model.mask_decoder.embedding_encoder(image_embeddings) + self.model.mask_decoder.compress_vit_feat(vit_features)
111
+
112
+ masks, scores = self.model.mask_decoder.predict_masks(
113
+ image_embeddings=image_embeddings,
114
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
115
+ sparse_prompt_embeddings=sparse_embedding,
116
+ dense_prompt_embeddings=dense_embedding,
117
+ hq_features=hq_features,
118
+ )
119
+
120
+ if self.use_stability_score:
121
+ scores = calculate_stability_score(
122
+ masks, self.model.mask_threshold, self.stability_score_offset
123
+ )
124
+
125
+ if self.multimask_output:
126
+ # mask with highest score
127
+ mask_slice = slice(1,self.model.mask_decoder.num_mask_tokens-1)
128
+ scores = scores[:, mask_slice]
129
+ scores, max_iou_idx = torch.max(scores,dim=1)
130
+ scores = scores.unsqueeze(1)
131
+ masks_multi = masks[:, mask_slice, :, :]
132
+ masks_sam = masks_multi[torch.arange(masks_multi.size(0)),max_iou_idx].unsqueeze(1)
133
+ else:
134
+ # single mask output, default
135
+ mask_slice = slice(0, 1)
136
+ scores = scores[:,mask_slice]
137
+ masks_sam = masks[:,mask_slice]
138
+
139
+ masks_hq = masks[:,slice(self.model.mask_decoder.num_mask_tokens-1, self.model.mask_decoder.num_mask_tokens)]
140
+
141
+ if self.hq_token_only:
142
+ masks = masks_hq
143
+ else:
144
+ masks = masks_sam + masks_hq
145
+
146
+ upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
147
+
148
+ if self.return_extra_metrics:
149
+ stability_scores = calculate_stability_score(
150
+ upscaled_masks, self.model.mask_threshold, self.stability_score_offset
151
+ )
152
+ areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
153
+ return upscaled_masks, scores, stability_scores, areas, masks
154
+
155
+ return upscaled_masks, scores, masks
sam2.1HQ/sam-hq-main/build/lib/segment_anything/utils/transforms.py ADDED
@@ -0,0 +1,102 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch.nn import functional as F
10
+ from torchvision.transforms.functional import resize, to_pil_image # type: ignore
11
+
12
+ from copy import deepcopy
13
+ from typing import Tuple
14
+
15
+
16
+ class ResizeLongestSide:
17
+ """
18
+ Resizes images to the longest side 'target_length', as well as provides
19
+ methods for resizing coordinates and boxes. Provides methods for
20
+ transforming both numpy array and batched torch tensors.
21
+ """
22
+
23
+ def __init__(self, target_length: int) -> None:
24
+ self.target_length = target_length
25
+
26
+ def apply_image(self, image: np.ndarray) -> np.ndarray:
27
+ """
28
+ Expects a numpy array with shape HxWxC in uint8 format.
29
+ """
30
+ target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
31
+ return np.array(resize(to_pil_image(image), target_size))
32
+
33
+ def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
34
+ """
35
+ Expects a numpy array of length 2 in the final dimension. Requires the
36
+ original image size in (H, W) format.
37
+ """
38
+ old_h, old_w = original_size
39
+ new_h, new_w = self.get_preprocess_shape(
40
+ original_size[0], original_size[1], self.target_length
41
+ )
42
+ coords = deepcopy(coords).astype(float)
43
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
44
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
45
+ return coords
46
+
47
+ def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
48
+ """
49
+ Expects a numpy array shape Bx4. Requires the original image size
50
+ in (H, W) format.
51
+ """
52
+ boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
53
+ return boxes.reshape(-1, 4)
54
+
55
+ def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
56
+ """
57
+ Expects batched images with shape BxCxHxW and float format. This
58
+ transformation may not exactly match apply_image. apply_image is
59
+ the transformation expected by the model.
60
+ """
61
+ # Expects an image in BCHW format. May not exactly match apply_image.
62
+ target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
63
+ return F.interpolate(
64
+ image, target_size, mode="bilinear", align_corners=False, antialias=True
65
+ )
66
+
67
+ def apply_coords_torch(
68
+ self, coords: torch.Tensor, original_size: Tuple[int, ...]
69
+ ) -> torch.Tensor:
70
+ """
71
+ Expects a torch tensor with length 2 in the last dimension. Requires the
72
+ original image size in (H, W) format.
73
+ """
74
+ old_h, old_w = original_size
75
+ new_h, new_w = self.get_preprocess_shape(
76
+ original_size[0], original_size[1], self.target_length
77
+ )
78
+ coords = deepcopy(coords).to(torch.float)
79
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
80
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
81
+ return coords
82
+
83
+ def apply_boxes_torch(
84
+ self, boxes: torch.Tensor, original_size: Tuple[int, ...]
85
+ ) -> torch.Tensor:
86
+ """
87
+ Expects a torch tensor with shape Bx4. Requires the original image
88
+ size in (H, W) format.
89
+ """
90
+ boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
91
+ return boxes.reshape(-1, 4)
92
+
93
+ @staticmethod
94
+ def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
95
+ """
96
+ Compute the output size given input size and target long side length.
97
+ """
98
+ scale = long_side_length * 1.0 / max(oldh, oldw)
99
+ newh, neww = oldh * scale, oldw * scale
100
+ neww = int(neww + 0.5)
101
+ newh = int(newh + 0.5)
102
+ return (newh, neww)
sam2.1HQ/sam-hq-main/demo/demo_hqsam.py ADDED
@@ -0,0 +1,147 @@
1
+ import numpy as np
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import cv2
5
+ from segment_anything import sam_model_registry, SamPredictor
6
+ import os
7
+
8
+ def show_mask(mask, ax, random_color=False):
9
+ if random_color:
10
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
11
+ else:
12
+ color = np.array([30/255, 144/255, 255/255, 0.6])
13
+ h, w = mask.shape[-2:]
14
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
15
+ ax.imshow(mask_image)
16
+
17
+ def show_points(coords, labels, ax, marker_size=375):
18
+ pos_points = coords[labels==1]
19
+ neg_points = coords[labels==0]
20
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
21
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
22
+
23
+ def show_box(box, ax):
24
+ x0, y0 = box[0], box[1]
25
+ w, h = box[2] - box[0], box[3] - box[1]
26
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
27
+
28
+
29
+ def show_res(masks, scores, input_point, input_label, input_box, filename, image):
30
+ for i, (mask, score) in enumerate(zip(masks, scores)):
31
+ plt.figure(figsize=(10,10))
32
+ plt.imshow(image)
33
+ show_mask(mask, plt.gca())
34
+ if input_box is not None:
35
+ box = input_box[i]
36
+ show_box(box, plt.gca())
37
+ if (input_point is not None) and (input_label is not None):
38
+ show_points(input_point, input_label, plt.gca())
39
+
40
+ print(f"Score: {score:.3f}")
41
+ plt.axis('off')
42
+ plt.savefig(filename+'_'+str(i)+'.png',bbox_inches='tight',pad_inches=-0.1)
43
+ plt.close()
44
+
45
+ def show_res_multi(masks, scores, input_point, input_label, input_box, filename, image):
46
+ plt.figure(figsize=(10, 10))
47
+ plt.imshow(image)
48
+ for mask in masks:
49
+ show_mask(mask, plt.gca(), random_color=True)
50
+ for box in input_box:
51
+ show_box(box, plt.gca())
52
+ for score in scores:
53
+ print(f"Score: {score:.3f}")
54
+ plt.axis('off')
55
+ plt.savefig(filename +'.png',bbox_inches='tight',pad_inches=-0.1)
56
+ plt.close()
57
+
58
+
59
+ if __name__ == "__main__":
60
+ sam_checkpoint = "./pretrained_checkpoint/sam_hq_vit_l.pth"
61
+ model_type = "vit_l"
62
+ device = "cuda"
63
+ sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
64
+ sam.to(device=device)
65
+ predictor = SamPredictor(sam)
66
+
67
+ for i in range(8):
68
+ print("image: ",i)
69
+ # hq_token_only: False means use hq output to correct SAM output.
70
+ # True means use hq output only.
71
+ # Default: False
72
+ hq_token_only = False
73
+ # To achieve the best visualization results on images containing multiple objects (like typical COCO images), we suggest setting hq_token_only=False.
74
+ # For images containing a single object, we suggest setting hq_token_only=True.
75
+ # For quantitative evaluation on COCO/YTVOS/DAVIS/UVO/LVIS etc., we set hq_token_only=False.
76
+
77
+ image = cv2.imread('demo/input_imgs/example'+str(i)+'.png')
78
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
79
+ predictor.set_image(image)
80
+
81
+ if i==0:
82
+ input_box = np.array([[4,13,1007,1023]])
83
+ input_point, input_label = None, None
84
+ elif i==1:
85
+ input_box = np.array([[306, 132, 925, 893]])
86
+ input_point, input_label = None, None
87
+ hq_token_only = True
88
+ elif i==2:
89
+ input_point = np.array([[495,518],[217,140]])
90
+ input_label = np.ones(input_point.shape[0])
91
+ input_box = None
92
+ hq_token_only = True
93
+ elif i==3:
94
+ input_point = np.array([[221,482],[498,633],[750,379]])
95
+ input_label = np.ones(input_point.shape[0])
96
+ input_box = None
97
+ elif i==4:
98
+ input_box = np.array([[64,76,940,919]])
99
+ input_point, input_label = None, None
100
+ hq_token_only = True
101
+ elif i==5:
102
+ input_point = np.array([[373,363], [452, 575]])
103
+ input_label = np.ones(input_point.shape[0])
104
+ input_box = None
105
+ elif i==6:
106
+ input_box = np.array([[181, 196, 757, 495]])
107
+ input_point, input_label = None, None
108
+ elif i==7:
109
+ # multi box input
110
+ input_box = torch.tensor([[45,260,515,470], [310,228,424,296]],device=predictor.device)
111
+ transformed_box = predictor.transform.apply_boxes_torch(input_box, image.shape[:2])
112
+ input_point, input_label = None, None
113
+
114
+ batch_box = False if input_box is None else len(input_box)>1
115
+ result_path = 'demo/hq_sam_result/'
116
+ os.makedirs(result_path, exist_ok=True)
117
+
118
+ if not batch_box:
119
+ masks, scores, logits = predictor.predict(
120
+ point_coords=input_point,
121
+ point_labels=input_label,
122
+ box = input_box,
123
+ multimask_output=False,
124
+ hq_token_only=hq_token_only,
125
+ )
126
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'example'+str(i), image)
127
+
128
+ else:
129
+ masks, scores, logits = predictor.predict_torch(
130
+ point_coords=input_point,
131
+ point_labels=input_label,
132
+ boxes=transformed_box,
133
+ multimask_output=False,
134
+ hq_token_only=hq_token_only,
135
+ )
136
+ masks = masks.squeeze(1).cpu().numpy()
137
+ scores = scores.squeeze(1).cpu().numpy()
138
+ input_box = input_box.cpu().numpy()
139
+ show_res_multi(masks, scores, input_point, input_label, input_box, result_path + 'example'+str(i), image)
140
+
141
+
142
+
143
+
144
+
145
+
146
+
147
+
sam2.1HQ/sam-hq-main/demo/demo_hqsam_light.py ADDED
@@ -0,0 +1,141 @@
1
+ import numpy as np
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import cv2
5
+ from segment_anything import sam_model_registry, SamPredictor
6
+ import os
7
+
8
+ def show_mask(mask, ax, random_color=False):
9
+ if random_color:
10
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
11
+ else:
12
+ color = np.array([30/255, 144/255, 255/255, 0.6])
13
+ h, w = mask.shape[-2:]
14
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
15
+ ax.imshow(mask_image)
16
+
17
+ def show_points(coords, labels, ax, marker_size=375):
18
+ pos_points = coords[labels==1]
19
+ neg_points = coords[labels==0]
20
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
21
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
22
+
23
+ def show_box(box, ax):
24
+ x0, y0 = box[0], box[1]
25
+ w, h = box[2] - box[0], box[3] - box[1]
26
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
27
+
28
+
29
+ def show_res(masks, scores, input_point, input_label, input_box, filename, image):
30
+ for i, (mask, score) in enumerate(zip(masks, scores)):
31
+ plt.figure(figsize=(10,10))
32
+ plt.imshow(image)
33
+ show_mask(mask, plt.gca())
34
+ if input_box is not None:
35
+ box = input_box[i]
36
+ show_box(box, plt.gca())
37
+ if (input_point is not None) and (input_label is not None):
38
+ show_points(input_point, input_label, plt.gca())
39
+
40
+ print(f"Score: {score:.3f}")
41
+ plt.axis('off')
42
+ plt.savefig(filename+'_'+str(i)+'.png',bbox_inches='tight',pad_inches=-0.1)
43
+ plt.close()
44
+
45
+ def show_res_multi(masks, scores, input_point, input_label, input_box, filename, image):
46
+ plt.figure(figsize=(10, 10))
47
+ plt.imshow(image)
48
+ for mask in masks:
49
+ show_mask(mask, plt.gca(), random_color=True)
50
+ for box in input_box:
51
+ show_box(box, plt.gca())
52
+ for score in scores:
53
+ print(f"Score: {score:.3f}")
54
+ plt.axis('off')
55
+ plt.savefig(filename +'.png',bbox_inches='tight',pad_inches=-0.1)
56
+ plt.close()
57
+
58
+
59
+ if __name__ == "__main__":
60
+ sam_checkpoint = "./pretrained_checkpoint/sam_hq_vit_tiny.pth"
61
+ model_type = "vit_tiny"
62
+
63
+ device = "cuda"
64
+ sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
65
+ sam.to(device=device)
66
+ sam.eval()
67
+ predictor = SamPredictor(sam)
68
+
69
+
70
+ image = cv2.imread('demo/input_imgs/dog.jpg')
71
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
72
+ predictor.set_image(image)
73
+ # hq_token_only: False means use hq output to correct SAM output.
74
+ # True means use hq output only.
75
+ # Default: False
76
+ hq_token_only = False
77
+ # To achieve the best visualization results on images containing multiple objects (like typical COCO images), we suggest setting hq_token_only=False.
78
+ # For images containing a single object, we suggest setting hq_token_only=True.
79
+ # For quantitative evaluation on COCO/YTVOS/DAVIS/UVO/LVIS etc., we set hq_token_only=False.
80
+
81
+ # box prompt
82
+ input_box = np.array([[784,500,1789,1000]])
83
+ input_point, input_label = None, None
84
+
85
+ masks, scores, logits = predictor.predict(
86
+ point_coords=input_point,
87
+ point_labels=input_label,
88
+ box = input_box,
89
+ multimask_output=False,
90
+ hq_token_only=hq_token_only,
91
+ )
92
+ result_path = 'demo/hq_sam_tiny_result/'
93
+ os.makedirs(result_path, exist_ok=True)
94
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'dog', image)
95
+
96
+
97
+
98
+ image = cv2.imread('demo/input_imgs/example3.png')
99
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
100
+ predictor.set_image(image)
101
+ hq_token_only = True
102
+ # point prompt
103
+ input_point = np.array([[221,482],[498,633],[750,379]])
104
+ input_label = np.ones(input_point.shape[0])
105
+ input_box = None
106
+
107
+ masks, scores, logits = predictor.predict(
108
+ point_coords=input_point,
109
+ point_labels=input_label,
110
+ box = input_box,
111
+ multimask_output=False,
112
+ hq_token_only=hq_token_only,
113
+ )
114
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'example3', image)
115
+
116
+
117
+ image = cv2.imread('demo/input_imgs/example7.png')
118
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
119
+ predictor.set_image(image)
120
+ hq_token_only = False
121
+ # multi box prompt
122
+ input_box = torch.tensor([[45,260,515,470], [310,228,424,296]],device=predictor.device)
123
+ transformed_box = predictor.transform.apply_boxes_torch(input_box, image.shape[:2])
124
+ input_point, input_label = None, None
125
+ masks, scores, logits = predictor.predict_torch(
126
+ point_coords=input_point,
127
+ point_labels=input_label,
128
+ boxes=transformed_box,
129
+ multimask_output=False,
130
+ hq_token_only=hq_token_only,
131
+ )
132
+ masks = masks.squeeze(1).cpu().numpy()
133
+ scores = scores.squeeze(1).cpu().numpy()
134
+ input_box = input_box.cpu().numpy()
135
+ show_res_multi(masks, scores, input_point, input_label, input_box, result_path + 'example7', image)
136
+
137
+
138
+
139
+
140
+
141
+
sam2.1HQ/sam-hq-main/demo/demo_hqsam_pip_example.py ADDED
@@ -0,0 +1,141 @@
1
+ import numpy as np
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import cv2
5
+ from segment_anything_hq import sam_model_registry, SamPredictor
6
+ import os
7
+
8
+ def show_mask(mask, ax, random_color=False):
9
+ if random_color:
10
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
11
+ else:
12
+ color = np.array([30/255, 144/255, 255/255, 0.6])
13
+ h, w = mask.shape[-2:]
14
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
15
+ ax.imshow(mask_image)
16
+
17
+ def show_points(coords, labels, ax, marker_size=375):
18
+ pos_points = coords[labels==1]
19
+ neg_points = coords[labels==0]
20
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
21
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
22
+
23
+ def show_box(box, ax):
24
+ x0, y0 = box[0], box[1]
25
+ w, h = box[2] - box[0], box[3] - box[1]
26
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
27
+
28
+
29
+ def show_res(masks, scores, input_point, input_label, input_box, filename, image):
30
+ for i, (mask, score) in enumerate(zip(masks, scores)):
31
+ plt.figure(figsize=(10,10))
32
+ plt.imshow(image)
33
+ show_mask(mask, plt.gca())
34
+ if input_box is not None:
35
+ box = input_box[i]
36
+ show_box(box, plt.gca())
37
+ if (input_point is not None) and (input_label is not None):
38
+ show_points(input_point, input_label, plt.gca())
39
+
40
+ print(f"Score: {score:.3f}")
41
+ plt.axis('off')
42
+ plt.savefig(filename+'_'+str(i)+'.png',bbox_inches='tight',pad_inches=-0.1)
43
+ plt.close()
44
+
45
+ def show_res_multi(masks, scores, input_point, input_label, input_box, filename, image):
46
+ plt.figure(figsize=(10, 10))
47
+ plt.imshow(image)
48
+ for mask in masks:
49
+ show_mask(mask, plt.gca(), random_color=True)
50
+ for box in input_box:
51
+ show_box(box, plt.gca())
52
+ for score in scores:
53
+ print(f"Score: {score:.3f}")
54
+ plt.axis('off')
55
+ plt.savefig(filename +'.png',bbox_inches='tight',pad_inches=-0.1)
56
+ plt.close()
57
+
58
+
59
+ if __name__ == "__main__":
60
+ sam_checkpoint = "./pretrained_checkpoint/sam_hq_vit_l.pth"
61
+ model_type = "vit_l"
62
+ device = "cuda"
63
+ sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
64
+ sam.to(device=device)
65
+ predictor = SamPredictor(sam)
66
+
67
+ for i in range(8):
68
+ print("image: ",i)
69
+ # hq_token_only: False means use hq output to correct SAM output.
70
+ # True means use hq output only.
71
+ # Default: False
72
+ hq_token_only = False
73
+ # To achieve the best visualization results on images containing multiple objects (like typical COCO images), we suggest setting hq_token_only=False.
74
+ # For images containing a single object, we suggest setting hq_token_only=True.
75
+ # For quantitative evaluation on COCO/YTVOS/DAVIS/UVO/LVIS etc., we set hq_token_only=False.
76
+
77
+ image = cv2.imread('demo/input_imgs/example'+str(i)+'.png')
78
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
79
+ predictor.set_image(image)
80
+
81
+ if i==0:
82
+ input_box = np.array([[4,13,1007,1023]])
83
+ input_point, input_label = None, None
84
+ elif i==1:
85
+ input_box = np.array([[306, 132, 925, 893]])
86
+ input_point, input_label = None, None
87
+ hq_token_only = True
88
+ elif i==2:
89
+ input_point = np.array([[495,518],[217,140]])
90
+ input_label = np.ones(input_point.shape[0])
91
+ input_box = None
92
+ hq_token_only = True
93
+ elif i==3:
94
+ input_point = np.array([[221,482],[498,633],[750,379]])
95
+ input_label = np.ones(input_point.shape[0])
96
+ input_box = None
97
+ elif i==4:
98
+ input_box = np.array([[64,76,940,919]])
99
+ input_point, input_label = None, None
100
+ hq_token_only = True
101
+ elif i==5:
102
+ input_point = np.array([[373,363], [452, 575]])
103
+ input_label = np.ones(input_point.shape[0])
104
+ input_box = None
105
+ elif i==6:
106
+ input_box = np.array([[181, 196, 757, 495]])
107
+ input_point, input_label = None, None
108
+ elif i==7:
109
+ # multi box input
110
+ input_box = torch.tensor([[45,260,515,470], [310,228,424,296]],device=predictor.device)
111
+ transformed_box = predictor.transform.apply_boxes_torch(input_box, image.shape[:2])
112
+ input_point, input_label = None, None
113
+
114
+ batch_box = False if input_box is None else len(input_box)>1
115
+ result_path = 'demo/hq_sam_result/'
116
+ os.makedirs(result_path, exist_ok=True)
117
+
118
+ if not batch_box:
119
+ masks, scores, logits = predictor.predict(
120
+ point_coords=input_point,
121
+ point_labels=input_label,
122
+ box = input_box,
123
+ multimask_output=False,
124
+ hq_token_only=hq_token_only,
125
+ )
126
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'example'+str(i), image)
127
+
128
+ else:
129
+ masks, scores, logits = predictor.predict_torch(
130
+ point_coords=input_point,
131
+ point_labels=input_label,
132
+ boxes=transformed_box,
133
+ multimask_output=False,
134
+ hq_token_only=hq_token_only,
135
+ )
136
+ masks = masks.squeeze(1).cpu().numpy()
137
+ scores = scores.squeeze(1).cpu().numpy()
138
+ input_box = input_box.cpu().numpy()
139
+ show_res_multi(masks, scores, input_point, input_label, input_box, result_path + 'example'+str(i), image)
140
+
141
+
sam2.1HQ/sam-hq-main/demo/demo_sam.py ADDED
@@ -0,0 +1,127 @@
1
+ import numpy as np
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import cv2
5
+ from segment_anything import sam_model_registry_baseline, SamPredictor
6
+ import os
7
+
8
+ def show_mask(mask, ax, random_color=False):
9
+ if random_color:
10
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
11
+ else:
12
+ color = np.array([30/255, 144/255, 255/255, 0.6])
13
+ h, w = mask.shape[-2:]
14
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
15
+ ax.imshow(mask_image)
16
+
17
+ def show_points(coords, labels, ax, marker_size=375):
18
+ pos_points = coords[labels==1]
19
+ neg_points = coords[labels==0]
20
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
21
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
22
+
23
+ def show_box(box, ax):
24
+ x0, y0 = box[0], box[1]
25
+ w, h = box[2] - box[0], box[3] - box[1]
26
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
27
+
28
+
29
+ def show_res(masks, scores, input_point, input_label, input_box, filename, image):
30
+ for i, (mask, score) in enumerate(zip(masks, scores)):
31
+ plt.figure(figsize=(10,10))
32
+ plt.imshow(image)
33
+ show_mask(mask, plt.gca())
34
+ if input_box is not None:
35
+ box = input_box[i]
36
+ show_box(box, plt.gca())
37
+ if (input_point is not None) and (input_label is not None):
38
+ show_points(input_point, input_label, plt.gca())
39
+
40
+ print(f"Score: {score:.3f}")
41
+ plt.axis('off')
42
+ plt.savefig(filename+'_'+str(i)+'.png',bbox_inches='tight',pad_inches=-0.1)
43
+ plt.close()
44
+
45
+ def show_res_multi(masks, scores, input_point, input_label, input_box, filename, image):
46
+ plt.figure(figsize=(10, 10))
47
+ plt.imshow(image)
48
+ for mask in masks:
49
+ show_mask(mask, plt.gca(), random_color=True)
50
+ for box in input_box:
51
+ show_box(box, plt.gca())
52
+ for score in scores:
53
+ print(f"Score: {score:.3f}")
54
+ plt.axis('off')
55
+ plt.savefig(filename +'.png',bbox_inches='tight',pad_inches=-0.1)
56
+ plt.close()
57
+
58
+ if __name__ == "__main__":
59
+ sam_checkpoint = "./pretrained_checkpoint/sam_vit_l_0b3195.pth"
60
+ model_type = "vit_l"
61
+ device = "cuda"
62
+ sam = sam_model_registry_baseline[model_type](checkpoint=sam_checkpoint)
63
+ sam.to(device=device)
64
+ predictor = SamPredictor(sam)
65
+
66
+ for i in range(8):
67
+ print("image: ",i)
68
+ image = cv2.imread('demo/input_imgs/example'+str(i)+'.png')
69
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
70
+ predictor.set_image(image)
71
+
72
+ if i==0:
73
+ input_box = np.array([[4,13,1007,1023]])
74
+ input_point, input_label = None, None
75
+ elif i==1:
76
+ input_box = np.array([[306, 132, 925, 893]])
77
+ input_point, input_label = None, None
78
+ elif i==2:
79
+ input_point = np.array([[495,518],[217,140]])
80
+ input_label = np.ones(input_point.shape[0])
81
+ input_box = None
82
+ elif i==3:
83
+ input_point = np.array([[221,482],[498,633],[750,379]])
84
+ input_label = np.ones(input_point.shape[0])
85
+ input_box = None
86
+ elif i==4:
87
+ input_box = np.array([[64,76,940,919]])
88
+ input_point, input_label = None, None
89
+ elif i==5:
90
+ input_point = np.array([[373,363], [452, 575]])
91
+ input_label = np.ones(input_point.shape[0])
92
+ input_box = None
93
+ elif i==6:
94
+ input_box = np.array([[181, 196, 757, 495]])
95
+ input_point, input_label = None, None
96
+ elif i==7:
97
+ # multi box input
98
+ input_box = torch.tensor([[45,260,515,470], [310,228,424,296]],device=predictor.device)
99
+ transformed_box = predictor.transform.apply_boxes_torch(input_box, image.shape[:2])
100
+ input_point, input_label = None, None
101
+
102
+ batch_box = False if input_box is None else len(input_box)>1
103
+ result_path = 'demo/baseline_sam_result/'
104
+ os.makedirs(result_path, exist_ok=True)
105
+
106
+ if not batch_box:
107
+ masks, scores, logits = predictor.predict(
108
+ point_coords=input_point,
109
+ point_labels=input_label,
110
+ box = input_box,
111
+ multimask_output=False,
112
+ )
113
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'example'+str(i), image)
114
+ else:
115
+ masks, scores, logits = predictor.predict_torch(
116
+ point_coords=input_point,
117
+ point_labels=input_label,
118
+ boxes=transformed_box,
119
+ multimask_output=False,
120
+ )
121
+ masks = masks.squeeze(1).cpu().numpy()
122
+ scores = scores.squeeze(1).cpu().numpy()
123
+ input_box = input_box.cpu().numpy()
124
+ show_res_multi(masks, scores, input_point, input_label, input_box, result_path + 'example'+str(i), image)
125
+
126
+
127
+
sam2.1HQ/sam-hq-main/demo/input_imgs/dog.jpg ADDED

Git LFS Details

  • SHA256: a5062538fc67074179eb884fb1d514854af6e759bc8ac623f94035835472937e
  • Pointer size: 131 Bytes
  • Size of remote file: 222 kB
sam2.1HQ/sam-hq-main/demo/input_imgs/example0.png ADDED

Git LFS Details

  • SHA256: 75b113e521d89addb6c48344ef27fefd0f494eafc703e9d0657978929fce4601
  • Pointer size: 132 Bytes
  • Size of remote file: 2.32 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example1.png ADDED

Git LFS Details

  • SHA256: 8533657226d481a90f648dda0a05c81c69d3135ce6ca4d74838167d9e61a8116
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example2.png ADDED

Git LFS Details

  • SHA256: d42a70173297297b654cd067e7ed3de717c3d2b37fd6d13b0396e5fc58449850
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example3.png ADDED

Git LFS Details

  • SHA256: 8286ff50b6f2a6929deab438d96a9c16443b422b0e3dc2f95e1885e634532753
  • Pointer size: 132 Bytes
  • Size of remote file: 2.57 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example4.png ADDED

Git LFS Details

  • SHA256: 866820ace9a150b791c00f955c2b436fc72a2e6a43b36187aba975be196161c4
  • Pointer size: 132 Bytes
  • Size of remote file: 2.32 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example5.png ADDED

Git LFS Details

  • SHA256: 453a3e1627effb4d8ed6049e8d457ebe7f869537acf8e2846b36cc62ee23d1a6
  • Pointer size: 132 Bytes
  • Size of remote file: 1.22 MB
sam2.1HQ/sam-hq-main/demo/input_imgs/example6.png ADDED

Git LFS Details

  • SHA256: 631bc19a9b5a3bd291de6375abf63c33234aaee5194ca95245d418581ff294d1
  • Pointer size: 131 Bytes
  • Size of remote file: 384 kB
sam2.1HQ/sam-hq-main/demo/input_imgs/example7.png ADDED
sam2.1HQ/sam-hq-main/demo/input_imgs/example8.png ADDED

Git LFS Details

  • SHA256: bd91c443e78d3422d0b845f69a456938c6ce858d20b1640c84fabba3b8929442
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
sam2.1HQ/sam-hq-main/figs/coco_vis_comp.png ADDED

Git LFS Details

  • SHA256: 4c175cdfa66170f1ddd39b8c33bf06a194421a822b0c331bc239d298280f73e1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.98 MB
sam2.1HQ/sam-hq-main/figs/davis.png ADDED
sam2.1HQ/sam-hq-main/figs/points_comp.png ADDED

Git LFS Details

  • SHA256: a6b04d8c08bedcebc10cc349f3cd7a040a031ef3ba6f789b7807164e6e7b5bd4
  • Pointer size: 131 Bytes
  • Size of remote file: 950 kB
sam2.1HQ/sam-hq-main/figs/sam-hf-framework.png ADDED

Git LFS Details

  • SHA256: 3dabe5c3cd7b20baf01cd44c7f78029e06a3ba0e1ba4e5cc80e3f96c6f1bc527
  • Pointer size: 131 Bytes
  • Size of remote file: 737 kB
sam2.1HQ/sam-hq-main/figs/sam_variants_comp.png ADDED

Git LFS Details

  • SHA256: 6decd1e5e430c343fda96afc55c6d4db128cabea89586a640568821127af98cd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.28 MB
sam2.1HQ/sam-hq-main/figs/sam_vs_hqsam_backbones.png ADDED

Git LFS Details

  • SHA256: b6b1e99cd91277d1a4be3e6f7ffd28a4002a8393b1050896b7a824e2818020a9
  • Pointer size: 131 Bytes
  • Size of remote file: 119 kB
sam2.1HQ/sam-hq-main/figs/ytvis.png ADDED
sam2.1HQ/sam-hq-main/sam-hq2/INSTALL.md ADDED
@@ -0,0 +1,189 @@
1
+ ## Installation
2
+
3
+ ### Requirements
4
+
5
+ - Linux with Python ≥ 3.10, PyTorch ≥ 2.3.1 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. Install them together at https://pytorch.org to ensure this.
6
+ * Note older versions of Python or PyTorch may also work. However, the versions above are strongly recommended to provide all features such as `torch.compile`.
7
+ - [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) that match the CUDA version for your PyTorch installation. This should typically be CUDA 12.1 if you follow the default installation command.
8
+ - If you are installing on Windows, it's strongly recommended to use [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install) with Ubuntu.
9
+
10
+ Then, install SAM 2 from the root of this repository via
11
+ ```bash
12
+ pip install -e ".[notebooks]"
13
+ ```
14
+
15
+ Note that you may skip building the SAM 2 CUDA extension during installation via environment variable `SAM2_BUILD_CUDA=0`, as follows:
16
+ ```bash
17
+ # skip the SAM 2 CUDA extension
18
+ SAM2_BUILD_CUDA=0 pip install -e ".[notebooks]"
19
+ ```
20
+ This would also skip the post-processing step at runtime (removing small holes and sprinkles in the output masks, which requires the CUDA extension), but shouldn't affect the results in most cases.
21
+
22
+ ### Building the SAM 2 CUDA extension
23
+
24
+ By default, we allow the installation to proceed even if the SAM 2 CUDA extension fails to build. (In this case, the build errors are hidden unless using `-v` for verbose output in `pip install`.)
25
+
26
+ If you see a message like `Skipping the post-processing step due to the error above` at runtime or `Failed to build the SAM 2 CUDA extension due to the error above` during installation, it indicates that the SAM 2 CUDA extension failed to build in your environment. In this case, **you can still use SAM 2 for both image and video applications**. The post-processing step (removing small holes and sprinkles in the output masks) will be skipped, but this shouldn't affect the results in most cases.
27
+
28
+ If you would like to enable this post-processing step, you can reinstall SAM 2 on a GPU machine with environment variable `SAM2_BUILD_ALLOW_ERRORS=0` to force building the CUDA extension (and raise errors if it fails to build), as follows
29
+ ```bash
30
+ pip uninstall -y SAM-2 && \
31
+ rm -f ./sam2/*.so && \
32
+ SAM2_BUILD_ALLOW_ERRORS=0 pip install -v -e ".[notebooks]"
33
+ ```
34
+
35
+ Note that PyTorch needs to be installed first before building the SAM 2 CUDA extension. It's also necessary to install [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) that match the CUDA version for your PyTorch installation. (This should typically be CUDA 12.1 if you follow the default installation command.) After installing the CUDA toolkits, you can check its version via `nvcc --version`.
36
+
37
+ Please check the section below on common installation issues if the CUDA extension fails to build during installation or load at runtime.
38
+
39
+ ### Common Installation Issues
40
+
41
+ Click each issue for its solutions:
42
+
43
+ <details>
44
+ <summary>
45
+ I got `ImportError: cannot import name '_C' from 'sam2'`
46
+ </summary>
47
+ <br/>
48
+
49
+ This is usually because you haven't run the `pip install -e ".[notebooks]"` step above or the installation failed. Please install SAM 2 first, and see the other issues if your installation fails.
50
+
51
+ In some systems, you may need to run `python setup.py build_ext --inplace` in the SAM 2 repo root as suggested in https://github.com/facebookresearch/sam2/issues/77.
52
+ </details>
53
+
54
+ <details>
55
+ <summary>
56
+ I got `MissingConfigException: Cannot find primary config 'configs/sam2.1/sam2.1_hiera_l.yaml'`
57
+ </summary>
58
+ <br/>
59
+
60
+ This is usually because you haven't run the `pip install -e .` step above, so `sam2` isn't in your Python's `sys.path`. Please run this installation step. In case it still fails after the installation step, you may try manually adding the root of this repo to `PYTHONPATH` via
61
+ ```bash
62
+ export SAM2_REPO_ROOT=/path/to/sam2 # path to this repo
63
+ export PYTHONPATH="${SAM2_REPO_ROOT}:${PYTHONPATH}"
64
+ ```
65
+ to manually add `sam2_configs` into your Python's `sys.path`.
66
+
67
+ </details>
68
+
69
+ <details>
70
+ <summary>
71
+ I got `RuntimeError: Error(s) in loading state_dict for SAM2Base` when loading the new SAM 2.1 checkpoints
72
+ </summary>
73
+ <br/>
74
+
75
+ This is likely because you have installed a previous version of this repo, which doesn't have the new modules to support the SAM 2.1 checkpoints yet. Please try the following steps:
76
+
77
+ 1. pull the latest code from the `main` branch of this repo
78
+ 2. run `pip uninstall -y SAM-2` to uninstall any previous installations
79
+ 3. then install the latest repo again using `pip install -e ".[notebooks]"`
80
+
81
+ In case the steps above still don't resolve the error, please try running in your Python environment the following
82
+ ```python
83
+ from sam2.modeling import sam2_base
84
+
85
+ print(sam2_base.__file__)
86
+ ```
87
+ and check whether the content in the printed local path of `sam2/modeling/sam2_base.py` matches the latest one in https://github.com/facebookresearch/sam2/blob/main/sam2/modeling/sam2_base.py (e.g. whether your local file has `no_obj_embed_spatial`) to identify whether you're still using a previous installation.
88
+
89
+ </details>
90
+
91
+ <details>
92
+ <summary>
93
+ My installation failed with `CUDA_HOME environment variable is not set`
94
+ </summary>
95
+ <br/>
96
+
97
+ This usually happens because the installation step cannot find the CUDA toolkits (that contain the NVCC compiler) to build a custom CUDA kernel in SAM 2. Please install [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) with a version that matches the CUDA version for your PyTorch installation. If the error persists after installing CUDA toolkits, you may explicitly specify `CUDA_HOME` via
98
+ ```
99
+ export CUDA_HOME=/usr/local/cuda # change to your CUDA toolkit path
100
+ ```
101
+ and rerun the installation.
102
+
103
+ Also, you should make sure
104
+ ```
105
+ python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
106
+ ```
107
+ prints `(True, a directory with cuda)` to verify that the CUDA toolkits are correctly set up.
108
+
109
+ If you are still having problems after verifying that the CUDA toolkit is installed and the `CUDA_HOME` environment variable is set properly, you may have to add the `--no-build-isolation` flag to the pip command:
110
+ ```
111
+ pip install --no-build-isolation -e .
112
+ ```
113
+
114
+ </details>
115
+
116
+ <details>
117
+ <summary>
118
+ I got `undefined symbol: _ZN3c1015SmallVectorBaseIjE8grow_podEPKvmm` (or similar errors)
119
+ </summary>
120
+ <br/>
121
+
122
+ This usually happens because you have multiple versions of dependencies (PyTorch or CUDA) in your environment. During installation, the SAM 2 library is compiled against one version of the library, while at run time it links against another version. This is often because you have different versions of PyTorch or CUDA installed separately via `pip` or `conda`. You may delete one of the duplicates to keep only a single PyTorch and CUDA version.
123
+
124
+ In particular, if you have a lower PyTorch version than 2.3.1, it's recommended to upgrade to PyTorch 2.3.1 or higher first. Otherwise, the installation script will try to upgrade to the latest PyTorch using `pip`, which could sometimes lead to duplicated PyTorch installation if you have previously installed another PyTorch version using `conda`.
125
+
126
+ We have been building SAM 2 against PyTorch 2.3.1 internally. However, a few user comments (e.g. https://github.com/facebookresearch/sam2/issues/22, https://github.com/facebookresearch/sam2/issues/14) suggested that downgrading to PyTorch 2.1.0 might resolve this problem. In case the error persists, you may try changing the restriction from `torch>=2.3.1` to `torch>=2.1.0` in both [`pyproject.toml`](pyproject.toml) and [`setup.py`](setup.py) to allow PyTorch 2.1.0.
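+
+ As a quick diagnostic (a minimal sketch; adjust to your package manager), you can check which PyTorch/CUDA versions are actually active and whether duplicates are installed:
+ ```
+ # report the active PyTorch version and the CUDA version it was built with
+ python -c "import torch; print(torch.__version__, torch.version.cuda)"
+ # look for duplicate torch packages installed via pip or conda
+ pip list | grep -i torch
+ conda list | grep -i torch
+ ```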
127
+ </details>
128
+
129
+ <details>
130
+ <summary>
131
+ I got `CUDA error: no kernel image is available for execution on the device`
132
+ </summary>
133
+ <br/>
134
+
135
+ A possible cause could be that the CUDA kernel is somehow not compiled towards your GPU's CUDA [capability](https://developer.nvidia.com/cuda-gpus). This could happen if the installation is done in an environment different from the runtime (e.g. in a slurm system).
136
+
137
+ You can try pulling the latest code from the SAM 2 repo and running the following
138
+ ```
139
+ export TORCH_CUDA_ARCH_LIST=9.0 8.0 8.6 8.9 7.0 7.2 7.5 6.0`
140
+ ```
141
+ to manually specify the CUDA capability in the compilation target that matches your GPU.
142
+ </details>
143
+
144
+ <details>
145
+ <summary>
146
+ I got `RuntimeError: No available kernel. Aborting execution.` (or similar errors)
147
+ </summary>
148
+ <br/>
149
+
150
+ This is probably because your machine doesn't have a GPU or a compatible PyTorch version for Flash Attention (see also https://discuss.pytorch.org/t/using-f-scaled-dot-product-attention-gives-the-error-runtimeerror-no-available-kernel-aborting-execution/180900 for a discussion in PyTorch forum). You may be able to resolve this error by replacing the line
151
+ ```python
152
+ OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
153
+ ```
154
+ in [`sam2/modeling/sam/transformer.py`](sam2/modeling/sam/transformer.py) with
155
+ ```python
156
+ OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = True, True, True
157
+ ```
158
+ to relax the attention kernel setting and use other kernels than Flash Attention.
159
+ </details>
160
+
161
+ <details>
162
+ <summary>
163
+ I got `Error compiling objects for extension`
164
+ </summary>
165
+ <br/>
166
+
167
+ You may see error log of:
168
+ > unsupported Microsoft Visual Studio version! Only the versions between 2017 and 2022 (inclusive) are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
169
+
170
+ This is probably because your versions of CUDA and Visual Studio are incompatible. (see also https://stackoverflow.com/questions/78515942/cuda-compatibility-with-visual-studio-2022-version-17-10 for a discussion in stackoverflow).<br>
171
+ You may be able to fix this by adding the `-allow-unsupported-compiler` argument to `nvcc` after L48 in the [setup.py](https://github.com/facebookresearch/sam2/blob/main/setup.py). <br>
172
+ After adding the argument, `get_extensions()` will look like this:
173
+ ```python
174
+ def get_extensions():
175
+ srcs = ["sam2/csrc/connected_components.cu"]
176
+ compile_args = {
177
+ "cxx": [],
178
+ "nvcc": [
179
+ "-DCUDA_HAS_FP16=1",
180
+ "-D__CUDA_NO_HALF_OPERATORS__",
181
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
182
+ "-D__CUDA_NO_HALF2_OPERATORS__",
183
+ "-allow-unsupported-compiler" # Add this argument
184
+ ],
185
+ }
186
+ ext_modules = [CUDAExtension("sam2._C", srcs, extra_compile_args=compile_args)]
187
+ return ext_modules
188
+ ```
189
+ </details>
sam2.1HQ/sam-hq-main/sam-hq2/README.md ADDED
@@ -0,0 +1,252 @@
1
+ # HQ-SAM 2: Segment Anything in High Quality for Images and Videos
2
+
3
+
4
+ We propose **HQ-SAM2** to upgrade SAM2 to **higher quality** by extending our training strategy in [HQ-SAM](https://arxiv.org/abs/2306.01567).
5
+
6
+ ## Latest updates
7
+
8
+ **2024/11/17 -- HQ-SAM 2 is released**
9
+
10
+ - A new suite of improved model checkpoints (denoted as **HQ-SAM 2**, **beta version**) is released. See [Model Description](#model-description) for details.
11
+
12
+ ![HQ-SAM2 results comparison](assets/hq-sam2-results.png?raw=true)
13
+
14
+ ## Installation
15
+
16
+ HQ-SAM 2 needs to be installed before use. The code requires `python>=3.10`, as well as `torch>=2.3.1` and `torchvision>=0.18.1`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. You can install HQ-SAM 2 on a GPU machine using:
17
+
18
+ ```bash
19
+ git clone https://github.com/SysCV/sam-hq.git
20
+ conda create -n sam_hq2 python=3.10 -y
21
+ conda activate sam_hq2
22
+ cd sam-hq/sam-hq2
23
+ pip install -e .
24
+ ```
25
+ If you are installing on Windows, it's strongly recommended to use [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install) with Ubuntu.
26
+
27
+ To use the HQ-SAM 2 predictor and run the example notebooks, `jupyter` and `matplotlib` are required and can be installed by:
28
+
29
+ ```bash
30
+ pip install -e ".[notebooks]"
31
+ ```
32
+
33
+ Note:
34
+ 1. It's recommended to create a new Python environment via [Anaconda](https://www.anaconda.com/) for this installation and install PyTorch 2.3.1 (or higher) via `pip` following https://pytorch.org/. If you have a PyTorch version lower than 2.3.1 in your current environment, the installation command above will try to upgrade it to the latest PyTorch version using `pip`.
35
+ 2. The step above requires compiling a custom CUDA kernel with the `nvcc` compiler. If it isn't already available on your machine, please install the [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) with a version that matches your PyTorch CUDA version.
36
+ 3. If you see a message like `Failed to build the SAM 2 CUDA extension` during installation, you can ignore it and still use SAM 2 (some post-processing functionality may be limited, but it doesn't affect the results in most cases); a quick import check is shown below.
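+
+ As a quick sanity check after installation, you can try importing the compiled extension (a minimal sketch; `sam2._C` is the extension module referenced in [`INSTALL.md`](./INSTALL.md), and HQ-SAM 2 still works if this import fails):
+
+ ```bash
+ # succeeds only if the optional SAM 2 CUDA extension was built
+ python -c "from sam2 import _C; print('SAM 2 CUDA extension is available')"
+ ```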
37
+
38
+ Please see [`INSTALL.md`](./INSTALL.md) for FAQs on potential issues and solutions.
39
+
40
+ ## Getting Started
41
+
42
+ ### Download Checkpoints
43
+
44
+ First, we need to download a model checkpoint. All the model checkpoints can be downloaded by running:
45
+
46
+ ```bash
47
+ cd checkpoints && \
48
+ ./download_ckpts.sh && \
49
+ cd ..
50
+ ```
51
+
52
+ or individually from:
53
+
54
+ <!-- - [sam2.1_hiera_large.pt](https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt) -->
55
+ - [sam2.1_hq_hiera_large.pt](https://huggingface.co/lkeab/hq-sam/resolve/main/sam2.1_hq_hiera_large.pt?download=true)
56
+
57
+ (note that these are the improved checkpoints denoted as SAM 2.1; see [Model Description](#model-description) for details.)
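+
+ For example, to fetch the HQ-SAM 2 checkpoint directly into the `checkpoints` folder (a minimal sketch assuming `wget` is available; the URL is the one listed above):
+
+ ```bash
+ wget -O checkpoints/sam2.1_hq_hiera_large.pt \
+   "https://huggingface.co/lkeab/hq-sam/resolve/main/sam2.1_hq_hiera_large.pt?download=true"
+ ```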
58
+
59
+ Then HQ-SAM 2 can be used in a few lines as follows for image and video prediction.
60
+
61
+ ### Image prediction
62
+
63
+ HQ-SAM 2 has all the capabilities of [HQ-SAM](https://github.com/SysCV/sam-hq) on static images, and we provide image prediction APIs that closely resemble SAM for image use cases. The `SAM2ImagePredictor` class has an easy interface for image prompting.
64
+
65
+ ```python
66
+ import torch
67
+ from sam2.build_sam import build_sam2
68
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
69
+ # Baseline SAM2.1
70
+ # checkpoint = "./checkpoints/sam2.1_hiera_large.pt"
71
+ # model_cfg = "configs/sam2.1/sam2.1_hiera_l.yaml"
72
+
73
+ # Ours HQ-SAM 2
74
+ checkpoint = "./checkpoints/sam2.1_hq_hiera_large.pt"
75
+ model_cfg = "configs/sam2.1/sam2.1_hq_hiera_l.yaml"
76
+ predictor = SAM2ImagePredictor(build_sam2(model_cfg, checkpoint))
77
+
78
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
79
+     predictor.set_image(<your_image>)
80
+     masks, _, _ = predictor.predict(<input_prompts>, multimask_output=False)
81
+ ```
82
+
83
+ Please refer to the examples in [`demo/demo_hqsam2.py`](./demo/demo_hqsam2.py) for details on how to add click or box prompts.
84
+
85
+ Please refer to the examples in [image_predictor_example.ipynb](./notebooks/image_predictor_example.ipynb) for static image use cases.
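+
+ For reference, below is a minimal sketch of how the placeholders above can be filled in with a concrete box or click prompt. The image path and prompt coordinates are only illustrative; the keyword arguments follow the SAM 2 image predictor API used throughout this repo.
+
+ ```python
+ import cv2
+ import numpy as np
+
+ # Load an RGB image (path is illustrative)
+ image = cv2.cvtColor(cv2.imread("demo/input_images/example1.png"), cv2.COLOR_BGR2RGB)
+
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
+     predictor.set_image(image)
+
+     # A single box prompt in XYXY format (coordinates are illustrative)
+     masks, scores, _ = predictor.predict(
+         box=np.array([100, 150, 600, 500]),
+         multimask_output=False,
+     )
+
+     # Or a single positive click (label 1 = foreground)
+     masks, scores, _ = predictor.predict(
+         point_coords=np.array([[350, 300]]),
+         point_labels=np.array([1]),
+         multimask_output=False,
+     )
+ ```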
86
+
87
+ ### Video prediction
88
+
89
+ For promptable segmentation and tracking in videos, we provide a video predictor with APIs to, for example, add prompts and propagate masklets throughout a video. SAM 2 supports video inference on multiple objects and uses an inference state to keep track of the interactions in each video.
90
+
91
+ ```python
92
+ import torch
93
+ from sam2.build_sam import build_sam2_video_predictor
94
+ from sam2.build_sam import build_sam2_hq_video_predictor
95
+ # Baseline SAM2.1
96
+ # checkpoint = "./checkpoints/sam2.1_hiera_large.pt"
97
+ # model_cfg = "configs/sam2.1/sam2.1_hiera_l.yaml"
98
+ # predictor = build_sam2_video_predictor(model_cfg, checkpoint)
99
+
100
+ # Our HQ-SAM 2
101
+ checkpoint = "./checkpoints/sam2.1_hq_hiera_large.pt"
102
+ model_cfg = "configs/sam2.1/sam2.1_hq_hiera_l.yaml"
103
+ predictor = build_sam2_hq_video_predictor(model_cfg, checkpoint)
104
+
105
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
106
+ state = predictor.init_state(<your_video>)
107
+
108
+ # add new prompts and instantly get the output on the same frame
109
+ frame_idx, object_ids, masks = predictor.add_new_points_or_box(state, <your_prompts>)
110
+
111
+ # propagate the prompts to get masklets throughout the video
112
+ for frame_idx, object_ids, masks in predictor.propagate_in_video(state):
113
+ ...
114
+ ```
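+ 
+ As a follow-up to the snippet above, the propagated outputs can be collected per frame, e.g. into a dictionary keyed by frame index (a sketch assuming `predictor` and `state` from the previous block; following the video notebook, the returned masks are treated as logits and thresholded at 0):
+ 
+ ```python
+ # Collect per-object binary masks for every frame of the video.
+ video_segments = {}
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
+     for frame_idx, object_ids, mask_logits in predictor.propagate_in_video(state):
+         video_segments[frame_idx] = {
+             obj_id: (mask_logits[i] > 0.0).cpu().numpy()
+             for i, obj_id in enumerate(object_ids)
+         }
+ ```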
115
+
116
+
117
+ Please refer to the examples in [video_predictor_example.ipynb](./notebooks/video_predictor_example.ipynb) for video use cases.
118
+
119
+
120
+ ## Model Description
121
+
122
+ ### HQ-SAM 2 checkpoints
123
+
124
+ The table below shows the **zero-shot** image segmentation performance (AP) of SAM 2.1 and HQ-SAM 2 on **COCO**, using the same bounding-box detector (FocalNet-DINO) for both. The inference speed (FPS) of SAM 2.1 and HQ-SAM 2 is on par.
125
+ | **Model** | **Size (M)** | **Single Mode (AP)** | **Multi-Mode (AP)** |
126
+ | :------------------: | :----------: | :-----------------: | :----------------: |
127
+ | sam2.1_hiera_large <br /> ([config](sam2/configs/sam2.1/sam2.1_hiera_l.yaml), [checkpoint](https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt)) | 224.4 | 50.0 | 48.3 |
128
+ | sam2.1_hq_hiera_large <br /> ([config](sam2/configs/sam2.1/sam2.1_hq_hiera_l.yaml), [checkpoint](https://huggingface.co/lkeab/hq-sam/resolve/main/sam2.1_hq_hiera_large.pt?download=true)) | 224.7 | **50.9** | **50.4** |
129
+
130
+ The table below shows the **zero-shot** image segmentation AP performance of Grounded-SAM 2 and Grounded-HQ-SAM 2 on the [**SegInW (Segmentation in the Wild)** benchmark](https://github.com/SysCV/sam-hq/tree/main/seginw).
131
+
132
+
133
+ <table><tbody>
134
+ <!-- START TABLE -->
135
+ <!-- TABLE HEADER -->
136
+ <th valign="bottom">Model Name</th>
137
+ <th valign="bottom">SAM</th>
138
+ <th valign="bottom">GroundingDINO</th>
139
+ <th valign="bottom">Mean AP</th>
140
+ <th valign="bottom">Airplane-Parts</th>
141
+ <th valign="bottom">Bottles</th>
142
+ <th valign="bottom">Brain-Tumor</th>
143
+ <th valign="bottom">Chicken</th>
144
+ <th valign="bottom">Cows</th>
145
+ <th valign="bottom">Electric-Shaver</th>
146
+ <th valign="bottom">Elephants</th>
147
+ <th valign="bottom">Fruits</th>
148
+ <th valign="bottom">Garbage</th>
149
+ <th valign="bottom">Ginger-Garlic</th>
150
+ <th valign="bottom">Hand-Metal</th>
151
+ <th valign="bottom">Hand</th>
152
+ <th valign="bottom">House-Parts</th>
153
+ <th valign="bottom">HouseHold-Items</th>
154
+ <th valign="bottom">Nutterfly-Squireel</th>
155
+ <th valign="bottom">Phones</th>
156
+ <th valign="bottom">Poles</th>
157
+ <th valign="bottom">Puppies</th>
158
+ <th valign="bottom">Rail</th>
159
+ <th valign="bottom">Salmon-Fillet</th>
160
+ <th valign="bottom">Strawberry</th>
161
+ <th valign="bottom">Tablets</th>
162
+ <th valign="bottom">Toolkits</th>
163
+ <th valign="bottom">Trash</th>
164
+ <th valign="bottom">Watermelon</th>
165
+ <!-- TABLE BODY -->
166
+ <tr><td align="left">Grounded SAM2</td>
167
+ <td align="center">vit-l</td>
168
+ <td align="center">swin-b</td>
169
+ <td align="center">49.5</td>
170
+ <td align="center">38.3</td>
171
+ <td align="center">67.1</td>
172
+ <td align="center">12.1</td>
173
+ <td align="center">80.7</td>
174
+ <td align="center">52.8</td>
175
+ <td align="center">72.0</td>
176
+ <td align="center">78.2</td>
177
+ <td align="center">83.3</td>
178
+ <td align="center">26.0</td>
179
+ <td align="center">45.7</td>
180
+ <td align="center">73.7</td>
181
+ <td align="center">77.6</td>
182
+ <td align="center">8.6</td>
183
+ <td align="center">60.1</td>
184
+ <td align="center">84.1</td>
185
+ <td align="center">34.6</td>
186
+ <td align="center">28.8</td>
187
+ <td align="center">48.9</td>
188
+ <td align="center">14.3</td>
189
+ <td align="center">24.2</td>
190
+ <td align="center">83.7</td>
191
+ <td align="center">29.1</td>
192
+ <td align="center">20.1</td>
193
+ <td align="center">28.4</td>
194
+ <td align="center">66.0</td>
195
+ </tr>
196
+
197
+ <tr><td align="left">Grounded HQ-SAM2</td>
198
+ <td align="center">vit-l</td>
199
+ <td align="center">swin-b</td>
200
+ <td align="center"><b>50.0</b></td>
201
+ <td align="center">38.6</td>
202
+ <td align="center">66.8</td>
203
+ <td align="center">12.0</td>
204
+ <td align="center">81.0</td>
205
+ <td align="center">52.8</td>
206
+ <td align="center">71.9</td>
207
+ <td align="center">77.2</td>
208
+ <td align="center">83.3</td>
209
+ <td align="center">26.1</td>
210
+ <td align="center">45.5</td>
211
+ <td align="center">74.8</td>
212
+ <td align="center">79.0</td>
213
+ <td align="center">8.6</td>
214
+ <td align="center">60.1</td>
215
+ <td align="center">84.7</td>
216
+ <td align="center">34.3</td>
217
+ <td align="center">25.5</td>
218
+ <td align="center">48.9</td>
219
+ <td align="center">14.1</td>
220
+ <td align="center">34.1</td>
221
+ <td align="center">85.7</td>
222
+ <td align="center">29.2</td>
223
+ <td align="center">21.5</td>
224
+ <td align="center">28.9</td>
225
+ <td align="center">66.6</td>
226
+
227
+ </tr>
228
+ </tbody></table>
229
+
230
+
231
+ The table below shows the **zero-shot** video object segmentation performance of SAM 2.1 and HQ-SAM 2.
232
+ | **Model** | **Size (M)** | **DAVIS val (J&F)** | **MOSE (J&F)** |
233
+ | :------------------: | :----------: |:----------------: | :---------------: |
234
+ | sam2.1_hiera_large <br /> ([config](sam2/configs/sam2.1/sam2.1_hiera_l.yaml), [checkpoint](https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt)) | 224.4 | 89.8 | 74.6 |
235
+ | sam2.1_hq_hiera_large <br /> ([config](sam2/configs/sam2.1/sam2.1_hq_hiera_l.yaml), [checkpoint](https://huggingface.co/lkeab/hq-sam/resolve/main/sam2.1_hq_hiera_large.pt?download=true)) | 224.7 | **91.0** | **74.7** |
236
+
237
+
238
+
239
+ ## License
240
+
241
+ The HQ-SAM 2 and SAM 2 model checkpoints, SAM 2 demo code (front-end and back-end), and SAM 2 training code are licensed under [Apache 2.0](./LICENSE); however, the [Inter Font](https://github.com/rsms/inter?tab=OFL-1.1-1-ov-file) and [Noto Color Emoji](https://github.com/googlefonts/noto-emoji) used in the SAM 2 demo code are made available under the [SIL Open Font License, version 1.1](https://openfontlicense.org/open-font-license-official-text/).
242
+
243
+ ## Citing HQ-SAM 2
244
+ If you find HQ-SAM 2 useful in your research or refer to the provided baseline results, please star :star: this repository and consider citing :pencil::
245
+ ```
246
+ @inproceedings{sam_hq,
247
+ title={Segment Anything in High Quality},
248
+ author={Ke, Lei and Ye, Mingqiao and Danelljan, Martin and Liu, Yifan and Tai, Yu-Wing and Tang, Chi-Keung and Yu, Fisher},
249
+ booktitle={NeurIPS},
250
+ year={2023}
251
+ }
252
+ ```
sam2.1HQ/sam-hq-main/sam-hq2/assets/hq-sam2-results.png ADDED

Git LFS Details

  • SHA256: 21fbe464ad8328576cca5e68f9ab45d7cef56143527575abb9c9f53812628466
  • Pointer size: 131 Bytes
  • Size of remote file: 240 kB
sam2.1HQ/sam-hq-main/sam-hq2/checkpoints/download_ckpts.sh ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
4
+ # All rights reserved.
5
+
6
+ # This source code is licensed under the license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ # Use either wget or curl to download the checkpoints
10
+ if command -v wget &> /dev/null; then
11
+ CMD="wget"
12
+ elif command -v curl &> /dev/null; then
13
+ CMD="curl -L -O"
14
+ else
15
+ echo "Please install wget or curl to download the checkpoints."
16
+ exit 1
17
+ fi
18
+
19
+ # Define the URLs for SAM 2 checkpoints
20
+ # SAM2_BASE_URL="https://dl.fbaipublicfiles.com/segment_anything_2/072824"
21
+ # sam2_hiera_t_url="${SAM2_BASE_URL}/sam2_hiera_tiny.pt"
22
+ # sam2_hiera_s_url="${SAM2_BASE_URL}/sam2_hiera_small.pt"
23
+ # sam2_hiera_b_plus_url="${SAM2_BASE_URL}/sam2_hiera_base_plus.pt"
24
+ # sam2_hiera_l_url="${SAM2_BASE_URL}/sam2_hiera_large.pt"
25
+
26
+ # Download each of the four checkpoints using wget
27
+ # echo "Downloading sam2_hiera_tiny.pt checkpoint..."
28
+ # $CMD $sam2_hiera_t_url || { echo "Failed to download checkpoint from $sam2_hiera_t_url"; exit 1; }
29
+
30
+ # echo "Downloading sam2_hiera_small.pt checkpoint..."
31
+ # $CMD $sam2_hiera_s_url || { echo "Failed to download checkpoint from $sam2_hiera_s_url"; exit 1; }
32
+
33
+ # echo "Downloading sam2_hiera_base_plus.pt checkpoint..."
34
+ # $CMD $sam2_hiera_b_plus_url || { echo "Failed to download checkpoint from $sam2_hiera_b_plus_url"; exit 1; }
35
+
36
+ # echo "Downloading sam2_hiera_large.pt checkpoint..."
37
+ # $CMD $sam2_hiera_l_url || { echo "Failed to download checkpoint from $sam2_hiera_l_url"; exit 1; }
38
+
39
+ # Define the URLs for SAM 2.1 checkpoints
40
+ #SAM2p1_BASE_URL="https://dl.fbaipublicfiles.com/segment_anything_2/092824"
41
+ #sam2p1_hiera_t_url="${SAM2p1_BASE_URL}/sam2.1_hiera_tiny.pt"
42
+ #sam2p1_hiera_s_url="${SAM2p1_BASE_URL}/sam2.1_hiera_small.pt"
43
+ #sam2p1_hiera_b_plus_url="${SAM2p1_BASE_URL}/sam2.1_hiera_base_plus.pt"
44
+ #sam2p1_hiera_l_url="${SAM2p1_BASE_URL}/sam2.1_hiera_large.pt"
45
+ # sam2p1_hq_hiera_l_url="https://huggingface.co/mqye/sam-hq2/resolve/main/sam2.1_hq_hiera_large.pt?download=true"
46
+ sam2p1_hq_hiera_l_url="https://huggingface.co/lkeab/hq-sam/resolve/main/sam2.1_hq_hiera_large.pt?download=true"
47
+ # SAM 2.1 checkpoints
48
+
49
+ echo "Downloading sam2.1_hq_hiera_large.pt checkpoint..."
50
+ $CMD "$sam2p1_hq_hiera_l_url" || { echo "Failed to download checkpoint from $sam2p1_hq_hiera_l_url"; exit 1; }
51
+
52
+ mv "sam2.1_hq_hiera_large.pt?download=true" sam2.1_hq_hiera_large.pt
53
+
54
+ echo "HQ-SAM 2 checkpoint downloaded successfully."
sam2.1HQ/sam-hq-main/sam-hq2/demo/demo_hqsam2.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+ import matplotlib.pyplot as plt
4
+ import cv2
5
+ from sam2.build_sam import build_sam2
6
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
7
+ import os
8
+
9
+ def show_mask(mask, ax, random_color=False):
10
+ if random_color:
11
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
12
+ else:
13
+ color = np.array([30/255, 144/255, 255/255, 0.6])
14
+ h, w = mask.shape[-2:]
15
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
16
+ ax.imshow(mask_image)
17
+
18
+ def show_points(coords, labels, ax, marker_size=375):
19
+ pos_points = coords[labels==1]
20
+ neg_points = coords[labels==0]
21
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
22
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
23
+
24
+ def show_box(box, ax):
25
+ x0, y0 = box[0], box[1]
26
+ w, h = box[2] - box[0], box[3] - box[1]
27
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
28
+
29
+
30
+ def show_res(masks, scores, input_point, input_label, input_box, filename, image):
31
+ for i, (mask, score) in enumerate(zip(masks, scores)):
32
+ plt.figure(figsize=(10,10))
33
+ plt.imshow(image)
34
+ show_mask(mask, plt.gca())
35
+ if input_box is not None:
36
+ box = input_box[i]
37
+ show_box(box, plt.gca())
38
+ if (input_point is not None) and (input_label is not None):
39
+ show_points(input_point, input_label, plt.gca())
40
+
41
+ print(f"Score: {score:.3f}")
42
+ plt.axis('off')
43
+ plt.savefig(filename+'_'+str(i)+'.png',bbox_inches='tight',pad_inches=-0.1)
44
+ plt.close()
45
+
46
+ def show_res_multi(masks, scores, input_point, input_label, input_box, filename, image):
47
+ plt.figure(figsize=(10, 10))
48
+ plt.imshow(image)
49
+ for mask in masks:
50
+ show_mask(mask, plt.gca(), random_color=True)
51
+ for box in input_box:
52
+ show_box(box, plt.gca())
53
+ for score in scores:
54
+ print(f"Score: {score:.3f}")
55
+ plt.axis('off')
56
+ plt.savefig(filename +'.png',bbox_inches='tight',pad_inches=-0.1)
57
+ plt.close()
58
+
59
+
60
+ if __name__ == "__main__":
61
+ checkpoint = "./checkpoints/sam2.1_hq_hiera_large.pt"
62
+ model_cfg = "configs/sam2.1/sam2.1_hq_hiera_l.yaml"
63
+ predictor = SAM2ImagePredictor(build_sam2(model_cfg, checkpoint))
64
+
65
+ for i in range(1,5):
66
+ print("image: ",i)
67
+ # hq_token_only: False means use hq output to correct SAM output.
68
+ # True means use hq output only.
69
+ # Default: False
70
+ hq_token_only = False
71
+ # To achieve best visualization effect, for images contain multiple objects (like typical coco images), we suggest to set hq_token_only=False
72
+ # For images contain single object, we suggest to set hq_token_only = True
73
+ # For quantiative evaluation on COCO/YTVOS/DAVIS/UVO/LVIS etc., we set hq_token_only = False
74
+
75
+ image = cv2.imread('./demo/input_images/example'+str(i)+'.png')
76
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
77
+ predictor.set_image(image)
78
+
79
+ if i==1:
80
+ input_box = np.array([[306, 132, 925, 893]])
81
+ input_point, input_label = None, None
82
+ elif i==2:
83
+ input_point = np.array([[495,518],[217,140]])
84
+ input_label = np.ones(input_point.shape[0])
85
+ input_box = None
86
+ elif i==3:
87
+ input_box = np.array([[64,76,940,919]])
88
+ input_point, input_label = None, None
89
+ elif i==4:
90
+ # multi box input
91
+ input_box = torch.tensor([[45,260,515,470], [310,228,424,296]],device=predictor.device)
92
+ # transformed_box = predictor.transform.apply_boxes_torch(input_box, image.shape[:2])
93
+ input_point, input_label = None, None
94
+
95
+ batch_box = False if input_box is None else len(input_box)>1
96
+ result_path = 'demo/hq_sam_result_vis/'
97
+ os.makedirs(result_path, exist_ok=True)
98
+
99
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
100
+ masks, scores, logits = predictor.predict(point_coords=input_point,
101
+ point_labels=input_label,
102
+ box=input_box,
103
+ multimask_output=False, hq_token_only=hq_token_only)
104
+
105
+ if not batch_box:
106
+ show_res(masks,scores,input_point, input_label, input_box, result_path + 'example'+str(i), image)
107
+ else:
108
+ masks = masks.squeeze(1)
109
+ scores = scores.squeeze(1)
110
+ input_box = input_box.cpu().numpy()
111
+ show_res_multi(masks, scores, input_point, input_label, input_box, result_path + 'example'+str(i), image)
112
+
113
+
114
+
115
+
116
+
117
+
118
+
sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example1.png ADDED

Git LFS Details

  • SHA256: 8533657226d481a90f648dda0a05c81c69d3135ce6ca4d74838167d9e61a8116
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example2.png ADDED

Git LFS Details

  • SHA256: d42a70173297297b654cd067e7ed3de717c3d2b37fd6d13b0396e5fc58449850
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
sam2.1HQ/sam-hq-main/sam-hq2/demo/input_images/example3.png ADDED

Git LFS Details

  • SHA256: 866820ace9a150b791c00f955c2b436fc72a2e6a43b36187aba975be196161c4
  • Pointer size: 132 Bytes
  • Size of remote file: 2.32 MB