ziyanlu committed
Commit 9859ea2 · verified · 1 Parent(s): 0f7d023

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +6 -0
  2. .github/workflows/update_space.yml +28 -0
  3. .gitignore +168 -0
  4. .gradio/certificate.pem +31 -0
  5. LICENSE +201 -0
  6. app.py +399 -0
  7. assets/QRCode.jpg +3 -0
  8. assets/architecture.png +3 -0
  9. assets/comparison.png +3 -0
  10. assets/motivation.png +3 -0
  11. assets/vis_anat.png +3 -0
  12. assets/vis_modal.png +3 -0
  13. examples/1.3.6.1.4.1.9328.50.4.0327.nii.gz +3 -0
  14. examples/1.3.6.1.4.1.9328.50.4.0357.nii.gz +3 -0
  15. examples/1.3.6.1.4.1.9328.50.4.0477.nii.gz +3 -0
  16. examples/1.3.6.1.4.1.9328.50.4.0491.nii.gz +3 -0
  17. examples/1.3.6.1.4.1.9328.50.4.0708.nii.gz +3 -0
  18. examples/1.3.6.1.4.1.9328.50.4.0719.nii.gz +3 -0
  19. examples/labels/1.3.6.1.4.1.9328.50.4.0357.nii.gz +3 -0
  20. infer.sh +7 -0
  21. infer_sequence.py +647 -0
  22. infer_sequence.sh +5 -0
  23. inference.py +531 -0
  24. medim_infer.py +294 -0
  25. readme.md +286 -0
  26. requirements.txt +8 -0
  27. sample.py +93 -0
  28. scripts/val_default.sh +5 -0
  29. scripts/val_med2d.sh +5 -0
  30. scripts/val_sam.sh +5 -0
  31. segment_anything/__init__.py +11 -0
  32. segment_anything/automatic_mask_generator.py +372 -0
  33. segment_anything/build_sam.py +161 -0
  34. segment_anything/build_sam3D.py +161 -0
  35. segment_anything/modeling/__init__.py +10 -0
  36. segment_anything/modeling/common.py +45 -0
  37. segment_anything/modeling/image_encoder.py +401 -0
  38. segment_anything/modeling/image_encoder3D.py +442 -0
  39. segment_anything/modeling/mask_decoder.py +186 -0
  40. segment_anything/modeling/mask_decoder3D.py +458 -0
  41. segment_anything/modeling/prompt_encoder.py +227 -0
  42. segment_anything/modeling/prompt_encoder3D.py +230 -0
  43. segment_anything/modeling/sam.py +174 -0
  44. segment_anything/modeling/sam3D.py +176 -0
  45. segment_anything/modeling/sam_model.py +106 -0
  46. segment_anything/modeling/transformer.py +244 -0
  47. segment_anything/predictor.py +271 -0
  48. segment_anything/utils/__init__.py +1 -0
  49. segment_anything/utils/amg.py +346 -0
  50. segment_anything/utils/onnx.py +144 -0
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ assets/architecture.png filter=lfs diff=lfs merge=lfs -text
+ assets/comparison.png filter=lfs diff=lfs merge=lfs -text
+ assets/motivation.png filter=lfs diff=lfs merge=lfs -text
+ assets/QRCode.jpg filter=lfs diff=lfs merge=lfs -text
+ assets/vis_anat.png filter=lfs diff=lfs merge=lfs -text
+ assets/vis_modal.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
+ name: Run Python script
+
+ on:
+   push:
+     branches:
+       - y
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+
+       - name: Set up Python
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.9'
+
+       - name: Install Gradio
+         run: python -m pip install gradio
+
+       - name: Log in to Hugging Face
+         run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+       - name: Deploy to Spaces
+         run: gradio deploy
.gitignore ADDED
@@ -0,0 +1,168 @@
+ data/*
+ ckpt/*
+ results/*
+ work_dir/*
+ */__pycache__/*
+ __pycache__/*
+ *.pyc
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
+ -----BEGIN CERTIFICATE-----
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+ -----END CERTIFICATE-----
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
app.py ADDED
@@ -0,0 +1,399 @@
+ import os
+ import numpy as np
+ import torch
+ import SimpleITK as sitk
+ import torch.nn.functional as F
+ import cv2
+ from PIL import Image, ImageDraw, ImageOps
+ import tempfile
+ import gradio as gr
+ from segment_anything.build_sam3D import sam_model_registry3D
+ from utils.click_method import get_next_click3D_torch_ritm, get_next_click3D_torch_2
+
+
+ def build_model():
+     checkpoint_path = 'ckpt\\BoSAM.pth'
+
+     checkpoint = torch.load(checkpoint_path, map_location='cuda', weights_only=False)
+
+     state_dict = checkpoint['model_state_dict']
+
+     sam_model = sam_model_registry3D['vit_b_ori'](checkpoint=None).to('cuda')
+     sam_model.load_state_dict(state_dict)
+
+     return sam_model
+
+
+ def center_crop_or_pad(image_array, target_shape=(128, 128, 128)):
+     """Center-crop or pad the image to the target shape."""
+     current_shape = image_array.shape
+
+     start = [(c - t) // 2 if c > t else 0 for c, t in zip(current_shape, target_shape)]
+     end = [s + t if c > t else c for s, t, c in zip(start, target_shape, current_shape)]
+
+     result = np.zeros(target_shape, dtype=image_array.dtype)
+
+     target_start = [0 if c > t else (t - c) // 2 for c, t in zip(current_shape, target_shape)]
+     target_end = [t if c > t else ts + c for ts, c, t in zip(target_start, current_shape, target_shape)]
+
+     if all(c >= t for c, t in zip(current_shape, target_shape)):
+         cropped = image_array[
+             start[0]:start[0]+target_shape[0],
+             start[1]:start[1]+target_shape[1],
+             start[2]:start[2]+target_shape[2]
+         ]
+         return cropped
+     else:
+         source_slices = tuple(slice(0 if c <= t else s, c if c <= t else e)
+                               for s, e, c, t in zip(start, end, current_shape, target_shape))
+         target_slices = tuple(slice(ts, te)
+                               for ts, te in zip(target_start, target_end))
+
+         result[target_slices] = image_array[source_slices]
+         return result
+
+
+ def preprocess_image(image_path):
+     """Preprocess the image to 128x128x128."""
+     image = sitk.ReadImage(image_path)
+     image_array = sitk.GetArrayFromImage(image)
+
+     processed_array = center_crop_or_pad(image_array, (128, 128, 128))
+
+     image_tensor = torch.tensor(processed_array).float().unsqueeze(0).unsqueeze(0)
+
+     return image_tensor.to('cuda')
+
+
+ def load_gt3d(image3d_path):
+     """Load and preprocess the GT label to 128x128x128."""
+     gt3d_path = r'examples\labels\1.3.6.1.4.1.9328.50.4.0357.nii.gz'  # use a fixed GT
+     if not os.path.exists(gt3d_path):
+         raise FileNotFoundError(f"The file {gt3d_path} does not exist.")
+
+     image = sitk.ReadImage(gt3d_path)
+     image_array = sitk.GetArrayFromImage(image)
+
+     processed_array = center_crop_or_pad(image_array, (128, 128, 128))
+
+     gt_tensor = torch.tensor(processed_array).float().unsqueeze(0).unsqueeze(0)
+
+     return gt_tensor.to('cuda')
+
+
+ def overlay_mask_on_image(image_slice, mask_slice, alpha=0.6):
+     """Overlay the mask on an image slice with an enhanced visual effect."""
+     # Enhance contrast
+     p2, p98 = np.percentile(image_slice, (2, 98))
+     image_contrast = np.clip((image_slice - p2) / (p98 - p2), 0, 1)
+     image_contrast = (image_contrast * 255).astype(np.uint8)
+
+     # Create a color image
+     image_rgb = Image.fromarray(image_contrast).convert("RGB")
+
+     # Apply slight sharpening and enhancement
+     enhancer = ImageOps.autocontrast(image_rgb, cutoff=0.5)
+     image_rgba = enhancer.convert("RGBA")
+
+     # Create a more vivid mask color
+     mask_image = Image.new('RGBA', image_rgba.size, (0, 0, 0, 0))
+     mask_draw = ImageDraw.Draw(mask_image)
+
+     mask = (mask_slice > 0).astype(np.uint8) * 255
+     mask_pil = Image.fromarray(mask, mode='L')
+
+     # Use a highly saturated blue
+     mask_draw.bitmap((0, 0), mask_pil, fill=(41, 128, 255, int(255 * alpha)))
+
+     # Composite the layers, adding a slight glow effect
+     combined_image = Image.alpha_composite(image_rgba, mask_image)
+
+     return combined_image
+
+
+ def predict(image3D, sam_model, points=None, prev_masks=None, num_clicks=5):
+     """Predict masks with the SAM model."""
+     sam_model.eval()
+
+     image3D = (image3D - image3D.mean()) / image3D.std()
+
+     gt3D = load_gt3d(None)
+
+     if prev_masks is None:
+         prev_masks = torch.zeros_like(image3D).to('cuda')
+
+     low_res_masks = F.interpolate(prev_masks.float(), size=(32, 32, 32))
+
+     with torch.no_grad():
+         image_embedding = sam_model.image_encoder(image3D)
+
+     for num_click in range(num_clicks):
+         with torch.no_grad():
+             batch_points, batch_labels = get_next_click3D_torch_2(prev_masks.to('cuda'), gt3D.to('cuda'))
+
+             points_co = torch.cat(batch_points, dim=0).to('cuda')
+             points_la = torch.cat(batch_labels, dim=0).to('cuda')
+
+             sparse_embeddings, dense_embeddings = sam_model.prompt_encoder(
+                 points=[points_co, points_la],
+                 boxes=None,
+                 masks=low_res_masks.to('cuda'),
+             )
+
+             low_res_masks, iou_predictions = sam_model.mask_decoder(
+                 image_embeddings=image_embedding.to('cuda'),
+                 image_pe=sam_model.prompt_encoder.get_dense_pe(),
+                 sparse_prompt_embeddings=sparse_embeddings,
+                 dense_prompt_embeddings=dense_embeddings,
+                 multimask_output=False,
+             )
+
+             prev_masks = F.interpolate(low_res_masks, size=[128, 128, 128], mode='trilinear', align_corners=False)
+
+     medsam_seg_prob = torch.sigmoid(prev_masks)
+     medsam_seg_prob = medsam_seg_prob.cpu().numpy().squeeze()
+     medsam_seg = (medsam_seg_prob > 0.5).astype(np.uint8)
+
+     return medsam_seg, medsam_seg_prob
+
+
+ def normalize_image(image):
+     """Enhance image contrast."""
+     # Use percentiles to enhance contrast
+     p2, p98 = np.percentile(image, (2, 98))
+     if p98 - p2 != 0:
+         image = np.clip((image - p2) / (p98 - p2), 0, 1)
+     else:
+         image = np.zeros_like(image)
+     image = (image * 255).astype(np.uint8)
+     return image
+
+
+ def predicts(img_path, sam_model):
+     """Preprocess the image and run prediction."""
+     img = preprocess_image(img_path)
+     prediction, prediction_prob = predict(img, sam_model)
+     return prediction, prediction_prob
+
+
+ def save_nifti(prediction, original_image_path):
+     """Save the prediction as a NIfTI file."""
+     original_image = sitk.ReadImage(original_image_path)
+
+     output_image = sitk.GetImageFromArray(prediction.astype(np.uint8))
+
+     output_image.SetDirection(original_image.GetDirection())
+     output_image.SetOrigin(original_image.GetOrigin())
+
+     original_size = original_image.GetSize()
+     original_spacing = original_image.GetSpacing()
+
+     new_spacing = [
+         original_spacing[0] * (original_size[0] / 128),
+         original_spacing[1] * (original_size[1] / 128),
+         original_spacing[2] * (original_size[2] / 128)
+     ]
+     output_image.SetSpacing(new_spacing)
+
+     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".nii.gz")
+     temp_filename = temp_file.name
+
+     sitk.WriteImage(output_image, temp_filename)
+
+     return temp_filename
+
+
+ def gr_interface(img_path, sam_model=None):
+     """Enhanced Gradio interface function."""
+     if sam_model is None:
+         sam_model = build_model()
+
+     # Show progress information
+     yield None, gr.update(value="正在加载数据..."), None, None, None
+
+     processed_img = preprocess_image(img_path)
+
+     yield None, gr.update(value="正在分割..."), None, None, None
+
+     prediction, prediction_prob = predicts(img_path, sam_model)
+
+     yield None, gr.update(value="正在生成可视化..."), None, None, None
+
+     processed_slices = []
+     combined_slices = []
+     predicted_slices = []
+
+     nifti_file_path = save_nifti(prediction, img_path)
+
+     # Compute the indices of the central 32 slices
+     start_idx = (128 - 32) // 2  # 48
+     end_idx = start_idx + 32  # 80
+
+     for i in range(start_idx, end_idx):
+         # Process the raw image slice
+         processed_slice = processed_img[0, 0, i].cpu().numpy()
+         processed_slices.append(normalize_image(processed_slice))
+
+         # Process the predicted mask slice
+         mask_slice = prediction[i]
+         normalized_mask = normalize_image(mask_slice)
+
+         # Overlay the mask on the image - use a more striking visual effect
+         combined_image = overlay_mask_on_image(processed_slices[-1], mask_slice)
+         combined_slices.append(combined_image)
+
+         # Append the predicted slice
+         predicted_slices.append(normalized_mask)
+
+     yield processed_slices, gr.update(value="分割完成!"), combined_slices, predicted_slices, nifti_file_path
+
+
+ # Example file paths used as constants
+ DEFAULT_EXAMPLE = "examples\\1.3.6.1.4.1.9328.50.4.0327.nii.gz"
+ EXAMPLES = [
+     ["examples\\1.3.6.1.4.1.9328.50.4.0327.nii.gz"],
+     ["examples\\1.3.6.1.4.1.9328.50.4.0357.nii.gz"],
+     ["examples\\1.3.6.1.4.1.9328.50.4.0477.nii.gz"],
+     ["examples\\1.3.6.1.4.1.9328.50.4.0491.nii.gz"],
+     ["examples\\1.3.6.1.4.1.9328.50.4.0708.nii.gz"],
+     ["examples\\1.3.6.1.4.1.9328.50.4.0719.nii.gz"]
+ ]
+
+ # Custom CSS styles to polish the interface
+ css = """
+ body {
+     background-color: #f8fafc;
+ }
+
+ .container {
+     max-width: 1200px;
+     margin: 0 auto;
+ }
+
+ .main-title {
+     text-align: center;
+     color: #2563eb;
+     font-size: 2.5rem;
+     margin-bottom: 1rem;
+     font-weight: bold;
+     animation: fadeIn 1.5s ease-in-out;
+ }
+
+ .sub-title {
+     text-align: center;
+     color: #1e293b;
+     margin-bottom: 2rem;
+     animation: fadeIn 2s ease-in-out;
+ }
+
+ .custom-button {
+     background-color: #2563eb !important;
+     color: white !important;
+     transition: transform 0.3s, box-shadow 0.3s;
+ }
+
+ .custom-button:hover {
+     transform: translateY(-2px);
+     box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
+ }
+
+ .gallery-item {
+     border-radius: 8px;
+     overflow: hidden;
+     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+     transition: transform 0.3s;
+ }
+
+ .gallery-item:hover {
+     transform: scale(1.02);
+     box-shadow: 0 6px 12px rgba(0, 0, 0, 0.15);
+ }
+
+ @keyframes fadeIn {
+     from { opacity: 0; transform: translateY(20px); }
+     to { opacity: 1; transform: translateY(0); }
+ }
+ """
+
+ # Load the model (global variable)
+ sam_model = build_model()
+
+ # Build a nicer Gradio interface using compatible components
+ with gr.Blocks(title="3D医学影像智能分割系统", css=css) as demo:
+     gr.HTML("<h1 class='main-title'>3D医学影像智能分割系统</h1>")
+     gr.HTML("<p class='sub-title'>基于BoSAM的前沿人工智能自动分割技术,为医学影像分析提供高精度解决方案</p>")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             # Input area
+             gr.Markdown("### 上传/选择影像")
+             input_file = gr.File(label="上传NIfTI文件", value=DEFAULT_EXAMPLE)
+
+             status = gr.Textbox(label="处理状态", value="准备就绪")
+             process_btn = gr.Button("开始智能分割", elem_classes=["custom-button"])
+
+             # Examples area
+             gr.Markdown("### 示例数据")
+             examples = gr.Examples(
+                 examples=EXAMPLES,
+                 inputs=[input_file]
+             )
+
+             gr.HTML("""
+             <div style="margin-top: 2rem; padding: 1rem; background-color: rgba(16, 185, 129, 0.1); border-radius: 8px;">
+                 <h3 style="color: #10b981; margin-bottom: 0.5rem;">技术亮点</h3>
+                 <ul style="margin-left: 1.5rem;">
+                     <li>基于最新的Segment Anything Model (SAM) 技术</li>
+                     <li>专为3D医学影像优化的深度学习模型</li>
+                     <li>智能识别解剖结构,无需手动绘制边界</li>
+                     <li>高精度分割结果,提升诊断效率</li>
+                 </ul>
+             </div>
+             """)
+
+         with gr.Column(scale=2):
+             # Output area
+             with gr.Row():
+                 gr.Markdown("## 原始医学影像")
+             output_original = gr.Gallery(label="", show_label=False, columns=4, rows=8, height="600px", elem_classes=["gallery-item"])
+
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("## 分割叠加结果")
+                     output_combined = gr.Gallery(label="", show_label=False, columns=4, rows=4, height="400px", elem_classes=["gallery-item"])
+
+                 with gr.Column():
+                     gr.Markdown("## 分割掩码")
+                     output_mask = gr.Gallery(label="", show_label=False, columns=4, rows=4, height="400px", elem_classes=["gallery-item"])
+
+             gr.Markdown("## 分割结果下载")
+             output_file = gr.File(label="下载完整3D分割结果 (NIFTI格式)")
+
+     gr.HTML("""
+     <div style="text-align: center; margin-top: 2rem; padding: 1rem; border-top: 1px solid rgba(0, 0, 0, 0.1);">
+         <p>© 2025 3D医学影像智能分割系统 | 人工智能辅助医学影像分析平台</p>
+         <p>基于最新的BoaSAM模型,为医疗影像分析提供高精度自动分割解决方案</p>
+     </div>
+     """)
+
+     # Wire up event handlers
+     process_btn.click(
+         fn=gr_interface,
+         inputs=[input_file],
+         outputs=[output_original, status, output_combined, output_mask, output_file]
+     )
+
+     examples.dataset.click(
+         fn=gr_interface,
+         inputs=[input_file],
+         outputs=[output_original, status, output_combined, output_mask, output_file]
+     )
+
+     demo.load(
+         fn=gr_interface,
+         inputs=[input_file],
+         outputs=[output_original, status, output_combined, output_mask, output_file]
+     )
+
+ if __name__ == "__main__":
+     demo.launch(debug=True, share=True)
assets/QRCode.jpg ADDED

Git LFS Details

  • SHA256: 8ee7ef025b7b28b31ec78080e7e67ad4c6261eb7668d7f430d01d1f36036be31
  • Pointer size: 131 Bytes
  • Size of remote file: 115 kB
assets/architecture.png ADDED

Git LFS Details

  • SHA256: be0c9df6334dcc063466e63d531defc7dfc94671df71b4a849ee52c720902c1a
  • Pointer size: 131 Bytes
  • Size of remote file: 409 kB
assets/comparison.png ADDED

Git LFS Details

  • SHA256: af4b45ba8259a1512ecb63e5a9603f71540a48d002d21a5b0579441d298d74ce
  • Pointer size: 131 Bytes
  • Size of remote file: 148 kB
assets/motivation.png ADDED

Git LFS Details

  • SHA256: dfe06a339ad62a740bd8c8dd9725f8f54558ebf89f6c6249a942d12263cb487e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.08 MB
assets/vis_anat.png ADDED

Git LFS Details

  • SHA256: 503b78160431540e88f04fdfa6419c168a54856fff6e4732c88a8f3baeaefc40
  • Pointer size: 132 Bytes
  • Size of remote file: 1.92 MB
assets/vis_modal.png ADDED

Git LFS Details

  • SHA256: deb77140297ae5cbe25cfd99dcb86d18e9379b26d3c02213d71727c85809dabf
  • Pointer size: 132 Bytes
  • Size of remote file: 2.01 MB
examples/1.3.6.1.4.1.9328.50.4.0327.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17261b6a18c222ef7e7796f4bfcbef6f4c440f9135a101435e8159e0e52231a5
+ size 103484205
examples/1.3.6.1.4.1.9328.50.4.0357.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:473d38a0fbec79d86882575808bdca81835898a27ce29be35e9d860c15abbaae
+ size 112380358
examples/1.3.6.1.4.1.9328.50.4.0477.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bba2263ff3fa175b88033d9d4186c18802770114c9c1b01c90139daf6ef738a6
+ size 26655732
examples/1.3.6.1.4.1.9328.50.4.0491.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f349f80f354c5a3a6db24d022df4ba87e431a12e080a133347f4b9278fa0d418
+ size 117995983
examples/1.3.6.1.4.1.9328.50.4.0708.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f7ba2b41b1eaaff732c6a3668b1b59b77a9cdd28cf5d2156e6370fd083da26b
+ size 118607138
examples/1.3.6.1.4.1.9328.50.4.0719.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b65efaa595473c9804745be94b033e06934e26b333d855c2d85d73667f6e5aa
+ size 111900628
examples/labels/1.3.6.1.4.1.9328.50.4.0357.nii.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cfdee17b5ada204b3340aa4079c5883f9bb4baa723400e0e5c66945d2a5bece
+ size 189731
infer.sh ADDED
@@ -0,0 +1,7 @@
+ python inference.py --seed 2024 \
+     -cp ./ckpt/sam_med3d_turbo.pth \
+     -tdp ./data/medical_preprocessed -nc 1 \
+     --output_dir ./results \
+     --task_name infer_turbo
+     #--sliding_window
+     #--save_image_and_gt
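
Note: infer.sh above drives inference.py. The sequence-inference script added below (infer_sequence.py) exposes a similar command-line interface; the sketch that follows is a hedged example of how it might be invoked, with flag names taken from its argparse block further down. The actual contents of infer_sequence.sh in this commit are not shown in this view, and the task name here is only a placeholder.

# Hypothetical invocation of infer_sequence.py (not the committed infer_sequence.sh);
# flags mirror the argparse defaults defined in the script below.
python infer_sequence.py --seed 2023 \
    -cp ./ckpt/sam_med3d.pth \
    -tdp ./data/validation -nc 5 \
    --output_dir ./visualization \
    --task_name my_sequence_run   # placeholder task name
    #--sliding_window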
infer_sequence.py ADDED
@@ -0,0 +1,647 @@
1
+ """
2
+ Run inference without label masks. Based on inference.py, and requires new click methods
3
+ from updated utils/click_method.py. Check the new click method details for more information.
4
+
5
+ Author: Karson Chrispens
6
+ Date: 5/15/2024
7
+ """
8
+
9
+ import os
10
+ import os.path as osp
11
+
12
+ join = osp.join
13
+ import argparse
14
+ import json
15
+ import pickle
16
+ from collections import OrderedDict, defaultdict
17
+ from glob import glob
18
+ from itertools import product
19
+
20
+ import numpy as np
21
+ import SimpleITK as sitk
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torchio as tio
25
+ from torch.utils.data import DataLoader
26
+ from tqdm import tqdm
27
+
28
+ from segment_anything import sam_model_registry
29
+ from segment_anything.build_sam3D import sam_model_registry3D
30
+ from segment_anything.utils.transforms3D import ResizeLongestSide3D
31
+ from utils.click_method import (
32
+ get_next_click3D_torch_no_gt_naive,
33
+ get_next_click3D_torch_no_gt,
34
+ )
35
+ from utils.data_loader import Dataset_Union_ALL_Infer
36
+
37
+ parser = argparse.ArgumentParser()
38
+ parser.add_argument("-tdp", "--test_data_path", type=str, default="./data/validation")
39
+ parser.add_argument(
40
+ "-cp", "--checkpoint_path", type=str, default="./ckpt/sam_med3d.pth"
41
+ )
42
+ parser.add_argument("--output_dir", type=str, default="./visualization")
43
+ parser.add_argument("--task_name", type=str, default="test_amos")
44
+ parser.add_argument("--skip_existing_pred", action="store_true", default=False)
45
+ parser.add_argument("--save_image", action="store_true", default=True)
46
+ parser.add_argument("--sliding_window", action="store_true", default=False)
47
+
48
+ parser.add_argument("--image_size", type=int, default=256)
49
+ parser.add_argument("--crop_size", type=int, default=128)
50
+ parser.add_argument("--device", type=str, default="cuda")
51
+ parser.add_argument("-mt", "--model_type", type=str, default="vit_b_ori")
52
+ parser.add_argument("-nc", "--num_clicks", type=int, default=5)
53
+ parser.add_argument("-pm", "--point_method", type=str, default="no_gt")
54
+ parser.add_argument("-dt", "--data_type", type=str, default="infer")
55
+
56
+ parser.add_argument("--threshold", type=int, default=0)
57
+ parser.add_argument("--dim", type=int, default=3)
58
+ parser.add_argument("--split_idx", type=int, default=0)
59
+ parser.add_argument("--split_num", type=int, default=1)
60
+ parser.add_argument("--ft2d", action="store_true", default=False)
61
+ parser.add_argument("--seed", type=int, default=2023)
62
+
63
+ args = parser.parse_args()
64
+
65
+ """ parse and output_dir and task_name """
66
+ args.output_dir = join(args.output_dir, args.task_name)
67
+ args.pred_output_dir = join(args.output_dir, "pred")
68
+ os.makedirs(args.output_dir, exist_ok=True)
69
+ os.makedirs(args.pred_output_dir, exist_ok=True)
70
+ args.save_name = join(args.output_dir, "dice.py")
71
+ print("output_dir set to", args.output_dir)
72
+
73
+ SEED = args.seed
74
+ print("set seed as", SEED)
75
+ torch.manual_seed(SEED)
76
+ np.random.seed(SEED)
77
+
78
+ if torch.cuda.is_available():
79
+ torch.cuda.init()
80
+
81
+ click_methods = {
82
+ "no_gt": get_next_click3D_torch_no_gt,
83
+ "no_gt_naive": get_next_click3D_torch_no_gt_naive,
84
+ }
85
+
86
+
87
+ def postprocess_masks(low_res_masks, image_size, original_size):
88
+ ori_h, ori_w = original_size
89
+ masks = F.interpolate(
90
+ low_res_masks,
91
+ (image_size, image_size),
92
+ mode="bilinear",
93
+ align_corners=False,
94
+ )
95
+ if args.ft2d and ori_h < image_size and ori_w < image_size:
96
+ top = (image_size - ori_h) // 2
97
+ left = (image_size - ori_w) // 2
98
+ masks = masks[..., top : ori_h + top, left : ori_w + left]
99
+ pad = (top, left)
100
+ else:
101
+ masks = F.interpolate(
102
+ masks, original_size, mode="bilinear", align_corners=False
103
+ )
104
+ pad = None
105
+ return masks, pad
106
+
107
+
108
+ def sam_decoder_inference(
109
+ target_size,
110
+ points_coords,
111
+ points_labels,
112
+ model,
113
+ image_embeddings,
114
+ mask_inputs=None,
115
+ multimask=False,
116
+ ):
117
+ with torch.no_grad():
118
+ sparse_embeddings, dense_embeddings = model.prompt_encoder(
119
+ points=(points_coords.to(model.device), points_labels.to(model.device)),
120
+ boxes=None,
121
+ masks=mask_inputs,
122
+ )
123
+
124
+ low_res_masks, iou_predictions = model.mask_decoder(
125
+ image_embeddings=image_embeddings,
126
+ image_pe=model.prompt_encoder.get_dense_pe(),
127
+ sparse_prompt_embeddings=sparse_embeddings,
128
+ dense_prompt_embeddings=dense_embeddings,
129
+ multimask_output=multimask,
130
+ )
131
+
132
+ if multimask:
133
+ max_values, max_indexs = torch.max(iou_predictions, dim=1)
134
+ max_values = max_values.unsqueeze(1)
135
+ iou_predictions = max_values
136
+ low_res = []
137
+ for i, idx in enumerate(max_indexs):
138
+ low_res.append(low_res_masks[i : i + 1, idx])
139
+ low_res_masks = torch.stack(low_res, 0)
140
+ masks = F.interpolate(
141
+ low_res_masks,
142
+ (target_size, target_size),
143
+ mode="bilinear",
144
+ align_corners=False,
145
+ )
146
+ return masks, low_res_masks, iou_predictions
147
+
148
+
149
+ def repixel_value(arr, is_seg=False):
150
+ if not is_seg:
151
+ min_val = arr.min()
152
+ max_val = arr.max()
153
+ new_arr = (arr - min_val) / (max_val - min_val + 1e-10) * 255.0
154
+ return new_arr
155
+
156
+
157
+ def random_point_sampling(mask, get_point=1):
158
+ if isinstance(mask, torch.Tensor):
159
+ mask = mask.numpy()
160
+ fg_coords = np.argwhere(mask == 1)[:, ::-1]
161
+ bg_coords = np.argwhere(mask == 0)[:, ::-1]
162
+
163
+ fg_size = len(fg_coords)
164
+ bg_size = len(bg_coords)
165
+
166
+ if get_point == 1:
167
+ if fg_size > 0:
168
+ index = np.random.randint(fg_size)
169
+ fg_coord = fg_coords[index]
170
+ label = 1
171
+ else:
172
+ index = np.random.randint(bg_size)
173
+ fg_coord = bg_coords[index]
174
+ label = 0
175
+ return torch.as_tensor([fg_coord.tolist()], dtype=torch.float), torch.as_tensor(
176
+ [label], dtype=torch.int
177
+ )
178
+ else:
179
+ num_fg = get_point // 2
180
+ num_bg = get_point - num_fg
181
+ fg_indices = np.random.choice(fg_size, size=num_fg, replace=True)
182
+ bg_indices = np.random.choice(bg_size, size=num_bg, replace=True)
183
+ fg_coords = fg_coords[fg_indices]
184
+ bg_coords = bg_coords[bg_indices]
185
+ coords = np.concatenate([fg_coords, bg_coords], axis=0)
186
+ labels = np.concatenate([np.ones(num_fg), np.zeros(num_bg)]).astype(int)
187
+ indices = np.random.permutation(get_point)
188
+ coords, labels = torch.as_tensor(
189
+ coords[indices], dtype=torch.float
190
+ ), torch.as_tensor(labels[indices], dtype=torch.int)
191
+ return coords, labels
192
+
193
+
194
+ def finetune_model_predict2D(
195
+ img3D,
196
+ gt3D,
197
+ sam_model_tune,
198
+ target_size=256,
199
+ click_method="no_gt",
200
+ device="cuda",
201
+ num_clicks=1,
202
+ prev_masks=None,
203
+ ):
204
+ pred_list = []
205
+
206
+ slice_mask_list = defaultdict(list)
207
+
208
+ img3D = torch.repeat_interleave(
209
+ img3D, repeats=3, dim=1
210
+ ) # 1 channel -> 3 channel (align to RGB)
211
+
212
+ click_points = []
213
+ click_labels = []
214
+ for slice_idx in tqdm(range(img3D.size(-1)), desc="transverse slices", leave=False):
215
+ img2D, gt2D = repixel_value(img3D[..., slice_idx]), gt3D[..., slice_idx]
216
+
217
+ if (gt2D == 0).all():
218
+ empty_result = torch.zeros(list(gt3D.size()[:-1]) + [1]).to(device)
219
+ for iter in range(num_clicks):
220
+ slice_mask_list[iter].append(empty_result)
221
+ continue
222
+
223
+ img2D = F.interpolate(
224
+ img2D, (target_size, target_size), mode="bilinear", align_corners=False
225
+ )
226
+ gt2D = F.interpolate(
227
+ gt2D.float(), (target_size, target_size), mode="nearest"
228
+ ).int()
229
+
230
+ img2D, gt2D = img2D.to(device), gt2D.to(device)
231
+ img2D = (img2D - img2D.mean()) / img2D.std()
232
+
233
+ with torch.no_grad():
234
+ image_embeddings = sam_model_tune.image_encoder(img2D.float())
235
+
236
+ points_co, points_la = torch.zeros(1, 0, 2).to(device), torch.zeros(1, 0).to(
237
+ device
238
+ )
239
+ low_res_masks = None
240
+ gt_semantic_seg = gt2D[0, 0].to(device)
241
+ true_masks = gt_semantic_seg > 0
242
+ for iter in range(num_clicks):
243
+ if low_res_masks == None:
244
+ pred_masks = torch.zeros_like(true_masks).to(device)
245
+ else:
246
+ pred_masks = (prev_masks[0, 0] > 0.0).to(device)
247
+ fn_masks = torch.logical_and(true_masks, torch.logical_not(pred_masks))
248
+ fp_masks = torch.logical_and(torch.logical_not(true_masks), pred_masks)
249
+ mask_to_sample = torch.logical_or(fn_masks, fp_masks)
250
+ new_points_co, _ = random_point_sampling(mask_to_sample.cpu(), get_point=1)
251
+ new_points_la = (
252
+ torch.Tensor([1]).to(torch.int64)
253
+ if (true_masks[new_points_co[0, 1].int(), new_points_co[0, 0].int()])
254
+ else torch.Tensor([0]).to(torch.int64)
255
+ )
256
+ new_points_co, new_points_la = new_points_co[None].to(
257
+ device
258
+ ), new_points_la[None].to(device)
259
+ points_co = torch.cat([points_co, new_points_co], dim=1)
260
+ points_la = torch.cat([points_la, new_points_la], dim=1)
261
+ prev_masks, low_res_masks, iou_predictions = sam_decoder_inference(
262
+ target_size,
263
+ points_co,
264
+ points_la,
265
+ sam_model_tune,
266
+ image_embeddings,
267
+ mask_inputs=low_res_masks,
268
+ multimask=True,
269
+ )
270
+ click_points.append(new_points_co)
271
+ click_labels.append(new_points_la)
272
+
273
+ slice_mask, _ = postprocess_masks(
274
+ low_res_masks, target_size, (gt3D.size(2), gt3D.size(3))
275
+ )
276
+ slice_mask_list[iter].append(
277
+ slice_mask[..., None]
278
+ ) # append (B, C, H, W, 1)
279
+
280
+ for iter in range(num_clicks):
281
+ medsam_seg = torch.cat(slice_mask_list[iter], dim=-1).cpu().numpy().squeeze()
282
+ medsam_seg = medsam_seg > sam_model_tune.mask_threshold
283
+ medsam_seg = medsam_seg.astype(np.uint8)
284
+
285
+ pred_list.append(medsam_seg)
286
+
287
+ return pred_list, click_points, click_labels
288
+
289
+
290
+ def finetune_model_predict3D(
291
+ img3D,
292
+ sam_model_tune,
293
+ device="cuda",
294
+ click_method="no_gt",
295
+ num_clicks=10,
296
+ prev_masks=None,
297
+ ):
298
+ img3D = norm_transform(img3D.squeeze(dim=1)) # (N, C, W, H, D)
299
+ img3D = img3D.unsqueeze(dim=1)
300
+
301
+ click_points = []
302
+ click_labels = []
303
+
304
+ pred_list = []
305
+
306
+ if prev_masks is None:
307
+ prev_masks = torch.zeros_like(img3D).to(device)
308
+ low_res_masks = F.interpolate(
309
+ prev_masks.float(),
310
+ size=(args.crop_size // 4, args.crop_size // 4, args.crop_size // 4),
311
+ )
312
+
313
+ with torch.no_grad():
314
+ image_embedding = sam_model_tune.image_encoder(
315
+ img3D.to(device)
316
+ ) # (1, 384, 16, 16, 16)
317
+
318
+ for click_idx in range(num_clicks):
319
+ with torch.no_grad():
320
+
321
+ batch_points, batch_labels = click_methods[click_method](
322
+ prev_masks.to(device), img3D.to(device), 170
323
+ ) # default threshold is 170, showing that here
324
+
325
+ points_co = torch.cat(batch_points, dim=0).to(device)
326
+ points_la = torch.cat(batch_labels, dim=0).to(device)
327
+
328
+ click_points.append(points_co)
329
+ click_labels.append(points_la)
330
+
331
+ points_input = points_co
332
+ labels_input = points_la
333
+
334
+ sparse_embeddings, dense_embeddings = sam_model_tune.prompt_encoder(
335
+ points=[points_input, labels_input],
336
+ boxes=None,
337
+ masks=low_res_masks.to(device),
338
+ )
339
+ low_res_masks, _ = sam_model_tune.mask_decoder(
340
+ image_embeddings=image_embedding.to(device), # (B, 384, 64, 64, 64)
341
+ image_pe=sam_model_tune.prompt_encoder.get_dense_pe(), # (1, 384, 64, 64, 64)
342
+ sparse_prompt_embeddings=sparse_embeddings, # (B, 2, 384)
343
+ dense_prompt_embeddings=dense_embeddings, # (B, 384, 64, 64, 64)
344
+ multimask_output=False,
345
+ )
346
+ prev_masks = F.interpolate(
347
+ low_res_masks,
348
+ size=img3D.shape[-3:],
349
+ mode="trilinear",
350
+ align_corners=False,
351
+ )
352
+
353
+ medsam_seg_prob = torch.sigmoid(prev_masks) # (B, 1, 64, 64, 64)
354
+ # convert prob to mask
355
+ medsam_seg_prob = medsam_seg_prob.cpu().numpy().squeeze()
356
+ medsam_seg = (medsam_seg_prob > 0.5).astype(np.uint8)
357
+ pred_list.append(medsam_seg)
358
+
359
+ return pred_list, click_points, click_labels
360
+
361
+
362
+ # TODO: check if this works?
363
+ def pad_and_crop_with_sliding_window(img3D, crop_transform, offset_mode="center"):
364
+ subject = tio.Subject(
365
+ image=tio.ScalarImage(tensor=img3D.squeeze(0)),
366
+ )
367
+ padding_params, cropping_params = crop_transform.compute_crop_or_pad(subject)
368
+ # cropping_params: (x_start, x_max-(x_start+roi_size), y_start, ...)
369
+ # padding_params: (x_left_pad, x_right_pad, y_left_pad, ...)
370
+ if cropping_params is None:
371
+ cropping_params = (0, 0, 0, 0, 0, 0)
372
+ if padding_params is None:
373
+ padding_params = (0, 0, 0, 0, 0, 0)
374
+ roi_shape = crop_transform.target_shape
375
+ vol_bound = (0, img3D.shape[2], 0, img3D.shape[3], 0, img3D.shape[4])
376
+ center_oob_ori_roi = (
377
+ cropping_params[0] - padding_params[0],
378
+ cropping_params[0] + roi_shape[0] - padding_params[0],
379
+ cropping_params[2] - padding_params[2],
380
+ cropping_params[2] + roi_shape[1] - padding_params[2],
381
+ cropping_params[4] - padding_params[4],
382
+ cropping_params[4] + roi_shape[2] - padding_params[4],
383
+ )
384
+ window_list = []
385
+ offset_dict = {
386
+ "rounded": list(product((-32, +32, 0), repeat=3)),
387
+ "center": [(0, 0, 0)],
388
+ }
389
+ for offset in offset_dict[offset_mode]:
390
+ # get the position in original volume~(allow out-of-bound) for current offset
391
+ oob_ori_roi = (
392
+ center_oob_ori_roi[0] + offset[0],
393
+ center_oob_ori_roi[1] + offset[0],
394
+ center_oob_ori_roi[2] + offset[1],
395
+ center_oob_ori_roi[3] + offset[1],
396
+ center_oob_ori_roi[4] + offset[2],
397
+ center_oob_ori_roi[5] + offset[2],
398
+ )
399
+ # get corresponing padding params based on `vol_bound`
400
+ padding_params = [0 for i in range(6)]
401
+ for idx, (ori_pos, bound) in enumerate(zip(oob_ori_roi, vol_bound)):
402
+ pad_val = 0
403
+ if idx % 2 == 0 and ori_pos < bound: # left bound
404
+ pad_val = bound - ori_pos
405
+ if idx % 2 == 1 and ori_pos > bound:
406
+ pad_val = ori_pos - bound
407
+ padding_params[idx] = pad_val
408
+ # get corresponding crop params after padding
409
+ cropping_params = (
410
+ oob_ori_roi[0] + padding_params[0],
411
+ vol_bound[1] - oob_ori_roi[1] + padding_params[1],
412
+ oob_ori_roi[2] + padding_params[2],
413
+ vol_bound[3] - oob_ori_roi[3] + padding_params[3],
414
+ oob_ori_roi[4] + padding_params[4],
415
+ vol_bound[5] - oob_ori_roi[5] + padding_params[5],
416
+ )
417
+ # pad and crop for the original subject
418
+ pad_and_crop = tio.Compose(
419
+ [
420
+ tio.Pad(padding_params, padding_mode=crop_transform.padding_mode),
421
+ tio.Crop(cropping_params),
422
+ ]
423
+ )
424
+ subject_roi = pad_and_crop(subject)
425
+ img3D_roi = subject_roi.image.data.clone().detach().unsqueeze(1)
426
+
427
+ # collect all position information, and set correct roi for sliding-windows in
428
+ # todo: get correct roi window of half because of the sliding
429
+ windows_clip = [0 for i in range(6)]
430
+ for i in range(3):
431
+ if offset[i] < 0:
432
+ windows_clip[2 * i] = 0
433
+ windows_clip[2 * i + 1] = -(roi_shape[i] + offset[i])
434
+ elif offset[i] > 0:
435
+ windows_clip[2 * i] = roi_shape[i] - offset[i]
436
+ windows_clip[2 * i + 1] = 0
437
+ pos3D_roi = dict(
438
+ padding_params=padding_params,
439
+ cropping_params=cropping_params,
440
+ ori_roi=(
441
+ cropping_params[0] + windows_clip[0],
442
+ cropping_params[0]
443
+ + roi_shape[0]
444
+ - padding_params[0]
445
+ - padding_params[1]
446
+ + windows_clip[1],
447
+ cropping_params[2] + windows_clip[2],
448
+ cropping_params[2]
449
+ + roi_shape[1]
450
+ - padding_params[2]
451
+ - padding_params[3]
452
+ + windows_clip[3],
453
+ cropping_params[4] + windows_clip[4],
454
+ cropping_params[4]
455
+ + roi_shape[2]
456
+ - padding_params[4]
457
+ - padding_params[5]
458
+ + windows_clip[5],
459
+ ),
460
+ pred_roi=(
461
+ padding_params[0] + windows_clip[0],
462
+ roi_shape[0] - padding_params[1] + windows_clip[1],
463
+ padding_params[2] + windows_clip[2],
464
+ roi_shape[1] - padding_params[3] + windows_clip[3],
465
+ padding_params[4] + windows_clip[4],
466
+ roi_shape[2] - padding_params[5] + windows_clip[5],
467
+ ),
468
+ )
469
+ pred_roi = pos3D_roi["pred_roi"]
470
+
471
+ # if((gt3D_roi[pred_roi[0]:pred_roi[1],pred_roi[2]:pred_roi[3],pred_roi[4]:pred_roi[5]]==0).all()):
472
+ # print("skip empty window with offset", offset)
473
+ # continue
474
+
475
+ window_list.append((img3D_roi, pos3D_roi))
476
+ return window_list
477
+
478
+
479
+ def save_numpy_to_nifti(in_arr: np.array, out_path, meta_info):
480
+ # torchio turns 1xHxWxD into DxWxH
481
+ # so we need to squeeze and transpose back to HxWxD
482
+ ori_arr = np.transpose(in_arr.squeeze(), (2, 1, 0))
483
+ out = sitk.GetImageFromArray(ori_arr)
484
+ sitk_meta_translator = lambda x: [float(i) for i in x]
485
+ out.SetOrigin(sitk_meta_translator(meta_info["origin"]))
486
+ out.SetDirection(sitk_meta_translator(meta_info["direction"]))
487
+ out.SetSpacing(sitk_meta_translator(meta_info["spacing"]))
488
+ sitk.WriteImage(out, out_path)
489
+
490
+
491
+ if __name__ == "__main__":
492
+ all_dataset_paths = glob(join(args.test_data_path, "*", "*"))
493
+ all_dataset_paths = list(filter(osp.isdir, all_dataset_paths))
494
+ print("get", len(all_dataset_paths), "datasets")
495
+
496
+ crop_transform = tio.CropOrPad(
497
+ target_shape=(args.crop_size, args.crop_size, args.crop_size)
498
+ )
499
+
500
+ infer_transform = [
501
+ tio.ToCanonical(),
502
+ ]
503
+
504
+ test_dataset = Dataset_Union_ALL_Infer(
505
+ paths=all_dataset_paths,
506
+ data_type=args.data_type,
507
+ transform=tio.Compose(infer_transform),
508
+ split_num=args.split_num,
509
+ split_idx=args.split_idx,
510
+ pcc=False,
511
+ get_all_meta_info=True,
512
+ )
513
+
514
+ test_dataloader = DataLoader(
515
+ dataset=test_dataset, sampler=None, batch_size=1, shuffle=True
516
+ )
517
+
518
+ checkpoint_path = args.checkpoint_path
519
+
520
+ device = args.device
521
+ print("device:", device)
522
+
523
+ if args.dim == 3:
524
+ sam_model_tune = sam_model_registry3D[args.model_type](checkpoint=None).to(
525
+ device
526
+ )
527
+ if checkpoint_path is not None:
528
+ model_dict = torch.load(checkpoint_path, map_location=device)
529
+ state_dict = model_dict["model_state_dict"]
530
+ sam_model_tune.load_state_dict(state_dict)
531
+ else:
532
+ raise NotImplementedError(
533
+ "this scipts is designed for 3D sliding-window inference, not support other dims"
534
+ )
535
+
536
+ sam_trans = ResizeLongestSide3D(sam_model_tune.image_encoder.img_size)
537
+ norm_transform = tio.ZNormalization(masking_method=lambda x: x > 0)
538
+
539
+ for batch_data in tqdm(test_dataloader):
540
+ image3D, meta_info = batch_data
541
+ img_name = meta_info["image_path"][0]
542
+
543
+ modality = osp.basename(osp.dirname(osp.dirname(osp.dirname(img_name))))
544
+ dataset = osp.basename(osp.dirname(osp.dirname(img_name)))
545
+ vis_root = osp.join(args.pred_output_dir, modality, dataset)
546
+ pred_path = osp.join(
547
+ vis_root,
548
+ osp.basename(img_name).replace(
549
+ ".nii.gz", f"_pred{args.num_clicks-1}.nii.gz"
550
+ ),
551
+ )
552
+
553
+ """ inference """
554
+ if args.skip_existing_pred and osp.exists(pred_path):
555
+ pass # if the prediction already exists, skip inference
556
+ else:
557
+ image3D_full = image3D
558
+ pred3D_full_dict = {
559
+ click_idx: torch.zeros_like(image3D_full).numpy()
560
+ for click_idx in range(args.num_clicks)
561
+ }
562
+ offset_mode = "center" if (not args.sliding_window) else "rounded"
563
+ sliding_window_list = pad_and_crop_with_sliding_window(
564
+ image3D_full, crop_transform, offset_mode=offset_mode
565
+ )
566
+ for image3D, pos3D in sliding_window_list:
567
+ seg_mask_list, points, labels = finetune_model_predict3D(
568
+ image3D,
569
+ sam_model_tune,
570
+ device=device,
571
+ click_method=args.point_method,
572
+ num_clicks=args.num_clicks,
573
+ prev_masks=None,
574
+ )
575
+ ori_roi, pred_roi = pos3D["ori_roi"], pos3D["pred_roi"]
576
+ for idx, seg_mask in enumerate(seg_mask_list):
577
+ seg_mask_roi = seg_mask[
578
+ ...,
579
+ pred_roi[0] : pred_roi[1],
580
+ pred_roi[2] : pred_roi[3],
581
+ pred_roi[4] : pred_roi[5],
582
+ ]
583
+ pred3D_full_dict[idx][
584
+ ...,
585
+ ori_roi[0] : ori_roi[1],
586
+ ori_roi[2] : ori_roi[3],
587
+ ori_roi[4] : ori_roi[5],
588
+ ] = seg_mask_roi
589
+
590
+ os.makedirs(vis_root, exist_ok=True)
591
+ padding_params = sliding_window_list[-1][-1]["padding_params"]
592
+ cropping_params = sliding_window_list[-1][-1]["cropping_params"]
593
+ # print(padding_params, cropping_params)
594
+ point_offset = np.array(
595
+ [
596
+ cropping_params[0] - padding_params[0],
597
+ cropping_params[2] - padding_params[2],
598
+ cropping_params[4] - padding_params[4],
599
+ ]
600
+ )
601
+ points = [p.cpu().numpy() + point_offset for p in points]
602
+ labels = [l.cpu().numpy() for l in labels]
603
+ pt_info = dict(points=points, labels=labels)
604
+ # print("save to", osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", "_pred.nii.gz")))
605
+ pt_path = osp.join(
606
+ vis_root, osp.basename(img_name).replace(".nii.gz", "_pt.pkl")
607
+ )
608
+ pickle.dump(pt_info, open(pt_path, "wb"))
609
+
610
+ if args.save_image:
611
+ save_numpy_to_nifti(
612
+ image3D_full,
613
+ osp.join(
614
+ vis_root,
615
+ osp.basename(img_name).replace(".nii.gz", f"_img.nii.gz"),
616
+ ),
617
+ meta_info,
618
+ )
619
+ for idx, pred3D_full in pred3D_full_dict.items():
620
+ save_numpy_to_nifti(
621
+ pred3D_full,
622
+ osp.join(
623
+ vis_root,
624
+ osp.basename(img_name).replace(".nii.gz", f"_pred{idx}.nii.gz"),
625
+ ),
626
+ meta_info,
627
+ )
628
+ radius = 2
629
+ for pt in points[: idx + 1]:
630
+ pred3D_full[
631
+ ...,
632
+ pt[0, 0, 0] - radius : pt[0, 0, 0] + radius,
633
+ pt[0, 0, 1] - radius : pt[0, 0, 1] + radius,
634
+ pt[0, 0, 2] - radius : pt[0, 0, 2] + radius,
635
+ ] = 10
636
+ save_numpy_to_nifti(
637
+ pred3D_full,
638
+ osp.join(
639
+ vis_root,
640
+ osp.basename(img_name).replace(
641
+ ".nii.gz", f"_pred{idx}_wPt.nii.gz"
642
+ ),
643
+ ),
644
+ meta_info,
645
+ )
646
+
647
+ print("Done")
infer_sequence.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ python infer_sequence.py --seed 2023 \
2
+ -tdp ./data/inference -nc 1 \
3
+ -cp ./work_dir/fine_tune_experimental_augmented/sam_model_latest.pth \
4
+ --output_dir ./results/sequence \
5
+ --task_name sequence
inference.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import os.path as osp
3
+ join = osp.join
4
+ import numpy as np
5
+ from glob import glob
6
+ import torch
7
+ from segment_anything.build_sam3D import sam_model_registry3D
8
+ from segment_anything.utils.transforms3D import ResizeLongestSide3D
9
+ from segment_anything import sam_model_registry
10
+ from tqdm import tqdm
11
+ import argparse
12
+ import SimpleITK as sitk
13
+ import torch.nn.functional as F
14
+ from torch.utils.data import DataLoader
15
+ import SimpleITK as sitk
16
+ import torchio as tio
17
+ import numpy as np
18
+ from collections import OrderedDict, defaultdict
19
+ import json
20
+ import pickle
21
+ from utils.click_method import get_next_click3D_torch_ritm, get_next_click3D_torch_2
22
+ from utils.data_loader import Dataset_Union_ALL_Val
23
+ from itertools import product
24
+
25
+ parser = argparse.ArgumentParser()
26
+ parser.add_argument('-tdp', '--test_data_path', type=str, default='./data/validation')
27
+ parser.add_argument('-cp', '--checkpoint_path', type=str, default='./ckpt/sam_med3d.pth')
28
+ parser.add_argument('--output_dir', type=str, default='./visualization')
29
+ parser.add_argument('--task_name', type=str, default='test_amos')
30
+ parser.add_argument('--skip_existing_pred', action='store_true', default=False)
31
+ parser.add_argument('--save_image_and_gt', action='store_true', default=False)
32
+ parser.add_argument('--sliding_window', action='store_true', default=False)
33
+
34
+ parser.add_argument('--image_size', type=int, default=256)
35
+ parser.add_argument('--crop_size', type=int, default=128)
36
+ parser.add_argument('--device', type=str, default='cuda')
37
+ parser.add_argument('-mt', '--model_type', type=str, default='vit_b_ori')
38
+ parser.add_argument('-nc', '--num_clicks', type=int, default=5)
39
+ parser.add_argument('-pm', '--point_method', type=str, default='default')
40
+ parser.add_argument('-dt', '--data_type', type=str, default='Ts')
41
+
42
+ parser.add_argument('--threshold', type=int, default=0)
43
+ parser.add_argument('--dim', type=int, default=3)
44
+ parser.add_argument('--split_idx', type=int, default=0)
45
+ parser.add_argument('--split_num', type=int, default=1)
46
+ parser.add_argument('--ft2d', action='store_true', default=False)
47
+ parser.add_argument('--seed', type=int, default=2023)
48
+
49
+ args = parser.parse_args()
50
+
51
+ ''' parse output_dir and task_name '''
52
+ args.output_dir = join(args.output_dir, args.task_name)
53
+ args.pred_output_dir = join(args.output_dir, "pred")
54
+ os.makedirs(args.output_dir, exist_ok=True)
55
+ os.makedirs(args.pred_output_dir, exist_ok=True)
56
+ args.save_name = join(args.output_dir, "dice.py")
57
+ print("output_dir set to", args.output_dir)
58
+
59
+ SEED = args.seed
60
+ print("set seed as", SEED)
61
+ torch.manual_seed(SEED)
62
+ np.random.seed(SEED)
63
+
64
+ if torch.cuda.is_available():
65
+ torch.cuda.init()
66
+
67
+ click_methods = {
68
+ 'default': get_next_click3D_torch_ritm,
69
+ 'ritm': get_next_click3D_torch_ritm,
70
+ 'random': get_next_click3D_torch_2,
71
+ }
72
+
73
+ def compute_iou(pred_mask, gt_semantic_seg):
74
+ in_mask = np.logical_and(gt_semantic_seg, pred_mask)
75
+ out_mask = np.logical_or(gt_semantic_seg, pred_mask)
76
+ iou = np.sum(in_mask) / np.sum(out_mask)
77
+ return iou
78
+
79
+ def compute_dice(mask_gt, mask_pred, dtype=np.uint8):
80
+ volume_sum = mask_gt.sum() + mask_pred.sum()
81
+ if volume_sum == 0:
82
+ return np.NaN
83
+ volume_intersect = (mask_gt.astype(dtype) & mask_pred.astype(dtype)).sum()
84
+ return 2*volume_intersect / volume_sum
85
+
86
+ def postprocess_masks(low_res_masks, image_size, original_size):
87
+ ori_h, ori_w = original_size
88
+ masks = F.interpolate(
89
+ low_res_masks,
90
+ (image_size, image_size),
91
+ mode="bilinear",
92
+ align_corners=False,
93
+ )
94
+ if args.ft2d and ori_h < image_size and ori_w < image_size:
95
+ top = (image_size - ori_h) // 2
96
+ left = (image_size - ori_w) // 2
97
+ masks = masks[..., top : ori_h + top, left : ori_w + left]
98
+ pad = (top, left)
99
+ else:
100
+ masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
101
+ pad = None
102
+ return masks, pad
103
+
104
+ def sam_decoder_inference(target_size, points_coords, points_labels, model, image_embeddings, mask_inputs=None, multimask = False):
105
+ with torch.no_grad():
106
+ sparse_embeddings, dense_embeddings = model.prompt_encoder(
107
+ points=(points_coords.to(model.device), points_labels.to(model.device)),
108
+ boxes=None,
109
+ masks=mask_inputs,
110
+ )
111
+
112
+ low_res_masks, iou_predictions = model.mask_decoder(
113
+ image_embeddings = image_embeddings,
114
+ image_pe = model.prompt_encoder.get_dense_pe(),
115
+ sparse_prompt_embeddings=sparse_embeddings,
116
+ dense_prompt_embeddings=dense_embeddings,
117
+ multimask_output=multimask,
118
+ )
119
+
120
+ if multimask:
121
+ max_values, max_indexs = torch.max(iou_predictions, dim=1)
122
+ max_values = max_values.unsqueeze(1)
123
+ iou_predictions = max_values
124
+ low_res = []
125
+ for i, idx in enumerate(max_indexs):
126
+ low_res.append(low_res_masks[i:i+1, idx])
127
+ low_res_masks = torch.stack(low_res, 0)
128
+ masks = F.interpolate(low_res_masks, (target_size, target_size), mode="bilinear", align_corners=False,)
129
+ return masks, low_res_masks, iou_predictions
130
+
131
+ def repixel_value(arr, is_seg=False):
132
+ if not is_seg:
133
+ min_val = arr.min()
134
+ max_val = arr.max()
135
+ new_arr = (arr - min_val) / (max_val - min_val + 1e-10) * 255.
136
+ return new_arr
137
+
138
+ def random_point_sampling(mask, get_point = 1):
139
+ if isinstance(mask, torch.Tensor):
140
+ mask = mask.numpy()
141
+ fg_coords = np.argwhere(mask == 1)[:,::-1]
142
+ bg_coords = np.argwhere(mask == 0)[:,::-1]
143
+
144
+ fg_size = len(fg_coords)
145
+ bg_size = len(bg_coords)
146
+
147
+ if get_point == 1:
148
+ if fg_size > 0:
149
+ index = np.random.randint(fg_size)
150
+ fg_coord = fg_coords[index]
151
+ label = 1
152
+ else:
153
+ index = np.random.randint(bg_size)
154
+ fg_coord = bg_coords[index]
155
+ label = 0
156
+ return torch.as_tensor([fg_coord.tolist()], dtype=torch.float), torch.as_tensor([label], dtype=torch.int)
157
+ else:
158
+ num_fg = get_point // 2
159
+ num_bg = get_point - num_fg
160
+ fg_indices = np.random.choice(fg_size, size=num_fg, replace=True)
161
+ bg_indices = np.random.choice(bg_size, size=num_bg, replace=True)
162
+ fg_coords = fg_coords[fg_indices]
163
+ bg_coords = bg_coords[bg_indices]
164
+ coords = np.concatenate([fg_coords, bg_coords], axis=0)
165
+ labels = np.concatenate([np.ones(num_fg), np.zeros(num_bg)]).astype(int)
166
+ indices = np.random.permutation(get_point)
167
+ coords, labels = torch.as_tensor(coords[indices], dtype=torch.float), torch.as_tensor(labels[indices], dtype=torch.int)
168
+ return coords, labels
169
+
170
+
171
+ def finetune_model_predict2D(img3D, gt3D, sam_model_tune, target_size=256, click_method='random', device='cuda', num_clicks=1, prev_masks=None):
172
+ pred_list = []
173
+
174
+ slice_mask_list = defaultdict(list)
175
+
176
+ img3D = torch.repeat_interleave(img3D, repeats=3, dim=1) # 1 channel -> 3 channel (align to RGB)
177
+
178
+ click_points = []
179
+ click_labels = []
180
+ for slice_idx in tqdm(range(img3D.size(-1)), desc="transverse slices", leave=False):
181
+ img2D, gt2D = repixel_value(img3D[..., slice_idx]), gt3D[..., slice_idx]
182
+
183
+ if (gt2D==0).all():
184
+ empty_result = torch.zeros(list(gt3D.size()[:-1])+[1]).to(device)
185
+ for iter in range(num_clicks):
186
+ slice_mask_list[iter].append(empty_result)
187
+ continue
188
+
189
+ img2D = F.interpolate(img2D, (target_size, target_size), mode="bilinear", align_corners=False)
190
+ gt2D = F.interpolate(gt2D.float(), (target_size, target_size), mode="nearest").int()
191
+
192
+ img2D, gt2D = img2D.to(device), gt2D.to(device)
193
+ img2D = (img2D - img2D.mean()) / img2D.std()
194
+
195
+ with torch.no_grad():
196
+ image_embeddings = sam_model_tune.image_encoder(img2D.float())
197
+
198
+ points_co, points_la = torch.zeros(1,0,2).to(device), torch.zeros(1,0).to(device)
199
+ low_res_masks = None
200
+ gt_semantic_seg = gt2D[0, 0].to(device)
201
+ true_masks = (gt_semantic_seg > 0)
202
+ for iter in range(num_clicks):
203
+ if(low_res_masks==None):
204
+ pred_masks = torch.zeros_like(true_masks).to(device)
205
+ else:
206
+ pred_masks = (prev_masks[0, 0] > 0.0).to(device)
207
+ fn_masks = torch.logical_and(true_masks, torch.logical_not(pred_masks))
208
+ fp_masks = torch.logical_and(torch.logical_not(true_masks), pred_masks)
209
+ mask_to_sample = torch.logical_or(fn_masks, fp_masks)
210
+ new_points_co, _ = random_point_sampling(mask_to_sample.cpu(), get_point=1)
211
+ new_points_la = torch.Tensor([1]).to(torch.int64) if(true_masks[new_points_co[0,1].int(), new_points_co[0,0].int()]) else torch.Tensor([0]).to(torch.int64)
212
+ new_points_co, new_points_la = new_points_co[None].to(device), new_points_la[None].to(device)
213
+ points_co = torch.cat([points_co, new_points_co],dim=1)
214
+ points_la = torch.cat([points_la, new_points_la],dim=1)
215
+ prev_masks, low_res_masks, iou_predictions = sam_decoder_inference(
216
+ target_size, points_co, points_la, sam_model_tune, image_embeddings,
217
+ mask_inputs = low_res_masks, multimask = True)
218
+ click_points.append(new_points_co)
219
+ click_labels.append(new_points_la)
220
+
221
+ slice_mask, _ = postprocess_masks(low_res_masks, target_size, (gt3D.size(2), gt3D.size(3)))
222
+ slice_mask_list[iter].append(slice_mask[..., None]) # append (B, C, H, W, 1)
223
+
224
+ for iter in range(num_clicks):
225
+ medsam_seg = torch.cat(slice_mask_list[iter], dim=-1).cpu().numpy().squeeze()
226
+ medsam_seg = medsam_seg > sam_model_tune.mask_threshold
227
+ medsam_seg = medsam_seg.astype(np.uint8)
228
+
229
+ pred_list.append(medsam_seg)
230
+
231
+ return pred_list, click_points, click_labels
232
+
233
+
234
+ def finetune_model_predict3D(img3D, gt3D, sam_model_tune, device='cuda', click_method='random', num_clicks=10, prev_masks=None):
235
+ img3D = norm_transform(img3D.squeeze(dim=1)) # (N, C, W, H, D)
236
+ img3D = img3D.unsqueeze(dim=1)
237
+
238
+ click_points = []
239
+ click_labels = []
240
+
241
+ pred_list = []
242
+
243
+ if prev_masks is None:
244
+ prev_masks = torch.zeros_like(gt3D).to(device)
245
+ low_res_masks = F.interpolate(prev_masks.float(), size=(args.crop_size//4,args.crop_size//4,args.crop_size//4))
246
+
247
+ with torch.no_grad():
248
+ image_embedding = sam_model_tune.image_encoder(img3D.to(device)) # (1, 384, 16, 16, 16)
249
+
250
+ for click_idx in range(num_clicks):
251
+ with torch.no_grad():
252
+ if(click_idx>1):
253
+ click_method = "random"
254
+ batch_points, batch_labels = click_methods[click_method](prev_masks.to(device), gt3D.to(device))
255
+
256
+ points_co = torch.cat(batch_points, dim=0).to(device)
257
+ points_la = torch.cat(batch_labels, dim=0).to(device)
258
+
259
+ click_points.append(points_co)
260
+ click_labels.append(points_la)
261
+
262
+ points_input = points_co
263
+ labels_input = points_la
264
+
265
+ sparse_embeddings, dense_embeddings = sam_model_tune.prompt_encoder(
266
+ points=[points_input, labels_input],
267
+ boxes=None,
268
+ masks=low_res_masks.to(device),
269
+ )
270
+ low_res_masks, _ = sam_model_tune.mask_decoder(
271
+ image_embeddings=image_embedding.to(device), # (B, 384, 64, 64, 64)
272
+ image_pe=sam_model_tune.prompt_encoder.get_dense_pe(), # (1, 384, 64, 64, 64)
273
+ sparse_prompt_embeddings=sparse_embeddings, # (B, 2, 384)
274
+ dense_prompt_embeddings=dense_embeddings, # (B, 384, 64, 64, 64)
275
+ multimask_output=False,
276
+ )
277
+ prev_masks = F.interpolate(low_res_masks, size=gt3D.shape[-3:], mode='trilinear', align_corners=False)
278
+
279
+ medsam_seg_prob = torch.sigmoid(prev_masks) # (B, 1, 64, 64, 64)
280
+ # convert prob to mask
281
+ medsam_seg_prob = medsam_seg_prob.cpu().numpy().squeeze()
282
+ medsam_seg = (medsam_seg_prob > 0.5).astype(np.uint8)
283
+ pred_list.append(medsam_seg)
284
+
285
+ return pred_list, click_points, click_labels
286
+
287
+
288
+ def pad_and_crop_with_sliding_window(img3D, gt3D, crop_transform, offset_mode="center"):
289
+ subject = tio.Subject(
290
+ image = tio.ScalarImage(tensor=img3D.squeeze(0)),
291
+ label = tio.LabelMap(tensor=gt3D.squeeze(0)),
292
+ )
293
+ padding_params, cropping_params = crop_transform.compute_crop_or_pad(subject)
294
+ # cropping_params: (x_start, x_max-(x_start+roi_size), y_start, ...)
295
+ # padding_params: (x_left_pad, x_right_pad, y_left_pad, ...)
296
+ if(cropping_params is None): cropping_params = (0,0,0,0,0,0)
297
+ if(padding_params is None): padding_params = (0,0,0,0,0,0)
298
+ roi_shape = crop_transform.target_shape
299
+ vol_bound = (0, img3D.shape[2], 0, img3D.shape[3], 0, img3D.shape[4])
300
+ center_oob_ori_roi=(
301
+ cropping_params[0]-padding_params[0], cropping_params[0]+roi_shape[0]-padding_params[0],
302
+ cropping_params[2]-padding_params[2], cropping_params[2]+roi_shape[1]-padding_params[2],
303
+ cropping_params[4]-padding_params[4], cropping_params[4]+roi_shape[2]-padding_params[4],
304
+ )
305
+ window_list = []
306
+ offset_dict = {
307
+ "rounded": list(product((-32,+32,0), repeat=3)),
308
+ "center": [(0,0,0)],
309
+ }
310
+ for offset in offset_dict[offset_mode]:
311
+ # get the position in the original volume (out-of-bound allowed) for the current offset
312
+ oob_ori_roi = (
313
+ center_oob_ori_roi[0]+offset[0], center_oob_ori_roi[1]+offset[0],
314
+ center_oob_ori_roi[2]+offset[1], center_oob_ori_roi[3]+offset[1],
315
+ center_oob_ori_roi[4]+offset[2], center_oob_ori_roi[5]+offset[2],
316
+ )
317
+ # get corresponding padding params based on `vol_bound`
318
+ padding_params = [0 for i in range(6)]
319
+ for idx, (ori_pos, bound) in enumerate(zip(oob_ori_roi, vol_bound)):
320
+ pad_val = 0
321
+ if(idx%2==0 and ori_pos<bound): # left bound
322
+ pad_val = bound-ori_pos
323
+ if(idx%2==1 and ori_pos>bound):
324
+ pad_val = ori_pos-bound
325
+ padding_params[idx] = pad_val
326
+ # get corresponding crop params after padding
327
+ cropping_params = (
328
+ oob_ori_roi[0]+padding_params[0], vol_bound[1]-oob_ori_roi[1]+padding_params[1],
329
+ oob_ori_roi[2]+padding_params[2], vol_bound[3]-oob_ori_roi[3]+padding_params[3],
330
+ oob_ori_roi[4]+padding_params[4], vol_bound[5]-oob_ori_roi[5]+padding_params[5],
331
+ )
332
+ # pad and crop for the original subject
333
+ pad_and_crop = tio.Compose([
334
+ tio.Pad(padding_params, padding_mode=crop_transform.padding_mode),
335
+ tio.Crop(cropping_params),
336
+ ])
337
+ subject_roi = pad_and_crop(subject)
338
+ img3D_roi, gt3D_roi = subject_roi.image.data.clone().detach().unsqueeze(1), subject_roi.label.data.clone().detach().unsqueeze(1)
339
+
340
+ # collect all position info and set the correct roi for each sliding window
341
+ # TODO: clip the overlapping half of the window introduced by the sliding offset
342
+ windows_clip = [0 for i in range(6)]
343
+ for i in range(3):
344
+ if(offset[i]<0):
345
+ windows_clip[2*i] = 0
346
+ windows_clip[2*i+1] = -(roi_shape[i]+offset[i])
347
+ elif(offset[i]>0):
348
+ windows_clip[2*i] = roi_shape[i]-offset[i]
349
+ windows_clip[2*i+1] = 0
350
+ pos3D_roi = dict(
351
+ padding_params=padding_params, cropping_params=cropping_params,
352
+ ori_roi=(
353
+ cropping_params[0]+windows_clip[0], cropping_params[0]+roi_shape[0]-padding_params[0]-padding_params[1]+windows_clip[1],
354
+ cropping_params[2]+windows_clip[2], cropping_params[2]+roi_shape[1]-padding_params[2]-padding_params[3]+windows_clip[3],
355
+ cropping_params[4]+windows_clip[4], cropping_params[4]+roi_shape[2]-padding_params[4]-padding_params[5]+windows_clip[5],
356
+ ),
357
+ pred_roi=(
358
+ padding_params[0]+windows_clip[0], roi_shape[0]-padding_params[1]+windows_clip[1],
359
+ padding_params[2]+windows_clip[2], roi_shape[1]-padding_params[3]+windows_clip[3],
360
+ padding_params[4]+windows_clip[4], roi_shape[2]-padding_params[5]+windows_clip[5],
361
+ ))
362
+ pred_roi = pos3D_roi["pred_roi"]
363
+
364
+ #if((gt3D_roi[pred_roi[0]:pred_roi[1],pred_roi[2]:pred_roi[3],pred_roi[4]:pred_roi[5]]==0).all()):
365
+ #print("skip empty window with offset", offset)
366
+ # continue
367
+
368
+ window_list.append((img3D_roi, gt3D_roi, pos3D_roi))
369
+ return window_list
370
+
371
+ def save_numpy_to_nifti(in_arr: np.array, out_path, meta_info):
372
+ # torchio turns 1xHxWxD into DxWxH
373
+ # so we need to squeeze and transpose back to HxWxD
374
+ ori_arr = np.transpose(in_arr.squeeze(), (2, 1, 0))
375
+ out = sitk.GetImageFromArray(ori_arr)
376
+ sitk_meta_translator = lambda x: [float(i) for i in x]
377
+ out.SetOrigin(sitk_meta_translator(meta_info["origin"]))
378
+ out.SetDirection(sitk_meta_translator(meta_info["direction"]))
379
+ out.SetSpacing(sitk_meta_translator(meta_info["spacing"]))
380
+ sitk.WriteImage(out, out_path)
381
+
382
+
383
+ if __name__ == "__main__":
384
+ all_dataset_paths = glob(join(args.test_data_path, "*", "*"))
385
+ all_dataset_paths = list(filter(osp.isdir, all_dataset_paths))
386
+ print("get", len(all_dataset_paths), "datasets")
387
+
388
+ crop_transform = tio.CropOrPad(
389
+ mask_name='label',
390
+ target_shape=(args.crop_size, args.crop_size, args.crop_size))
391
+
392
+ infer_transform = [
393
+ tio.ToCanonical(),
394
+ ]
395
+
396
+ test_dataset = Dataset_Union_ALL_Val(
397
+ paths=all_dataset_paths,
398
+ mode="Val",
399
+ data_type=args.data_type,
400
+ transform=tio.Compose(infer_transform),
401
+ threshold=0,
402
+ split_num=args.split_num,
403
+ split_idx=args.split_idx,
404
+ pcc=False,
405
+ get_all_meta_info=True,
406
+ )
407
+
408
+ test_dataloader = DataLoader(
409
+ dataset=test_dataset,
410
+ sampler=None,
411
+ batch_size=1,
412
+ shuffle=True
413
+ )
414
+
415
+ checkpoint_path = args.checkpoint_path
416
+
417
+ device = args.device
418
+ print("device:", device)
419
+
420
+ if(args.dim==3):
421
+ sam_model_tune = sam_model_registry3D[args.model_type](checkpoint=None).to(device)
422
+ if checkpoint_path is not None:
423
+ model_dict = torch.load(checkpoint_path, map_location=device)
424
+ state_dict = model_dict['model_state_dict']
425
+ sam_model_tune.load_state_dict(state_dict)
426
+ else:
427
+ raise NotImplementedError("this scipts is designed for 3D sliding-window inference, not support other dims")
428
+
429
+ sam_trans = ResizeLongestSide3D(sam_model_tune.image_encoder.img_size)
430
+ norm_transform = tio.ZNormalization(masking_method=lambda x: x > 0)
431
+
432
+ all_iou_list = []
433
+ all_dice_list = []
434
+
435
+ out_dice = dict()
436
+ out_dice_all = OrderedDict()
437
+
438
+ for batch_data in tqdm(test_dataloader):
439
+ image3D, gt3D, meta_info = batch_data
440
+ img_name = meta_info["image_path"][0]
441
+
442
+ modality = osp.basename(osp.dirname(osp.dirname(osp.dirname(img_name))))
443
+ dataset = osp.basename(osp.dirname(osp.dirname(img_name)))
444
+ vis_root = osp.join(args.pred_output_dir, modality, dataset)
445
+ pred_path = osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_pred{args.num_clicks-1}.nii.gz"))
446
+
447
+ ''' inference '''
448
+ iou_list, dice_list = [], []
449
+ if(args.skip_existing_pred and osp.exists(pred_path)):
450
+ pass # if the prediction already exists, skip inference
451
+ else:
452
+ image3D_full, gt3D_full = image3D, gt3D
453
+ pred3D_full_dict = {click_idx:torch.zeros_like(gt3D_full).numpy() for click_idx in range(args.num_clicks)}
454
+ offset_mode = "center" if(not args.sliding_window) else "rounded"
455
+ sliding_window_list = pad_and_crop_with_sliding_window(image3D_full, gt3D_full, crop_transform, offset_mode=offset_mode)
456
+ for (image3D, gt3D, pos3D) in sliding_window_list:
457
+ seg_mask_list, points, labels = finetune_model_predict3D(
458
+ image3D, gt3D, sam_model_tune, device=device,
459
+ click_method=args.point_method, num_clicks=args.num_clicks,
460
+ prev_masks=None)
461
+ ori_roi, pred_roi = pos3D["ori_roi"], pos3D["pred_roi"]
462
+ for idx, seg_mask in enumerate(seg_mask_list):
463
+ seg_mask_roi = seg_mask[..., pred_roi[0]:pred_roi[1], pred_roi[2]:pred_roi[3], pred_roi[4]:pred_roi[5]]
464
+ pred3D_full_dict[idx][..., ori_roi[0]:ori_roi[1], ori_roi[2]:ori_roi[3], ori_roi[4]:ori_roi[5]] = seg_mask_roi
465
+
466
+ os.makedirs(vis_root, exist_ok=True)
467
+ padding_params = sliding_window_list[-1][-1]["padding_params"]
468
+ cropping_params = sliding_window_list[-1][-1]["cropping_params"]
469
+ # print(padding_params, cropping_params)
470
+ point_offset = np.array([cropping_params[0]-padding_params[0], cropping_params[2]-padding_params[2], cropping_params[4]-padding_params[4]])
471
+ points = [p.cpu().numpy()+point_offset for p in points]
472
+ labels = [l.cpu().numpy() for l in labels]
473
+ pt_info = dict(points=points, labels=labels)
474
+ # print("save to", osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", "_pred.nii.gz")))
475
+ pt_path=osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", "_pt.pkl"))
476
+ pickle.dump(pt_info, open(pt_path, "wb"))
477
+
478
+ if(args.save_image_and_gt):
479
+ save_numpy_to_nifti(image3D_full, osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_img.nii.gz")), meta_info)
480
+ save_numpy_to_nifti(gt3D_full, osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_gt.nii.gz")), meta_info)
481
+ for idx, pred3D_full in pred3D_full_dict.items():
482
+ save_numpy_to_nifti(pred3D_full, osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_pred{idx}.nii.gz")), meta_info)
483
+ radius = 2
484
+ for pt in points[:idx+1]:
485
+ pred3D_full[..., pt[0,0,0]-radius:pt[0,0,0]+radius, pt[0,0,1]-radius:pt[0,0,1]+radius, pt[0,0,2]-radius:pt[0,0,2]+radius] = 10
486
+ save_numpy_to_nifti(pred3D_full, osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_pred{idx}_wPt.nii.gz")), meta_info)
487
+
488
+ ''' metric computation '''
489
+ for click_idx in range(args.num_clicks):
490
+ reorient_tensor = lambda in_arr : np.transpose(in_arr.squeeze().detach().cpu().numpy(), (2, 1, 0))
491
+ curr_pred_path = osp.join(vis_root, osp.basename(img_name).replace(".nii.gz", f"_pred{click_idx}.nii.gz"))
492
+ medsam_seg = sitk.GetArrayFromImage(sitk.ReadImage(curr_pred_path))
493
+ iou_list.append(round(compute_iou(medsam_seg, reorient_tensor(gt3D_full)), 4))
494
+ dice_list.append(round(compute_dice(reorient_tensor(gt3D_full), medsam_seg), 4))
495
+
496
+ per_iou = max(iou_list)
497
+ all_iou_list.append(per_iou)
498
+ all_dice_list.append(max(dice_list))
499
+ print(dice_list)
500
+ out_dice[img_name] = max(dice_list)
501
+ cur_dice_dict = OrderedDict()
502
+ for i, dice in enumerate(dice_list):
503
+ cur_dice_dict[f'{i}'] = dice
504
+ out_dice_all[img_name] = cur_dice_dict
505
+
506
+ print('Mean IoU : ', sum(all_iou_list)/len(all_iou_list))
507
+ print('Mean Dice: ', sum(all_dice_list)/len(all_dice_list))
508
+
509
+ final_dice_dict = OrderedDict()
510
+ for k, v in out_dice_all.items():
511
+ organ = k.split('/')[-4]
512
+ final_dice_dict[organ] = OrderedDict()
513
+ for k, v in out_dice_all.items():
514
+ organ = k.split('/')[-4]
515
+ final_dice_dict[organ][k] = v
516
+
517
+ if(args.split_num>1):
518
+ args.save_name = args.save_name.replace('.py', f'_s{args.split_num}i{args.split_idx}.py')
519
+
520
+ print("Save to", args.save_name)
521
+ with open(args.save_name, 'w') as f:
522
+ f.writelines(f'# mean dice: \t{np.mean(all_dice_list)}\n')
523
+ f.writelines('dice_Ts = {')
524
+ for k, v in out_dice.items():
525
+ f.writelines(f'\'{str(k)}\': {v},\n')
526
+ f.writelines('}')
527
+
528
+ with open(args.save_name.replace('.py', '.json'), 'w') as f:
529
+ json.dump(final_dice_dict, f, indent=4)
530
+
531
+ print("Done")
medim_infer.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- encoding: utf-8 -*-
2
+ '''
3
+ @File : infer_with_medim.py
4
+ @Time : 2024/09/08 11:31:02
5
+ @Author : Haoyu Wang
6
+ @Contact : small_dark@sina.com
7
+ @Brief : Example code for inference with MedIM
8
+ '''
9
+
10
+ import medim
11
+ import torch
12
+ import numpy as np
13
+ import torch.nn.functional as F
14
+ import torchio as tio
15
+ import os.path as osp
16
+ import os
17
+ from torchio.data.io import sitk_to_nib
18
+ import SimpleITK as sitk
19
+
20
+
21
+ def random_sample_next_click(prev_mask, gt_mask):
22
+ """
23
+ Randomly sample one click from ground-truth mask and previous seg mask
24
+
25
+ Arguments:
26
+ prev_mask: (torch.Tensor) [H,W,D] previous mask predicted by SAM-Med3D
27
+ gt_mask: (torch.Tensor) [H,W,D] ground-truth mask for this image
28
+ """
29
+ prev_mask = prev_mask > 0
30
+ true_masks = gt_mask > 0
31
+
32
+ if (not true_masks.any()):
33
+ raise ValueError("Cannot find true value in the ground-truth!")
34
+
35
+ fn_masks = torch.logical_and(true_masks, torch.logical_not(prev_mask))
36
+ fp_masks = torch.logical_and(torch.logical_not(true_masks), prev_mask)
37
+
38
+ to_point_mask = torch.logical_or(fn_masks, fp_masks)
39
+
40
+ all_points = torch.argwhere(to_point_mask)
41
+ point = all_points[np.random.randint(len(all_points))]
42
+
43
+ if fn_masks[point[0], point[1], point[2]]:
44
+ is_positive = True
45
+ else:
46
+ is_positive = False
47
+
48
+ sampled_point = point.clone().detach().reshape(1, 1, 3)
49
+ sampled_label = torch.tensor([
50
+ int(is_positive),
51
+ ]).reshape(1, 1)
52
+
53
+ return sampled_point, sampled_label
54
+
55
+
56
+ def sam_model_infer(model,
57
+ roi_image,
58
+ prompt_generator=random_sample_next_click,
59
+ roi_gt=None,
60
+ prev_low_res_mask=None):
61
+ '''
62
+ Inference for SAM-Med3D with prompt points and their labels (positive/negative for each point)
63
+
64
+ # roi_image: (torch.Tensor) cropped image, shape [1,1,128,128,128]
65
+ # prompt_points_and_labels: (Tuple(torch.Tensor, torch.Tensor))
66
+ '''
67
+
68
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
69
+ print("using device", device)
70
+ model = model.to(device)
71
+
72
+ with torch.no_grad():
73
+ input_tensor = roi_image.to(device)
74
+ image_embeddings = model.image_encoder(input_tensor)
75
+
76
+ points_coords, points_labels = torch.zeros(1, 0,
77
+ 3).to(device), torch.zeros(
78
+ 1, 0).to(device)
79
+ new_points_co, new_points_la = torch.Tensor(
80
+ [[[64, 64, 64]]]).to(device), torch.Tensor([[1]]).to(torch.int64)
81
+ if (roi_gt is not None):
82
+ prev_low_res_mask = prev_low_res_mask if (
83
+ prev_low_res_mask is not None) else torch.zeros(
84
+ 1, 1, roi_image.shape[2] // 4, roi_image.shape[3] //
85
+ 4, roi_image.shape[4] // 4)
86
+ new_points_co, new_points_la = prompt_generator(
87
+ torch.zeros_like(roi_image)[0, 0], roi_gt[0, 0])
88
+ new_points_co, new_points_la = new_points_co.to(
89
+ device), new_points_la.to(device)
90
+ points_coords = torch.cat([points_coords, new_points_co], dim=1)
91
+ points_labels = torch.cat([points_labels, new_points_la], dim=1)
92
+
93
+ sparse_embeddings, dense_embeddings = model.prompt_encoder(
94
+ points=[points_coords, points_labels],
95
+ boxes=None, # bbox prompts are not supported yet
96
+ masks=prev_low_res_mask.to(device),
97
+ # masks=None,
98
+ )
99
+
100
+ low_res_masks, _ = model.mask_decoder(
101
+ image_embeddings=image_embeddings, # (1, 384, 8, 8, 8)
102
+ image_pe=model.prompt_encoder.get_dense_pe(), # (1, 384, 8, 8, 8)
103
+ sparse_prompt_embeddings=sparse_embeddings, # (1, 2, 384)
104
+ dense_prompt_embeddings=dense_embeddings, # (1, 384, 8, 8, 8)
105
+ )
106
+
107
+ prev_mask = F.interpolate(low_res_masks,
108
+ size=roi_image.shape[-3:],
109
+ mode='trilinear',
110
+ align_corners=False)
111
+
112
+ # convert prob to mask
113
+ medsam_seg_prob = torch.sigmoid(prev_mask) # (1, 1, 64, 64, 64)
114
+ medsam_seg_prob = medsam_seg_prob.cpu().numpy().squeeze()
115
+ medsam_seg_mask = (medsam_seg_prob > 0.5).astype(np.uint8)
116
+
117
+ return medsam_seg_mask
118
+
119
+
120
+ def resample_nii(input_path: str,
121
+ output_path: str,
122
+ target_spacing: tuple = (1.5, 1.5, 1.5),
123
+ n=None,
124
+ reference_image=None,
125
+ mode="linear"):
126
+ """
127
+ Resample a nii.gz file to a specified spacing using torchio.
128
+
129
+ Parameters:
130
+ - input_path: Path to the input .nii.gz file.
131
+ - output_path: Path to save the resampled .nii.gz file.
132
+ - target_spacing: Desired spacing for resampling. Default is (1.5, 1.5, 1.5).
133
+ """
134
+ # Load the nii.gz file using torchio
135
+ subject = tio.Subject(img=tio.ScalarImage(input_path))
136
+ resampler = tio.Resample(target=target_spacing, image_interpolation=mode)
137
+ resampled_subject = resampler(subject)
138
+
139
+ if (n != None):
140
+ image = resampled_subject.img
141
+ tensor_data = image.data
142
+ if (isinstance(n, int)):
143
+ n = [n]
144
+ for ni in n:
145
+ tensor_data[tensor_data == ni] = -1
146
+ tensor_data[tensor_data != -1] = 0
147
+ tensor_data[tensor_data != 0] = 1
148
+ save_image = tio.ScalarImage(tensor=tensor_data, affine=image.affine)
149
+ reference_size = reference_image.shape[
150
+ 1:] # omitting the channel dimension
151
+ cropper_or_padder = tio.CropOrPad(reference_size)
152
+ save_image = cropper_or_padder(save_image)
153
+ else:
154
+ save_image = resampled_subject.img
155
+
156
+ save_image.save(output_path)
157
+
158
+
159
+ def read_data_from_nii(img_path, gt_path):
160
+ sitk_image = sitk.ReadImage(img_path)
161
+ sitk_label = sitk.ReadImage(gt_path)
162
+
163
+ if sitk_image.GetOrigin() != sitk_label.GetOrigin():
164
+ sitk_image.SetOrigin(sitk_label.GetOrigin())
165
+ if sitk_image.GetDirection() != sitk_label.GetDirection():
166
+ sitk_image.SetDirection(sitk_label.GetDirection())
167
+
168
+ sitk_image_arr, _ = sitk_to_nib(sitk_image)
169
+ sitk_label_arr, _ = sitk_to_nib(sitk_label)
170
+
171
+ subject = tio.Subject(
172
+ image=tio.ScalarImage(tensor=sitk_image_arr),
173
+ label=tio.LabelMap(tensor=sitk_label_arr),
174
+ )
175
+ crop_transform = tio.CropOrPad(mask_name='label',
176
+ target_shape=(128, 128, 128))
177
+ padding_params, cropping_params = crop_transform.compute_crop_or_pad(
178
+ subject)
179
+ if (cropping_params is None): cropping_params = (0, 0, 0, 0, 0, 0)
180
+ if (padding_params is None): padding_params = (0, 0, 0, 0, 0, 0)
181
+
182
+ infer_transform = tio.Compose([
183
+ crop_transform,
184
+ tio.ZNormalization(masking_method=lambda x: x > 0),
185
+ ])
186
+ subject_roi = infer_transform(subject)
187
+
188
+ img3D_roi, gt3D_roi = subject_roi.image.data.clone().detach().unsqueeze(
189
+ 1), subject_roi.label.data.clone().detach().unsqueeze(1)
190
+ ori_roi_offset = (
191
+ cropping_params[0],
192
+ cropping_params[0] + 128 - padding_params[0] - padding_params[1],
193
+ cropping_params[2],
194
+ cropping_params[2] + 128 - padding_params[2] - padding_params[3],
195
+ cropping_params[4],
196
+ cropping_params[4] + 128 - padding_params[4] - padding_params[5],
197
+ )
198
+
199
+ meta_info = {
200
+ "image_path": img_path,
201
+ "image_shape": sitk_image_arr.shape[1:],
202
+ "origin": sitk_label.GetOrigin(),
203
+ "direction": sitk_label.GetDirection(),
204
+ "spacing": sitk_label.GetSpacing(),
205
+ "padding_params": padding_params,
206
+ "cropping_params": cropping_params,
207
+ "ori_roi": ori_roi_offset,
208
+ }
209
+ return (
210
+ img3D_roi,
211
+ gt3D_roi,
212
+ meta_info,
213
+ )
214
+
215
+
216
+ def save_numpy_to_nifti(in_arr: np.array, out_path, meta_info):
217
+ # torchio turn 1xHxWxD -> DxWxH
218
+ # so we need to squeeze and transpose back to HxWxD
219
+ ori_arr = np.transpose(in_arr.squeeze(), (2, 1, 0))
220
+ out = sitk.GetImageFromArray(ori_arr)
221
+ sitk_meta_translator = lambda x: [float(i) for i in x]
222
+ out.SetOrigin(sitk_meta_translator(meta_info["origin"]))
223
+ out.SetDirection(sitk_meta_translator(meta_info["direction"]))
224
+ out.SetSpacing(sitk_meta_translator(meta_info["spacing"]))
225
+ sitk.WriteImage(out, out_path)
226
+
227
+
228
+ def data_preprocess(img_path, gt_path, category_index):
229
+ target_img_path = osp.join(
230
+ osp.dirname(img_path),
231
+ osp.basename(img_path).replace(".nii.gz", "_resampled.nii.gz"))
232
+ target_gt_path = osp.join(
233
+ osp.dirname(gt_path),
234
+ osp.basename(gt_path).replace(".nii.gz", "_resampled.nii.gz"))
235
+ resample_nii(img_path, target_img_path)
236
+ resample_nii(gt_path,
237
+ target_gt_path,
238
+ n=category_index,
239
+ reference_image=tio.ScalarImage(target_img_path),
240
+ mode="nearest")
241
+ roi_image, roi_label, meta_info = read_data_from_nii(
242
+ target_img_path, target_gt_path)
243
+ return roi_image, roi_label, meta_info
244
+
245
+
246
+ def data_postprocess(roi_pred, meta_info, output_path, ori_img_path):
247
+ os.makedirs(osp.dirname(output_path), exist_ok=True)
248
+ pred3D_full = np.zeros(meta_info["image_shape"])
249
+ padding_params = meta_info["padding_params"]
250
+ unpadded_pred = roi_pred[padding_params[0] : 128-padding_params[1],
251
+ padding_params[2] : 128-padding_params[3],
252
+ padding_params[4] : 128-padding_params[5]]
253
+ ori_roi = meta_info["ori_roi"]
254
+ pred3D_full[ori_roi[0]:ori_roi[1], ori_roi[2]:ori_roi[3],
255
+ ori_roi[4]:ori_roi[5]] = unpadded_pred
256
+
257
+ sitk_image = sitk.ReadImage(ori_img_path)
258
+ ori_meta_info = {
259
+ "image_path": ori_img_path,
260
+ "image_shape": sitk_image.GetSize(),
261
+ "origin": sitk_image.GetOrigin(),
262
+ "direction": sitk_image.GetDirection(),
263
+ "spacing": sitk_image.GetSpacing(),
264
+ }
265
+ pred3D_full_ori = F.interpolate(
266
+ torch.Tensor(pred3D_full)[None][None],
267
+ size=ori_meta_info["image_shape"],
268
+ mode='nearest').cpu().numpy().squeeze()
269
+ save_numpy_to_nifti(pred3D_full_ori, output_path, meta_info)
270
+
271
+
272
+ if __name__ == "__main__":
273
+ ''' 1. read and pre-process your input data '''
274
+ img_path = "./test_data/kidney_right/AMOS/imagesVal/amos_0013.nii.gz"
275
+ gt_path = "./test_data/kidney_right/AMOS/labelsVal/amos_0013.nii.gz"
276
+ category_index = 3 # the index of your target category in the gt annotation
277
+ output_dir = "./test_data/kidney_right/AMOS/pred/"
278
+ roi_image, roi_label, meta_info = data_preprocess(img_path, gt_path, category_index=category_index)
279
+
280
+ ''' 2. prepare the pre-trained model with local path or huggingface url '''
281
+ ckpt_path = "https://huggingface.co/blueyo0/SAM-Med3D/blob/main/sam_med3d_turbo.pth"
282
+ # or you can use the local path like: ckpt_path = "./ckpt/sam_med3d_turbo.pth"
283
+ model = medim.create_model("SAM-Med3D",
284
+ pretrained=True,
285
+ checkpoint_path=ckpt_path)
286
+
287
+ ''' 3. infer with the pre-trained SAM-Med3D model '''
288
+ roi_pred = sam_model_infer(model, roi_image, roi_gt=roi_label)
289
+
290
+ ''' 4. post-process and save the result '''
291
+ output_path = osp.join(output_dir, osp.basename(img_path).replace(".nii.gz", "_pred.nii.gz"))
292
+ data_postprocess(roi_pred, meta_info, output_path, img_path)
293
+
294
+ print("result saved to", output_path)
readme.md ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: BoSAM
3
+ app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 5.25.2
6
+ ---
7
+ # SAM-Med3D \[[Paper](https://arxiv.org/abs/2310.15161)] \[[Suppl](https://github.com/uni-medical/SAM-Med3D/blob/main/paper/SAM_Med3D_ECCV_Supplementary.pdf)\] \[[Data](https://drive.google.com/file/d/1F7lRWM5mdEKSRQtvJ8ExEyNrWIEkXc-G/view?usp=drive_link)\]
8
+ [![x](https://img.shields.io/badge/cs.CV-2310.15161-b31b1b?logo=arxiv&logoColor=red)](https://arxiv.org/abs/2310.15161)
9
+ [![x](https://img.shields.io/badge/WeChat-Group-green?logo=wechat)](https://github.com/uni-medical/SAM-Med3D/tree/main?tab=readme-ov-file#-discussion-group)
10
+
11
+
12
+ The official repo of "SAM-Med3D: Towards General-purpose Segmentation Models for Volumetric Medical Images".
13
+
14
+ <div align="center">
15
+ <img src="assets/motivation.png">
16
+ </div>
17
+
18
+ ## 🔥🌻📰 News 📰🌻🔥
19
+ - **[Challenge]** SAM-Med3D is invited as a baseline of [CVPR-MedSegFMCompetition](https://www.codabench.org/competitions/5263/) and the tutorial is [here](https://github.com/uni-medical/SAM-Med3D/tree/CVPR25_3DFM). We kindly invite you to join the challenge and build better foundation models for 3D medical image segmentation!
20
+ - **[Examples]** SAM-Med3D is now supported in [MedIM](https://github.com/uni-medical/MedIM), you can easily get our model with one-line Python code. Our new example is in [`medim_infer.py`](https://github.com/uni-medical/SAM-Med3D/blob/main/medim_infer.py).
21
+ - **[Data]** We have now released all labels of our training dataset SA-Med3D-140K. Due to the large volume of image data (over 1T), we are currently seeking an appropriate release method. For now, you can directly contact small_dark@sina.com to obtain it. Download Link: [Baidu Netdisk](https://pan.baidu.com/s/12Nxwd10uVZs57O8WP8Y-Hg?pwd=cv6t) and [Google Drive](https://drive.google.com/file/d/1F7lRWM5mdEKSRQtvJ8ExEyNrWIEkXc-G/view?usp=drive_link).
22
+ - **[Paper]** SAM-Med3D is accepted as [ECCV BIC 2024 Oral](https://www.bioimagecomputing.com/program/selected-contributions/)
23
+ - **[Model]** A newer fine-tuned version of SAM-Med3D, named `SAM-Med3D-turbo`, has now been released. We fine-tuned it on 44 datasets ([list](https://github.com/uni-medical/SAM-Med3D/issues/2#issuecomment-1849002225)) to improve the performance. Hope this update can help you 🙂.
24
+ - **[Repos]** If you are interested in computer vision,
25
+ we recommend checking out [OpenGVLab](https://github.com/OpenGVLab) for more exciting projects like [SAM-Med2D](https://github.com/OpenGVLab/SAM-Med2D/tree/main)!
26
+
27
+ ## 🌟 Highlights
28
+ - 📚 Curated the most extensive volumetric medical dataset to date for training, boasting 143K 3D masks and 245 categories.
29
+ - 🚤 Achieved efficient promptable segmentation, requiring 10 to 100 times fewer prompt points for satisfactory 3D outcomes.
30
+ - 🏆 Conducted a thorough assessment of SAM-Med3D across 16 frequently used volumetric medical image segmentation datasets.
31
+
32
+ ## 🔗 Checkpoint
33
+
34
+ - **SAM-Med3D-turbo**: [Hugging Face](https://huggingface.co/blueyo0/SAM-Med3D/blob/main/sam_med3d_turbo.pth) | [Google Drive](https://drive.google.com/file/d/1MuqYRQKIZb4YPtEraK8zTKKpp-dUQIR9/view?usp=sharing) | [Baidu NetDisk (pwd:l6ol)](https://pan.baidu.com/s/1OEVtiDc6osG0l9HkQN4hEg?pwd=l6ol)
35
+
36
+ <details>
37
+ <summary>More</summary>
38
+
39
+ - **SAM-Med3D-base**:
40
+ [Google Drive](https://drive.google.com/file/d/1PFeUjlFMAppllS9x1kAWyCYUJM9re2Ub/view?usp=drive_link) | [Baidu NetDisk (pwd: r5o3)](https://pan.baidu.com/s/18uhMXy_XO0yy3ODj66N8GQ?pwd=r5o3)
41
+ - **SAM-Med3D-organ**:
42
+ [Google Drive](https://drive.google.com/file/d/1kKpjIwCsUWQI-mYZ2Lww9WZXuJxc3FvU/view?usp=sharing) | [Baidu NetDisk (pwd: 5t7v)](https://pan.baidu.com/s/1Dermdr-ZN8NMWELejF1p1w?pwd=5t7v)
43
+ - **SAM-Med3D-brain**:
44
+ [Google Drive](https://drive.google.com/file/d/1otbhZs9uugSWkAbcQLLSmPB8jo5rzFL2/view?usp=sharing) | [Baidu NetDisk (pwd: yp42)](https://pan.baidu.com/s/1S2-buTga9D4Nbrt6fevo8Q?pwd=yp42)
45
+
46
+ Other checkpoints are available via their official links:
47
+ - [SAM](https://drive.google.com/file/d/1_U26MIJhWnWVwmI5JkGg2cd2J6MvkqU-/view?usp=drive_link)
48
+ - [SAM-Med2D](https://drive.google.com/file/d/1ARiB5RkSsWmAB_8mqWnwDF8ZKTtFwsjl/view?usp=drive_link)
49
+
50
+ </details>
51
+
52
+ ## 🔨 Usage
53
+ ### Quick Start for SAM-Med3D inference
54
+ > **Note:**
55
+ > Currently, labels are required to generate prompt points for inference.
56
+
57
+ First, set up your environment with the following commands:
58
+ ```
59
+ conda create --name sammed3d python=3.10
60
+ conda activate sammed3d
61
+ pip install light-the-torch && ltt install torch
62
+ pip install torchio opencv-python-headless matplotlib prefetch_generator monai edt medim
63
+ ```
64
+ (If you encounter an OMP issue on Windows, please refer to this [link](https://github.com/uni-medical/SAM-Med3D/issues/103).)
65
+
66
+ Then, use [`medim_infer.py`](https://github.com/uni-medical/SAM-Med3D/blob/main/medim_infer.py) to test the inference:
67
+ ```
68
+ python medim_infer.py
69
+ ```
70
+
71
+ If you want to run inference on your own data, refer to [`medim_infer.py`](https://github.com/uni-medical/SAM-Med3D/blob/main/medim_infer.py) for more details. You can simply modify the paths in the script to use your own data. Here's the main logic:
72
+ ```
73
+ ''' 1. read and pre-process your input data '''
74
+ img_path = "./test_data/kidney_right/AMOS/imagesVal/amos_0013.nii.gz"
75
+ gt_path = "./test_data/kidney_right/AMOS/labelsVal/amos_0013.nii.gz"
76
+ category_index = 3 # the index of your target category in the gt annotation
77
+ output_dir = "./test_data/kidney_right/AMOS/pred/"
78
+ roi_image, roi_label, meta_info = data_preprocess(img_path, gt_path, category_index=category_index)
79
+
80
+ ''' 2. prepare the pre-trained model with local path or huggingface url '''
81
+ ckpt_path = "https://huggingface.co/blueyo0/SAM-Med3D/blob/main/sam_med3d_turbo.pth"
82
+ # or you can use the local path like: ckpt_path = "./ckpt/sam_med3d_turbo.pth"
83
+ model = medim.create_model("SAM-Med3D",
84
+ pretrained=True,
85
+ checkpoint_path=ckpt_path)
86
+
87
+ ''' 3. infer with the pre-trained SAM-Med3D model '''
88
+ roi_pred = sam_model_infer(model, roi_image, roi_gt=roi_label)
89
+
90
+ ''' 4. post-process and save the result '''
91
+ output_path = osp.join(output_dir, osp.basename(img_path).replace(".nii.gz", "_pred.nii.gz"))
92
+ data_postprocess(roi_pred, meta_info, output_path, img_path)
93
+
94
+ print("result saved to", output_path)
95
+ ```
96
+
97
+
98
+ ### Training / Fine-tuning
99
+ (we recommend fine-tuning with SAM-Med3D pre-trained weights from [link](https://github.com/uni-medical/SAM-Med3D#-checkpoint))
100
+
101
+ To train the SAM-Med3D model on your own data, follow these steps:
102
+
103
+ #### 0. **(Recommended) Prepare the Pre-trained Weights**
104
+
105
+ > Note: You can easily get the PyTorch SAM-Med3D model with pre-trained weights from Hugging Face using `MedIM`.
106
+ > ```
107
+ > ckpt_path = "https://huggingface.co/blueyo0/SAM-Med3D/blob/main/sam_med3d_turbo.pth"
108
+ > model = medim.create_model("SAM-Med3D", pretrained=True, checkpoint_path=ckpt_path)
109
+ > ```
110
+
111
+ Download the checkpoint from the [ckpt section](https://github.com/uni-medical/SAM-Med3D#-checkpoint) and move the pth file into `SAM_Med3D/ckpt/` (we recommend using `SAM-Med3D-turbo.pth`).
112
+
113
+
114
+ #### 1. Prepare Your Training Data (from nnU-Net-style dataset):
115
+
116
+ Ensure that your training data is organized according to the structure shown in the `data/medical_preprocessed` directories. The target file structure should look like the following:
117
+ ```
118
+ data/medical_preprocessed
119
+ ├── adrenal
120
+ │ ├── ct_WORD
121
+ │ │ ├── imagesTr
122
+ │ │ │ ├── word_0025.nii.gz
123
+ │ │ │ ├── ...
124
+ │ │ ├── labelsTr
125
+ │ │ │ ├── word_0025.nii.gz
126
+ │ │ │ ├── ...
127
+ ├── ...
128
+ ```
129
+
130
+ > If the original data are in the **nnU-Net style**, follow these steps:
131
+ >
132
+ > For a nnU-Net style dataset, the original file structure should be:
133
+ > ```
134
+ > Task010_WORD
135
+ > ├── imagesTr
136
+ > │ ├── word_0025_0000.nii.gz
137
+ > │ ├── ...
138
+ > ├── labelsTr
139
+ > │ ├── word_0025.nii.gz
140
+ > │ ├── ...
141
+ > ```
142
+ > Then you should resample the data and convert the masks into binary masks. (You can use this [script](https://github.com/uni-medical/SAM-Med3D/blob/b77585070b2f520ecd204b551a3f27715f5b3b43/utils/prepare_data_from_nnUNet.py) for an nnU-Net-style folder.)
143
+ > ```
144
+ > data/train
145
+ > ├── adrenal
146
+ > │ ├── ct_WORD
147
+ > │ │ ├── imagesTr
148
+ > │ │ │ ├── word_0025.nii.gz
149
+ > │ │ │ ├── ...
150
+ > │ │ ├── labelsTr
151
+ > │ │ │ ├── word_0025.nii.gz (binary label)
152
+ > │ │ │ ├── ...
153
+ > ├── liver
154
+ > │ ├── ct_WORD
155
+ > │ │ ├── imagesTr
156
+ > │ │ │ ├── word_0025.nii.gz
157
+ > │ │ │ ├── ...
158
+ > │ │ ├── labelsTr
159
+ > │ │ │ ├── word_0025.nii.gz (binary label)
160
+ > │ │ │ ├── ...
161
+ > ├── ...
162
+ > ```
163
+
164
+ Then, modify `img_datas` in `utils/data_paths.py` according to your own data.
165
+ ```
166
+ img_datas = [
167
+ "data/train/adrenal/ct_WORD",
168
+ "data/train/liver/ct_WORD",
169
+ ...
170
+ ]
171
+ ```
172
+ or
173
+ ```
174
+ PROJ_DIR = <YOUR PROJ DIR>
175
+ img_datas = glob(os.path.join(PROJ_DIR, "data", "train", "*", "*"))
176
+ ```
177
+
178
+
179
+ #### 2. **Run the Training Script**:
180
+ Run `bash train.sh` to execute the following command in your terminal:
181
+
182
+ ```
183
+ python train.py --multi_gpu --task_name ${tag}
184
+ ```
185
+ This will start the training process of the SAM-Med3D model on your prepared data. If you use only one GPU, remove the `--multi_gpu` flag.
186
+
187
+ The key options are listed below:
188
+
189
+ - task_name: task name
190
+ - checkpoint: pre-trained checkpoint
191
+ - work_dir: results folder for log and ckpt
192
+ - multi_gpu: use multiple GPUs with DDP
193
+ - gpu_ids: set gpu ids used for training
194
+ - num_epochs: number of epochs
195
+ - batch_size: batch size for training
196
+ - lr: learning rate for training
197
+
198
+
199
+ **Hint**: Use `--checkpoint` to set the pre-trained weight path; the model will be trained from scratch if no checkpoint is found at that path!
200
+
201
+ ### Evaluation & Inference
202
+ Prepare your own dataset by referring to the samples in `data/validation`, and replace them according to your specific scenario.
203
+ Then you can simply run `bash val.sh` to **quickly validate** SAM-Med3D on your data. Or you can use `bash infer.sh` to **generate full-volume results** for your application.
204
+ Make sure the masks are processed into a binary format (containing only two values: foreground and background). We highly recommend using a spacing of `1.5mm` for the best experience.
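+ 
+ The snippet below is a rough preprocessing sketch (not the repo's official preparation script), assuming a torchio-based workflow like the one in `medim_infer.py`; the paths and `category_index` are placeholders for your own data. It binarizes one category of a multi-label mask and resamples both image and label to `1.5mm` spacing.
+ 
+ ```
+ import torchio as tio
+ 
+ img_path, gt_path = "path/to/image.nii.gz", "path/to/label.nii.gz"  # placeholders
+ category_index = 1  # placeholder: index of your target category in the label
+ 
+ subject = tio.Subject(image=tio.ScalarImage(img_path), label=tio.LabelMap(gt_path))
+ # keep only the target category as foreground (1), everything else as background (0)
+ subject.label.set_data((subject.label.data == category_index).to(subject.label.data.dtype))
+ # resample image and label to 1.5 mm spacing (label maps are resampled with nearest neighbor)
+ resampled = tio.Resample(1.5)(subject)
+ resampled.image.save("path/to/output/image.nii.gz")
+ resampled.label.save("path/to/output/label.nii.gz")
+ ```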
205
+
206
+ ```
207
+ python validation.py --seed 2023\
208
+ -vp ./results/vis_sam_med3d \
209
+ -cp ./ckpt/sam_med3d_turbo.pth \
210
+ -tdp ./data/medical_preprocessed -nc 1 \
211
+ --save_name ./results/sam_med3d.py
212
+ ```
213
+
214
+ - vp: visualization path, dir to save the final visualization files
215
+ - cp: checkpoint path
216
+ - tdp: test data path, where your data is placed
217
+ - nc: number of clicks of prompt points
218
+ - save_name: filename to save evaluation results
219
+ - (optional) skip_existing_pred: skip prediction if the output file already exists
220
+
221
+ **Sliding-window Inference (experimental)**: To extend the application scenarios of SAM-Med3D and support more choices for full-volume inference, we provide a sliding-window mode within `inference.py`.
222
+ ```
223
+ python inference.py --seed 2024\
224
+ -cp ./ckpt/sam_med3d_turbo.pth \
225
+ -tdp ./data/medical_preprocessed -nc 1 \
226
+ --output_dir ./results --task_name test_amos_move \
227
+ --sliding_window --save_image_and_gt
228
+ ```
229
+ - cp: checkpoint path
230
+ - tdp: test data path, where your data is placed
231
+ - output_dir&task_name: all your output will be saved to `<output_dir>/<task_name>`
232
+ - (optional) sliding_window: enable the sliding-window mode; the model will infer 27 patches (see the sketch below) with improved accuracy but a slower response.
233
+ - (optional) save_image_and_gt: save the full-volume image and ground truth into `output_dir`; please ensure your disk has enough free space when this is enabled
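+ 
+ For reference, the 27 patches in sliding-window mode come from the "rounded" offset grid used by `pad_and_crop_with_sliding_window` in `inference.py`, i.e. every combination of -32/+32/0 voxel offsets along the three axes. A minimal sketch of that grid:
+ 
+ ```
+ from itertools import product
+ 
+ # 3 offset choices per axis, 3 axes -> 3**3 = 27 windows per volume
+ offsets = list(product((-32, +32, 0), repeat=3))
+ print(len(offsets))  # 27
+ ```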
234
+
235
+ For validation of SAM and SAM-Med2D on 3D volumetric data, you can refer to `scripts/val_sam.sh` and `scripts/val_med2d.sh` for details.
236
+
237
+ Hint: We also provide a simple script `sum_result.py` to help summarize the results from files like `./results/sam_med3d.py`.
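+ 
+ If you only need a quick ad-hoc summary, note that the result file written by `inference.py` (and, presumably, `validation.py`) is a plain Python module defining a `dice_Ts` dictionary, so it can also be loaded and averaged in a few lines. A rough sketch, assuming the default output path from the example above:
+ 
+ ```
+ import runpy
+ import numpy as np
+ 
+ # load the per-case Dice dictionary written via --save_name
+ results = runpy.run_path("./results/sam_med3d.py")
+ dice_per_case = list(results["dice_Ts"].values())
+ print("cases:", len(dice_per_case), "mean Dice:", np.mean(dice_per_case))
+ ```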
238
+
239
+ ## 🗼 Method
240
+ <div align="center">
241
+ <img src="assets/comparison.png">
242
+ </div>
243
+ <div align="center">
244
+ <img src="assets/architecture.png">
245
+ </div>
246
+
247
+ <!-- ## 🗓️ Ongoing
248
+ - [] Dataset release
249
+ - [x] Train code release
250
+ - [x] [Feature] Evaluation on 3D data with 2D models (slice-by-slice)
251
+ - [x] Evaluation code release
252
+ - [x] Pre-trained model release
253
+ - [x] Paper release -->
254
+
255
+ ## 📬 Citation
256
+ ```
257
+ @misc{wang2024sammed3dgeneralpurposesegmentationmodels,
258
+ title={SAM-Med3D: Towards General-purpose Segmentation Models for Volumetric Medical Images},
259
+ author={Haoyu Wang and Sizheng Guo and Jin Ye and Zhongying Deng and Junlong Cheng and Tianbin Li and Jianpin Chen and Yanzhou Su and Ziyan Huang and Yiqing Shen and Bin Fu and Shaoting Zhang and Junjun He and Yu Qiao},
260
+ year={2024},
261
+ eprint={2310.15161},
262
+ archivePrefix={arXiv},
263
+ primaryClass={cs.CV},
264
+ url={https://arxiv.org/abs/2310.15161},
265
+ }
266
+ ```
267
+
268
+ ## 🎫 License
269
+ This project is released under the [Apache 2.0 license](LICENSE).
270
+
271
+ ## 💬 Discussion Group
272
+ <p align="center"><img width="100" alt="image" src="assets/QRCode.jpg"></p>
273
+ (If the QR code has expired, please contact the WeChat account EugeneYonng or Small_dark8023, and include the note "add sammed3d wechat".)
274
+
275
+ You are also welcome to follow our [Zhihu official account](https://www.zhihu.com/people/gmai-38), where we will share more information on medical imaging.
276
+
277
+ ## 🙏 Acknowledgement
278
+ - We thank all medical workers and dataset owners for making public datasets available to the community.
279
+ - Thanks to the following open-source projects:
280
+ - [Segment Anything](https://github.com/facebookresearch/segment-anything) &#8194;
281
+ - [SAM-Med2D](https://github.com/OpenGVLab/SAM-Med2D/tree/main)
282
+
283
+ ## 👋 Hiring & Global Collaboration
284
+ - **Hiring:** We are hiring researchers, engineers, and interns in the General Vision Group, Shanghai AI Lab. If you are interested in Medical Foundation Models and General Medical AI, including designing benchmark datasets, general models, evaluation systems, and efficient tools, please contact us.
285
+ - **Global Collaboration:** We're on a mission to redefine medical research, aiming for a more universally adaptable model. Our passionate team is delving into foundational healthcare models, promoting the development of the medical community. Collaborate with us to increase competitiveness, reduce risk, and expand markets.
286
+ - **Contact:** Junjun He (hejunjun@pjlab.org.cn), Jin Ye (yejin@pjlab.org.cn), and Tianbin Li (litianbin@pjlab.org.cn).
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ numpy
2
+ torch
3
+ torchvision
4
+ SimpleITK
5
+ torchio
6
+ opencv-python
7
+ Pillow
8
+ gradio
sample.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import SimpleITK as sitk
2
+ import numpy as np
3
+ import os
4
+
5
+ def create_new_nifti_mask(output_nii_path, size=(256, 256, 128), mask_type="sphere"):
6
+ """
7
+ Create a new NIfTI mask file with the given size
8
+
9
+ Args:
10
+ output_nii_path: path of the output pseudo mask
11
+ size: size of the mask, default (256, 256, 128)
12
+ mask_type: mask type, one of "sphere", "cube" or "random"
13
+ """
14
+ print(f"创建尺寸为{size}的伪掩码")
15
+
16
+ # Create a zero array of the given size
17
+ mask_array = np.zeros(size, dtype=np.uint8)
18
+
19
+ if mask_type == "sphere":
20
+ # Create a spherical mask
21
+ center = [s // 2 for s in size]
22
+ radius = min(size) // 4
23
+
24
+ # Create coordinate grids
25
+ z, y, x = np.ogrid[:size[0], :size[1], :size[2]]
26
+
27
+ # Compute the distance to the center
28
+ dist_from_center = np.sqrt(
29
+ (z - center[0])**2 +
30
+ (y - center[1])**2 +
31
+ (x - center[2])**2
32
+ )
33
+
34
+ # Build the mask
35
+ mask_array = (dist_from_center <= radius).astype(np.uint8)
36
+
37
+ elif mask_type == "cube":
38
+ # Create a cubic mask
39
+ center = [s // 2 for s in size]
40
+ half_width = min(size) // 6
41
+
42
+ mask_array[
43
+ center[0]-half_width:center[0]+half_width,
44
+ center[1]-half_width:center[1]+half_width,
45
+ center[2]-half_width:center[2]+half_width
46
+ ] = 1
47
+
48
+ elif mask_type == "random":
49
+ # Create a mask with random shapes
50
+ np.random.seed(42) # fix the random seed so results are reproducible
51
+
52
+ # Create a few random ellipsoids
53
+ for _ in range(3):
54
+ center = [np.random.randint(s//4, 3*s//4) for s in size]
55
+ radii = [s//10 + np.random.randint(s//8) for s in size]
56
+
57
+ z, y, x = np.ogrid[:size[0], :size[1], :size[2]]
58
+
59
+ dist_from_center = np.sqrt(
60
+ ((z - center[0])/radii[0])**2 +
61
+ ((y - center[1])/radii[1])**2 +
62
+ ((x - center[2])/radii[2])**2
63
+ )
64
+
65
+ mask_array = np.logical_or(mask_array, dist_from_center <= 1)
66
+
67
+ # Convert to a SimpleITK image
68
+ mask_image = sitk.GetImageFromArray(mask_array.astype(np.uint8))
69
+
70
+ # Set default metadata
71
+ mask_image.SetSpacing((1.0, 1.0, 1.0)) # 1 mm isotropic spacing
72
+ mask_image.SetOrigin((0.0, 0.0, 0.0)) # default origin
73
+ direction = tuple([1.0 if i == j else 0.0 for i in range(3) for j in range(3)]) # default (identity) direction
74
+ mask_image.SetDirection(direction)
75
+
76
+ # Make sure the output directory exists
77
+ os.makedirs(os.path.dirname(output_nii_path), exist_ok=True)
78
+
79
+ # Save the mask
80
+ sitk.WriteImage(mask_image, output_nii_path)
81
+ print(f"伪掩码已保存至: {output_nii_path}")
82
+
83
+ return mask_array
84
+
85
+ # Use relative paths
86
+ input_path = "test_data/kidney_right/AMOS/imagesVal/test.nii.gz"
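+ # NOTE: input_path is kept only for reference and is not used below; the script just writes a pseudo mask to output_path.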
87
+ output_path = "test_data/kidney_right/AMOS/labelsVal/test.nii.gz"
88
+
89
+ # Make sure the parent directory exists
90
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
91
+
92
+ # Create the new mask (the size can be adjusted as needed)
93
+ create_new_nifti_mask(output_path, size=(128, 128, 128), mask_type="sphere")
scripts/val_default.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ python validation.py --seed 2023\
2
+ -vp ./results/vis_sam_med3d \
3
+ -cp ./ckpt/sam_med3d.pth \
4
+ -tdp ./data/validation_test1 -nc 10 \
5
+ --save_name ./results/sam_med3d.py
scripts/val_med2d.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ python validation.py --seed 2023\
2
+ -vp ./results/vis_sam_med2d \
3
+ -cp ./ckpt/sam_med2d.pth \
4
+ -tdp ./data/validation_test1 -nc 10 \
5
+ --image_size 256 -mt vit_b --dim 2 --save_name ./results/sam_med2d.py --ft2d
scripts/val_sam.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ python validation.py --seed 2023\
2
+ -vp ./results/vis_sam_vit_b \
3
+ -cp ./ckpt/sam_vit_b.pth \
4
+ -tdp ./data/validation_test1 -nc 10 \
5
+ --image_size 1024 -mt vit_b --dim 2 --save_name ./results/sam_vit_b.py
segment_anything/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .build_sam3D import *
2
+ from utils import *
3
+ from .build_sam import (
4
+ build_sam,
5
+ build_sam_vit_h,
6
+ build_sam_vit_l,
7
+ build_sam_vit_b,
8
+ sam_model_registry,
9
+ )
10
+ from .predictor import SamPredictor
11
+ from .automatic_mask_generator import SamAutomaticMaskGenerator
segment_anything/automatic_mask_generator.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torchvision.ops.boxes import batched_nms, box_area # type: ignore
10
+
11
+ from typing import Any, Dict, List, Optional, Tuple
12
+
13
+ from .modeling import Sam
14
+ from .predictor import SamPredictor
15
+ from .utils.amg import (
16
+ MaskData,
17
+ area_from_rle,
18
+ batch_iterator,
19
+ batched_mask_to_box,
20
+ box_xyxy_to_xywh,
21
+ build_all_layer_point_grids,
22
+ calculate_stability_score,
23
+ coco_encode_rle,
24
+ generate_crop_boxes,
25
+ is_box_near_crop_edge,
26
+ mask_to_rle_pytorch,
27
+ remove_small_regions,
28
+ rle_to_mask,
29
+ uncrop_boxes_xyxy,
30
+ uncrop_masks,
31
+ uncrop_points,
32
+ )
33
+
34
+
35
+ class SamAutomaticMaskGenerator:
36
+ def __init__(
37
+ self,
38
+ model: Sam,
39
+ points_per_side: Optional[int] = 32,
40
+ points_per_batch: int = 64,
41
+ pred_iou_thresh: float = 0.88,
42
+ stability_score_thresh: float = 0.95,
43
+ stability_score_offset: float = 1.0,
44
+ box_nms_thresh: float = 0.7,
45
+ crop_n_layers: int = 0,
46
+ crop_nms_thresh: float = 0.7,
47
+ crop_overlap_ratio: float = 512 / 1500,
48
+ crop_n_points_downscale_factor: int = 1,
49
+ point_grids: Optional[List[np.ndarray]] = None,
50
+ min_mask_region_area: int = 0,
51
+ output_mode: str = "binary_mask",
52
+ ) -> None:
53
+ """
54
+ Using a SAM model, generates masks for the entire image.
55
+ Generates a grid of point prompts over the image, then filters
56
+ low quality and duplicate masks. The default settings are chosen
57
+ for SAM with a ViT-H backbone.
58
+
59
+ Arguments:
60
+ model (Sam): The SAM model to use for mask prediction.
61
+ points_per_side (int or None): The number of points to be sampled
62
+ along one side of the image. The total number of points is
63
+ points_per_side**2. If None, 'point_grids' must provide explicit
64
+ point sampling.
65
+ points_per_batch (int): Sets the number of points run simultaneously
66
+ by the model. Higher numbers may be faster but use more GPU memory.
67
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
68
+ model's predicted mask quality.
69
+ stability_score_thresh (float): A filtering threshold in [0,1], using
70
+ the stability of the mask under changes to the cutoff used to binarize
71
+ the model's mask predictions.
72
+ stability_score_offset (float): The amount to shift the cutoff when
73
+ calculating the stability score.
74
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
75
+ suppression to filter duplicate masks.
76
+ crop_n_layers (int): If >0, mask prediction will be run again on
77
+ crops of the image. Sets the number of layers to run, where each
78
+ layer has 2**i_layer number of image crops.
79
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
80
+ suppression to filter duplicate masks between different crops.
81
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
82
+ In the first crop layer, crops will overlap by this fraction of
83
+ the image length. Later layers with more crops scale down this overlap.
84
+ crop_n_points_downscale_factor (int): The number of points-per-side
85
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
86
+ point_grids (list(np.ndarray) or None): A list over explicit grids
87
+ of points used for sampling, normalized to [0,1]. The nth grid in the
88
+ list is used in the nth crop layer. Exclusive with points_per_side.
89
+ min_mask_region_area (int): If >0, postprocessing will be applied
90
+ to remove disconnected regions and holes in masks with area smaller
91
+ than min_mask_region_area. Requires opencv.
92
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
93
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
94
+ For large resolutions, 'binary_mask' may consume large amounts of
95
+ memory.
96
+ """
97
+
98
+ assert (points_per_side is None) != (
99
+ point_grids is None
100
+ ), "Exactly one of points_per_side or point_grid must be provided."
101
+ if points_per_side is not None:
102
+ self.point_grids = build_all_layer_point_grids(
103
+ points_per_side,
104
+ crop_n_layers,
105
+ crop_n_points_downscale_factor,
106
+ )
107
+ elif point_grids is not None:
108
+ self.point_grids = point_grids
109
+ else:
110
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
111
+
112
+ assert output_mode in [
113
+ "binary_mask",
114
+ "uncompressed_rle",
115
+ "coco_rle",
116
+ ], f"Unknown output_mode {output_mode}."
117
+ if output_mode == "coco_rle":
118
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
119
+
120
+ if min_mask_region_area > 0:
121
+ import cv2 # type: ignore # noqa: F401
122
+
123
+ self.predictor = SamPredictor(model)
124
+ self.points_per_batch = points_per_batch
125
+ self.pred_iou_thresh = pred_iou_thresh
126
+ self.stability_score_thresh = stability_score_thresh
127
+ self.stability_score_offset = stability_score_offset
128
+ self.box_nms_thresh = box_nms_thresh
129
+ self.crop_n_layers = crop_n_layers
130
+ self.crop_nms_thresh = crop_nms_thresh
131
+ self.crop_overlap_ratio = crop_overlap_ratio
132
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
133
+ self.min_mask_region_area = min_mask_region_area
134
+ self.output_mode = output_mode
135
+
136
+ @torch.no_grad()
137
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
138
+ """
139
+ Generates masks for the given image.
140
+
141
+ Arguments:
142
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
143
+
144
+ Returns:
145
+ list(dict(str, any)): A list over records for masks. Each record is
146
+ a dict containing the following keys:
147
+ segmentation (dict(str, any) or np.ndarray): The mask. If
148
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
149
+ is a dictionary containing the RLE.
150
+ bbox (list(float)): The box around the mask, in XYWH format.
151
+ area (int): The area in pixels of the mask.
152
+ predicted_iou (float): The model's own prediction of the mask's
153
+ quality. This is filtered by the pred_iou_thresh parameter.
154
+ point_coords (list(list(float))): The point coordinates input
155
+ to the model to generate this mask.
156
+ stability_score (float): A measure of the mask's quality. This
157
+ is filtered on using the stability_score_thresh parameter.
158
+ crop_box (list(float)): The crop of the image used to generate
159
+ the mask, given in XYWH format.
160
+ """
161
+
162
+ # Generate masks
163
+ mask_data = self._generate_masks(image)
164
+
165
+ # Filter small disconnected regions and holes in masks
166
+ if self.min_mask_region_area > 0:
167
+ mask_data = self.postprocess_small_regions(
168
+ mask_data,
169
+ self.min_mask_region_area,
170
+ max(self.box_nms_thresh, self.crop_nms_thresh),
171
+ )
172
+
173
+ # Encode masks
174
+ if self.output_mode == "coco_rle":
175
+ mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
176
+ elif self.output_mode == "binary_mask":
177
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
178
+ else:
179
+ mask_data["segmentations"] = mask_data["rles"]
180
+
181
+ # Write mask records
182
+ curr_anns = []
183
+ for idx in range(len(mask_data["segmentations"])):
184
+ ann = {
185
+ "segmentation": mask_data["segmentations"][idx],
186
+ "area": area_from_rle(mask_data["rles"][idx]),
187
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
188
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
189
+ "point_coords": [mask_data["points"][idx].tolist()],
190
+ "stability_score": mask_data["stability_score"][idx].item(),
191
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
192
+ }
193
+ curr_anns.append(ann)
194
+
195
+ return curr_anns
196
+
197
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
198
+ orig_size = image.shape[:2]
199
+ crop_boxes, layer_idxs = generate_crop_boxes(
200
+ orig_size, self.crop_n_layers, self.crop_overlap_ratio
201
+ )
202
+
203
+ # Iterate over image crops
204
+ data = MaskData()
205
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
206
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
207
+ data.cat(crop_data)
208
+
209
+ # Remove duplicate masks between crops
210
+ if len(crop_boxes) > 1:
211
+ # Prefer masks from smaller crops
212
+ scores = 1 / box_area(data["crop_boxes"])
213
+ scores = scores.to(data["boxes"].device)
214
+ keep_by_nms = batched_nms(
215
+ data["boxes"].float(),
216
+ scores,
217
+ torch.zeros_like(data["boxes"][:, 0]), # categories
218
+ iou_threshold=self.crop_nms_thresh,
219
+ )
220
+ data.filter(keep_by_nms)
221
+
222
+ data.to_numpy()
223
+ return data
224
+
225
+ def _process_crop(
226
+ self,
227
+ image: np.ndarray,
228
+ crop_box: List[int],
229
+ crop_layer_idx: int,
230
+ orig_size: Tuple[int, ...],
231
+ ) -> MaskData:
232
+ # Crop the image and calculate embeddings
233
+ x0, y0, x1, y1 = crop_box
234
+ cropped_im = image[y0:y1, x0:x1, :]
235
+ cropped_im_size = cropped_im.shape[:2]
236
+ self.predictor.set_image(cropped_im)
237
+
238
+ # Get points for this crop
239
+ points_scale = np.array(cropped_im_size)[None, ::-1]
240
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
241
+
242
+ # Generate masks for this crop in batches
243
+ data = MaskData()
244
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
245
+ batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)
246
+ data.cat(batch_data)
247
+ del batch_data
248
+ self.predictor.reset_image()
249
+
250
+ # Remove duplicates within this crop.
251
+ keep_by_nms = batched_nms(
252
+ data["boxes"].float(),
253
+ data["iou_preds"],
254
+ torch.zeros_like(data["boxes"][:, 0]), # categories
255
+ iou_threshold=self.box_nms_thresh,
256
+ )
257
+ data.filter(keep_by_nms)
258
+
259
+ # Return to the original image frame
260
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
261
+ data["points"] = uncrop_points(data["points"], crop_box)
262
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
263
+
264
+ return data
265
+
266
+ def _process_batch(
267
+ self,
268
+ points: np.ndarray,
269
+ im_size: Tuple[int, ...],
270
+ crop_box: List[int],
271
+ orig_size: Tuple[int, ...],
272
+ ) -> MaskData:
273
+ orig_h, orig_w = orig_size
274
+
275
+ # Run model on this batch
276
+ transformed_points = self.predictor.transform.apply_coords(points, im_size)
277
+ in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
278
+ in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
279
+ masks, iou_preds, _ = self.predictor.predict_torch(
280
+ in_points[:, None, :],
281
+ in_labels[:, None],
282
+ multimask_output=True,
283
+ return_logits=True,
284
+ )
285
+
286
+ # Serialize predictions and store in MaskData
287
+ data = MaskData(
288
+ masks=masks.flatten(0, 1),
289
+ iou_preds=iou_preds.flatten(0, 1),
290
+ points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
291
+ )
292
+ del masks
293
+
294
+ # Filter by predicted IoU
295
+ if self.pred_iou_thresh > 0.0:
296
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
297
+ data.filter(keep_mask)
298
+
299
+ # Calculate stability score
300
+ data["stability_score"] = calculate_stability_score(
301
+ data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
302
+ )
303
+ if self.stability_score_thresh > 0.0:
304
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
305
+ data.filter(keep_mask)
306
+
307
+ # Threshold masks and calculate boxes
308
+ data["masks"] = data["masks"] > self.predictor.model.mask_threshold
309
+ data["boxes"] = batched_mask_to_box(data["masks"])
310
+
311
+ # Filter boxes that touch crop boundaries
312
+ keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
313
+ if not torch.all(keep_mask):
314
+ data.filter(keep_mask)
315
+
316
+ # Compress to RLE
317
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
318
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
319
+ del data["masks"]
320
+
321
+ return data
322
+
323
+ @staticmethod
324
+ def postprocess_small_regions(
325
+ mask_data: MaskData, min_area: int, nms_thresh: float
326
+ ) -> MaskData:
327
+ """
328
+ Removes small disconnected regions and holes in masks, then reruns
329
+ box NMS to remove any new duplicates.
330
+
331
+ Edits mask_data in place.
332
+
333
+ Requires open-cv as a dependency.
334
+ """
335
+ if len(mask_data["rles"]) == 0:
336
+ return mask_data
337
+
338
+ # Filter small disconnected regions and holes
339
+ new_masks = []
340
+ scores = []
341
+ for rle in mask_data["rles"]:
342
+ mask = rle_to_mask(rle)
343
+
344
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
345
+ unchanged = not changed
346
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
347
+ unchanged = unchanged and not changed
348
+
349
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
350
+ # Give score=0 to changed masks and score=1 to unchanged masks
351
+ # so NMS will prefer ones that didn't need postprocessing
352
+ scores.append(float(unchanged))
353
+
354
+ # Recalculate boxes and remove any new duplicates
355
+ masks = torch.cat(new_masks, dim=0)
356
+ boxes = batched_mask_to_box(masks)
357
+ keep_by_nms = batched_nms(
358
+ boxes.float(),
359
+ torch.as_tensor(scores),
360
+ torch.zeros_like(boxes[:, 0]), # categories
361
+ iou_threshold=nms_thresh,
362
+ )
363
+
364
+ # Only recalculate RLEs for masks that have changed
365
+ for i_mask in keep_by_nms:
366
+ if scores[i_mask] == 0.0:
367
+ mask_torch = masks[i_mask].unsqueeze(0)
368
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
369
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
370
+ mask_data.filter(keep_by_nms)
371
+
372
+ return mask_data
segment_anything/build_sam.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from functools import partial
9
+ from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
10
+ from torch.nn import functional as F
11
+
12
+ def build_sam_vit_h(args):
13
+ return _build_sam(
14
+ encoder_embed_dim=1280,
15
+ encoder_depth=32,
16
+ encoder_num_heads=16,
17
+ encoder_global_attn_indexes=[7, 15, 23, 31],
18
+ image_size=args.image_size,
19
+ checkpoint=args.sam_checkpoint,
20
+ )
21
+
22
+
23
+ build_sam = build_sam_vit_h
24
+
25
+
26
+ def build_sam_vit_l(args):
27
+ return _build_sam(
28
+ encoder_embed_dim=1024,
29
+ encoder_depth=24,
30
+ encoder_num_heads=16,
31
+ encoder_global_attn_indexes=[5, 11, 17, 23],
32
+ image_size=args.image_size,
33
+ checkpoint=args.sam_checkpoint,
34
+ )
35
+
36
+
37
+ def build_sam_vit_b(args):
38
+ return _build_sam(
39
+ encoder_embed_dim=768,
40
+ encoder_depth=12,
41
+ encoder_num_heads=12,
42
+ encoder_global_attn_indexes=[2, 5, 8, 11],
43
+ image_size=args.image_size,
44
+ checkpoint=args.sam_checkpoint,
45
+
46
+ )
47
+
48
+
49
+ sam_model_registry = {
50
+ "default": build_sam_vit_h,
51
+ "vit_h": build_sam_vit_h,
52
+ "vit_l": build_sam_vit_l,
53
+ "vit_b": build_sam_vit_b,
54
+ }
55
+
56
+
57
+ def _build_sam(
58
+ encoder_embed_dim,
59
+ encoder_depth,
60
+ encoder_num_heads,
61
+ encoder_global_attn_indexes,
62
+ image_size,
63
+ checkpoint,
64
+ ):
65
+ prompt_embed_dim = 256
66
+ image_size = image_size
67
+ vit_patch_size = 16
68
+ image_embedding_size = image_size // vit_patch_size
69
+ sam = Sam(
70
+ image_encoder=ImageEncoderViT(
71
+ depth=encoder_depth,
72
+ embed_dim=encoder_embed_dim,
73
+ img_size=image_size,
74
+ mlp_ratio=4,
75
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
76
+ num_heads=encoder_num_heads,
77
+ patch_size=vit_patch_size,
78
+ qkv_bias=True,
79
+ use_rel_pos = True,
80
+ global_attn_indexes=encoder_global_attn_indexes,
81
+ window_size=14,
82
+ out_chans=prompt_embed_dim,
83
+ ),
84
+ prompt_encoder=PromptEncoder(
85
+ embed_dim=prompt_embed_dim,
86
+ image_embedding_size=(image_embedding_size, image_embedding_size),
87
+ input_image_size=(image_size, image_size),
88
+ mask_in_chans=16,
89
+ ),
90
+ mask_decoder=MaskDecoder(
91
+ num_multimask_outputs=3,
92
+ transformer=TwoWayTransformer(
93
+ depth=2,
94
+ embedding_dim=prompt_embed_dim,
95
+ mlp_dim=2048,
96
+ num_heads=8,
97
+ ),
98
+ transformer_dim=prompt_embed_dim,
99
+ iou_head_depth=3,
100
+ iou_head_hidden_dim=256,
101
+ ),
102
+ pixel_mean=[123.675, 116.28, 103.53],
103
+ pixel_std=[58.395, 57.12, 57.375],
104
+ )
105
+ sam.train()
106
+ if checkpoint is not None:
107
+ with open(checkpoint, "rb") as f:
108
+ state_dict = torch.load(f)
109
+ try:
110
+ if 'model' in state_dict.keys():
111
+ sam.load_state_dict(state_dict['model'])
112
+ else:
113
+ sam.load_state_dict(state_dict)
114
+ except:
115
+ print('*******interpolate')
116
+ new_state_dict = load_from(sam, state_dict, image_size, vit_patch_size)
117
+ sam.load_state_dict(new_state_dict)
118
+ print(f"*******load {checkpoint}")
119
+
120
+ return sam
121
+
122
+
123
+ def load_from(sam, state_dicts, image_size, vit_patch_size):
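+ # Adapt a SAM checkpoint trained at a different resolution: keep matching weights and bilinearly interpolate the absolute and global relative position embeddings to the new token grid.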
124
+ sam_dict = sam.state_dict()
125
+ except_keys = ['mask_tokens', 'output_hypernetworks_mlps', 'iou_prediction_head']
126
+ new_state_dict = {k: v for k, v in state_dicts.items() if
127
+ k in sam_dict.keys() and except_keys[0] not in k and except_keys[1] not in k and except_keys[2] not in k}
128
+ pos_embed = new_state_dict['image_encoder.pos_embed']
129
+ token_size = int(image_size // vit_patch_size)
130
+ if pos_embed.shape[1] != token_size:
131
+ # resize pos embedding, which may sacrifice the performance, but I have no better idea
132
+ pos_embed = pos_embed.permute(0, 3, 1, 2) # [b, c, h, w]
133
+ pos_embed = F.interpolate(pos_embed, (token_size, token_size), mode='bilinear', align_corners=False)
134
+ pos_embed = pos_embed.permute(0, 2, 3, 1) # [b, h, w, c]
135
+ new_state_dict['image_encoder.pos_embed'] = pos_embed
136
+ rel_pos_keys = [k for k in sam_dict.keys() if 'rel_pos' in k]
137
+
138
+ global_rel_pos_keys = [k for k in rel_pos_keys if
139
+ '2' in k or
140
+ '5' in k or
141
+ '7' in k or
142
+ '8' in k or
143
+ '11' in k or
144
+ '13' in k or
145
+ '15' in k or
146
+ '23' in k or
147
+ '31' in k]
148
+ # print(sam_dict)
149
+ for k in global_rel_pos_keys:
150
+ h_check, w_check = sam_dict[k].shape
151
+ rel_pos_params = new_state_dict[k]
152
+ h, w = rel_pos_params.shape
153
+ rel_pos_params = rel_pos_params.unsqueeze(0).unsqueeze(0)
154
+ if h != h_check or w != w_check:
155
+ rel_pos_params = F.interpolate(rel_pos_params, (h_check, w_check), mode='bilinear', align_corners=False)
156
+
157
+ new_state_dict[k] = rel_pos_params[0, 0, ...]
158
+
159
+ sam_dict.update(new_state_dict)
160
+ return sam_dict
161
+
segment_anything/build_sam3D.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+
9
+ from functools import partial
10
+
11
+ from .modeling import ImageEncoderViT3D, MaskDecoder3D, PromptEncoder3D, Sam3D
12
+
13
+ def build_sam3D_vit_h(checkpoint=None):
14
+ return _build_sam3D(
15
+ encoder_embed_dim=1280,
16
+ encoder_depth=32,
17
+ encoder_num_heads=16,
18
+ encoder_global_attn_indexes=[7, 15, 23, 31],
19
+ checkpoint=checkpoint,
20
+ )
21
+
22
+
23
+ build_sam3D = build_sam3D_vit_h
24
+
25
+
26
+ def build_sam3D_vit_l(checkpoint=None):
27
+ return _build_sam3D(
28
+ encoder_embed_dim=1024,
29
+ encoder_depth=24,
30
+ encoder_num_heads=16,
31
+ encoder_global_attn_indexes=[5, 11, 17, 23],
32
+ checkpoint=checkpoint,
33
+ )
34
+
35
+
36
+ def build_sam3D_vit_b(checkpoint=None):
37
+ return _build_sam3D(
38
+ # encoder_embed_dim=768,
39
+ encoder_embed_dim=384,
40
+ encoder_depth=12,
41
+ encoder_num_heads=12,
42
+ encoder_global_attn_indexes=[2, 5, 8, 11],
43
+ checkpoint=checkpoint,
44
+ )
45
+
46
+ def build_sam3D_vit_b_ori(checkpoint=None):
47
+ return _build_sam3D_ori(
48
+ encoder_embed_dim=768,
49
+ encoder_depth=12,
50
+ encoder_num_heads=12,
51
+ encoder_global_attn_indexes=[2, 5, 8, 11],
52
+ checkpoint=checkpoint,
53
+ )
54
+
55
+
56
+ sam_model_registry3D = {
57
+ "default": build_sam3D_vit_h,
58
+ "vit_h": build_sam3D_vit_h,
59
+ "vit_l": build_sam3D_vit_l,
60
+ "vit_b": build_sam3D_vit_b,
61
+ "vit_b_ori": build_sam3D_vit_b_ori,
62
+ }
63
+
64
+
65
+
66
+ def _build_sam3D(
67
+ encoder_embed_dim,
68
+ encoder_depth,
69
+ encoder_num_heads,
70
+ encoder_global_attn_indexes,
71
+ checkpoint=None,
72
+ ):
73
+ prompt_embed_dim = 384
74
+ image_size = 256
75
+ vit_patch_size = 16
76
+ image_embedding_size = image_size // vit_patch_size
77
+ sam = Sam3D(
78
+ image_encoder=ImageEncoderViT3D(
79
+ depth=encoder_depth,
80
+ embed_dim=encoder_embed_dim,
81
+ img_size=image_size,
82
+ mlp_ratio=4,
83
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
84
+ num_heads=encoder_num_heads,
85
+ patch_size=vit_patch_size,
86
+ qkv_bias=True,
87
+ use_rel_pos=True,
88
+ global_attn_indexes=encoder_global_attn_indexes,
89
+ window_size=14,
90
+ out_chans=prompt_embed_dim,
91
+ ),
92
+ prompt_encoder=PromptEncoder3D(
93
+ embed_dim=prompt_embed_dim,
94
+ image_embedding_size=(image_embedding_size, image_embedding_size, image_embedding_size),
95
+ input_image_size=(image_size, image_size, image_size),
96
+ mask_in_chans=16,
97
+ ),
98
+ mask_decoder=MaskDecoder3D(
99
+ num_multimask_outputs=3,
100
+ transformer_dim=prompt_embed_dim,
101
+ iou_head_depth=3,
102
+ iou_head_hidden_dim=256,
103
+ ),
104
+ pixel_mean=[123.675, 116.28, 103.53],
105
+ pixel_std=[58.395, 57.12, 57.375],
106
+ )
107
+ sam.eval()
108
+ if checkpoint is not None:
109
+ with open(checkpoint, "rb") as f:
110
+ state_dict = torch.load(f)
111
+ sam.load_state_dict(state_dict)
112
+ return sam
113
+
114
+
115
+ def _build_sam3D_ori(
116
+ encoder_embed_dim,
117
+ encoder_depth,
118
+ encoder_num_heads,
119
+ encoder_global_attn_indexes,
120
+ checkpoint=None,
121
+ ):
122
+ prompt_embed_dim = 384
123
+ image_size = 128
124
+ vit_patch_size = 16
125
+ image_embedding_size = image_size // vit_patch_size
126
+ sam = Sam3D(
127
+ image_encoder=ImageEncoderViT3D(
128
+ depth=encoder_depth,
129
+ embed_dim=encoder_embed_dim,
130
+ img_size=image_size,
131
+ mlp_ratio=4,
132
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
133
+ num_heads=encoder_num_heads,
134
+ patch_size=vit_patch_size,
135
+ qkv_bias=True,
136
+ use_rel_pos=True,
137
+ global_attn_indexes=encoder_global_attn_indexes,
138
+ window_size=14,
139
+ out_chans=prompt_embed_dim,
140
+ ),
141
+ prompt_encoder=PromptEncoder3D(
142
+ embed_dim=prompt_embed_dim,
143
+ image_embedding_size=(image_embedding_size, image_embedding_size, image_embedding_size),
144
+ input_image_size=(image_size, image_size, image_size),
145
+ mask_in_chans=16,
146
+ ),
147
+ mask_decoder=MaskDecoder3D(
148
+ num_multimask_outputs=3,
149
+ transformer_dim=prompt_embed_dim,
150
+ iou_head_depth=3,
151
+ iou_head_hidden_dim=256,
152
+ ),
153
+ pixel_mean=[123.675, 116.28, 103.53],
154
+ pixel_std=[58.395, 57.12, 57.375],
155
+ )
156
+ sam.eval()
157
+ if checkpoint is not None:
158
+ with open(checkpoint, "rb") as f:
159
+ state_dict = torch.load(f)
160
+ sam.load_state_dict(state_dict)
161
+ return sam
segment_anything/modeling/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from .sam3D import Sam3D
2
+ from .image_encoder3D import ImageEncoderViT3D
3
+ from .mask_decoder3D import MaskDecoder3D, TwoWayTransformer3D
4
+ from .prompt_encoder3D import PromptEncoder3D
5
+
6
+ from .sam_model import Sam
7
+ from .image_encoder import ImageEncoderViT
8
+ from .mask_decoder import MaskDecoder
9
+ from .prompt_encoder import PromptEncoder
10
+ from .transformer import TwoWayTransformer
segment_anything/modeling/common.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from typing import Type
11
+
12
+
13
+ class MLPBlock(nn.Module):
14
+ def __init__(
15
+ self,
16
+ embedding_dim: int,
17
+ mlp_dim: int,
18
+ act: Type[nn.Module] = nn.GELU,
19
+ ) -> None:
20
+ super().__init__()
21
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
22
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
23
+ self.act = act()
24
+
25
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
26
+ return self.lin2(self.act(self.lin1(x)))
27
+
28
+
29
+ # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
30
+ # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
31
+ class LayerNorm2d(nn.Module):
32
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
33
+ super().__init__()
34
+ self.weight = nn.Parameter(torch.ones(num_channels))
35
+ self.bias = nn.Parameter(torch.zeros(num_channels))
36
+ self.eps = eps
37
+
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ u = x.mean(1, keepdim=True)
40
+ s = (x - u).pow(2).mean(1, keepdim=True)
41
+ x = (x - u) / torch.sqrt(s + self.eps)
42
+ y = self.weight[:, None, None] * x
43
+ # y = torch.mul(self.weight[:, None, None], x)
44
+ x = y + self.bias[:, None, None]
45
+ return x
segment_anything/modeling/image_encoder.py ADDED
@@ -0,0 +1,401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from typing import Optional, Tuple, Type
12
+
13
+ from .common import LayerNorm2d, MLPBlock
14
+
15
+
16
+ # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
17
+ class ImageEncoderViT(nn.Module):
18
+ def __init__(
19
+ self,
20
+ img_size: int = 1024,
21
+ patch_size: int = 16,
22
+ in_chans: int = 3,
23
+ embed_dim: int = 768,
24
+ depth: int = 12,
25
+ num_heads: int = 12,
26
+ mlp_ratio: float = 4.0,
27
+ out_chans: int = 256,
28
+ qkv_bias: bool = True,
29
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
30
+ act_layer: Type[nn.Module] = nn.GELU,
31
+ use_abs_pos: bool = True,
32
+ use_rel_pos: bool = False,
33
+ rel_pos_zero_init: bool = True,
34
+ window_size: int = 0,
35
+ global_attn_indexes: Tuple[int, ...] = (),
36
+ ) -> None:
37
+ """
38
+ Args:
39
+ img_size (int): Input image size.
40
+ patch_size (int): Patch size.
41
+ in_chans (int): Number of input image channels.
42
+ embed_dim (int): Patch embedding dimension.
43
+ depth (int): Depth of ViT.
44
+ num_heads (int): Number of attention heads in each ViT block.
45
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
46
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
47
+ norm_layer (nn.Module): Normalization layer.
48
+ act_layer (nn.Module): Activation layer.
49
+ use_abs_pos (bool): If True, use absolute positional embeddings.
50
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
51
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
52
+ window_size (int): Window size for window attention blocks.
53
+ global_attn_indexes (list): Indexes for blocks using global attention.
54
+ """
55
+ super().__init__()
56
+ self.img_size = img_size
57
+
58
+ self.patch_embed = PatchEmbed(
59
+ kernel_size=(patch_size, patch_size),
60
+ stride=(patch_size, patch_size),
61
+ in_chans=in_chans,
62
+ embed_dim=embed_dim,
63
+ )
64
+
65
+ self.pos_embed: Optional[nn.Parameter] = None
66
+ if use_abs_pos:
67
+ # Initialize absolute positional embedding with pretrain image size.
68
+ self.pos_embed = nn.Parameter(
69
+ torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
70
+ )
71
+
72
+
73
+ self.blocks = nn.ModuleList()
74
+ for i in range(depth):
75
+ block = Block(
76
+ dim=embed_dim,
77
+ num_heads=num_heads,
78
+ mlp_ratio=mlp_ratio,
79
+ qkv_bias=qkv_bias,
80
+ norm_layer=norm_layer,
81
+ act_layer=act_layer,
82
+ use_rel_pos=use_rel_pos,
83
+ rel_pos_zero_init=rel_pos_zero_init,
84
+ window_size=window_size if i not in global_attn_indexes else 0,
85
+ input_size=(img_size // patch_size, img_size // patch_size),
86
+ )
87
+ self.blocks.append(block)
88
+
89
+ self.neck = nn.Sequential(
90
+ nn.Conv2d(
91
+ embed_dim,
92
+ out_chans,
93
+ kernel_size=1,
94
+ bias=False,
95
+ ),
96
+ LayerNorm2d(out_chans),
97
+ nn.Conv2d(
98
+ out_chans,
99
+ out_chans,
100
+ kernel_size=3,
101
+ padding=1,
102
+ bias=False,
103
+ ),
104
+ LayerNorm2d(out_chans),
105
+ )
106
+
107
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
108
+ x = self.patch_embed(x)
109
+ if self.pos_embed is not None:
110
+ x = x + self.pos_embed
111
+
112
+ for blk in self.blocks:
113
+ x = blk(x)
114
+
115
+
116
+ x = self.neck(x.permute(0, 3, 1, 2))
117
+
118
+ return x
119
+
120
+
121
+ class Block(nn.Module):
122
+ """Transformer blocks with support of window attention and residual propagation blocks"""
123
+
124
+ def __init__(
125
+ self,
126
+ dim: int,
127
+ num_heads: int,
128
+ mlp_ratio: float = 4.0,
129
+ qkv_bias: bool = True,
130
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
131
+ act_layer: Type[nn.Module] = nn.GELU,
132
+ use_rel_pos: bool = False,
133
+ rel_pos_zero_init: bool = True,
134
+ window_size: int = 0,
135
+ input_size: Optional[Tuple[int, int]] = None,
136
+ ) -> None:
137
+ """
138
+ Args:
139
+ dim (int): Number of input channels.
140
+ num_heads (int): Number of attention heads in each ViT block.
141
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
142
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
143
+ norm_layer (nn.Module): Normalization layer.
144
+ act_layer (nn.Module): Activation layer.
145
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
146
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
147
+ window_size (int): Window size for window attention blocks. If it equals 0, then
148
+ use global attention.
149
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
150
+ positional parameter size.
151
+ """
152
+ super().__init__()
153
+ self.norm1 = norm_layer(dim)
154
+ self.attn = Attention(
155
+ dim,
156
+ num_heads=num_heads,
157
+ qkv_bias=qkv_bias,
158
+ use_rel_pos=use_rel_pos,
159
+ rel_pos_zero_init=rel_pos_zero_init,
160
+ input_size=input_size if window_size == 0 else (window_size, window_size),
161
+ )
162
+
163
+ self.norm2 = norm_layer(dim)
164
+ self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
165
+
166
+ self.window_size = window_size
167
+
168
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
169
+ shortcut = x
170
+ x = self.norm1(x)
171
+ # Window partition
172
+ if self.window_size > 0:
173
+ H, W = x.shape[1], x.shape[2]
174
+ x, pad_hw = window_partition(x, self.window_size)
175
+
176
+ x = self.attn(x)
177
+ # Reverse window partition
178
+ if self.window_size > 0:
179
+ x = window_unpartition(x, self.window_size, pad_hw, (H, W))
180
+
181
+ x = shortcut + x
182
+ x = x + self.mlp(self.norm2(x))
183
+
184
+ return x
185
+
186
+
187
+ class Attention(nn.Module):
188
+ """Multi-head Attention block with relative position embeddings."""
189
+
190
+ def __init__(
191
+ self,
192
+ dim: int,
193
+ num_heads: int = 8,
194
+ qkv_bias: bool = True,
195
+ use_rel_pos: bool = False,
196
+ rel_pos_zero_init: bool = True,
197
+ input_size: Optional[Tuple[int, int]] = None,
198
+ ) -> None:
199
+ """
200
+ Args:
201
+ dim (int): Number of input channels.
202
+ num_heads (int): Number of attention heads.
203
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
204
+ rel_pos (bool): If True, add relative positional embeddings to the attention map.
205
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
206
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
207
+ positional parameter size.
208
+ """
209
+ super().__init__()
210
+ self.num_heads = num_heads
211
+ head_dim = dim // num_heads
212
+ self.scale = head_dim**-0.5
213
+
214
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
215
+ self.proj = nn.Linear(dim, dim)
216
+
217
+ self.use_rel_pos = use_rel_pos
218
+ if self.use_rel_pos:
219
+ assert (
220
+ input_size is not None
221
+ ), "Input size must be provided if using relative positional encoding."
222
+ # initialize relative positional embeddings
223
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
224
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
225
+
226
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
227
+ B, H, W, _ = x.shape
228
+ # qkv with shape (3, B, nHead, H * W, C)
229
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
230
+ # q, k, v with shape (B * nHead, H * W, C)
231
+ q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
232
+
233
+ attn = (q * self.scale) @ k.transpose(-2, -1)
234
+
235
+ if self.use_rel_pos:
236
+ attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
237
+
238
+ attn = attn.softmax(dim=-1)
239
+ x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
240
+ x = self.proj(x)
241
+
242
+ return x
243
+
244
+
245
+ def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
246
+ """
247
+ Partition into non-overlapping windows with padding if needed.
248
+ Args:
249
+ x (tensor): input tokens with [B, H, W, C].
250
+ window_size (int): window size.
251
+
252
+ Returns:
253
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
254
+ (Hp, Wp): padded height and width before partition
255
+ """
256
+ B, H, W, C = x.shape
257
+
258
+ pad_h = (window_size - H % window_size) % window_size
259
+ pad_w = (window_size - W % window_size) % window_size
260
+ if pad_h > 0 or pad_w > 0:
261
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
262
+ Hp, Wp = H + pad_h, W + pad_w
263
+
264
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
265
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
266
+ return windows, (Hp, Wp)
267
+
268
+
269
+ def window_unpartition(
270
+ windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
271
+ ) -> torch.Tensor:
272
+ """
273
+ Window unpartition into original sequences and removing padding.
274
+ Args:
275
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
276
+ window_size (int): window size.
277
+ pad_hw (Tuple): padded height and width (Hp, Wp).
278
+ hw (Tuple): original height and width (H, W) before padding.
279
+
280
+ Returns:
281
+ x: unpartitioned sequences with [B, H, W, C].
282
+ """
283
+ Hp, Wp = pad_hw
284
+ H, W = hw
285
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
286
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
287
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
288
+
289
+ if Hp > H or Wp > W:
290
+ x = x[:, :H, :W, :].contiguous()
291
+ return x
292
+
293
+
294
+ def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
295
+ """
296
+ Get relative positional embeddings according to the relative positions of
297
+ query and key sizes.
298
+ Args:
299
+ q_size (int): size of query q.
300
+ k_size (int): size of key k.
301
+ rel_pos (Tensor): relative position embeddings (L, C).
302
+
303
+ Returns:
304
+ Extracted positional embeddings according to relative positions.
305
+ """
306
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
307
+ # Interpolate rel pos if needed.
308
+ if rel_pos.shape[0] != max_rel_dist:
309
+ # Interpolate rel pos.
310
+ rel_pos_resized = F.interpolate(
311
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
312
+ size=max_rel_dist,
313
+ mode="linear",
314
+ )
315
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
316
+ else:
317
+ rel_pos_resized = rel_pos
318
+
319
+ # Scale the coords with short length if shapes for q and k are different.
320
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
321
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
322
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
323
+
324
+ return rel_pos_resized[relative_coords.long()]
325
+
326
+
327
+ def add_decomposed_rel_pos(
328
+ attn: torch.Tensor,
329
+ q: torch.Tensor,
330
+ rel_pos_h: torch.Tensor,
331
+ rel_pos_w: torch.Tensor,
332
+ q_size: Tuple[int, int],
333
+ k_size: Tuple[int, int],
334
+ ) -> torch.Tensor:
335
+ """
336
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
337
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
338
+ Args:
339
+ attn (Tensor): attention map.
340
+ q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
341
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
342
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
343
+ q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
344
+ k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
345
+
346
+ Returns:
347
+ attn (Tensor): attention map with added relative positional embeddings.
348
+ """
349
+ q_h, q_w = q_size
350
+ k_h, k_w = k_size
351
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
352
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
353
+
354
+ B, _, dim = q.shape
355
+ r_q = q.reshape(B, q_h, q_w, dim)
356
+
357
+ # r_q = r_q.to(torch.float) # todo: needs to stay commented out when running in opt_level="O2" mode
358
+ r_q = r_q.to(Rh.dtype) #todo
359
+
360
+ rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
361
+ rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
362
+
363
+ attn = (
364
+ attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
365
+ ).view(B, q_h * q_w, k_h * k_w)
366
+
367
+ return attn
368
+
369
+
370
+ class PatchEmbed(nn.Module):
371
+ """
372
+ Image to Patch Embedding.
373
+ """
374
+
375
+ def __init__(
376
+ self,
377
+ kernel_size: Tuple[int, int] = (16, 16),
378
+ stride: Tuple[int, int] = (16, 16),
379
+ padding: Tuple[int, int] = (0, 0),
380
+ in_chans: int = 3,
381
+ embed_dim: int = 768,
382
+ ) -> None:
383
+ """
384
+ Args:
385
+ kernel_size (Tuple): kernel size of the projection layer.
386
+ stride (Tuple): stride of the projection layer.
387
+ padding (Tuple): padding size of the projection layer.
388
+ in_chans (int): Number of input image channels.
389
+ embed_dim (int): embed_dim (int): Patch embedding dimension.
390
+ """
391
+ super().__init__()
392
+
393
+ self.proj = nn.Conv2d(
394
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
395
+ )
396
+
397
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
398
+ x = self.proj(x)
399
+ # B C H W -> B H W C
400
+ x = x.permute(0, 2, 3, 1)
401
+ return x
segment_anything/modeling/image_encoder3D.py ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from typing import Optional, Tuple, Type
12
+
13
+
14
+ class MLPBlock(nn.Module):
15
+ def __init__(
16
+ self,
17
+ embedding_dim: int,
18
+ mlp_dim: int,
19
+ act: Type[nn.Module] = nn.GELU,
20
+ ) -> None:
21
+ super().__init__()
22
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
23
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
24
+ self.act = act()
25
+
26
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
27
+ return self.lin2(self.act(self.lin1(x)))
28
+
29
+ class LayerNorm3d(nn.Module):
30
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
31
+ super().__init__()
32
+ self.weight = nn.Parameter(torch.ones(num_channels))
33
+ self.bias = nn.Parameter(torch.zeros(num_channels))
34
+ self.eps = eps
35
+
36
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
37
+ u = x.mean(1, keepdim=True)
38
+ s = (x - u).pow(2).mean(1, keepdim=True)
39
+ x = (x - u) / torch.sqrt(s + self.eps)
40
+ x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None]
41
+ return x
42
+
43
+
44
+ # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
45
+ class ImageEncoderViT3D(nn.Module):
46
+ def __init__(
47
+ self,
48
+ img_size: int = 256,
49
+ patch_size: int = 16,
50
+ in_chans: int = 1,
51
+ embed_dim: int = 768,
52
+ depth: int = 12,
53
+ num_heads: int = 12,
54
+ mlp_ratio: float = 4.0,
55
+ out_chans: int = 256,
56
+ qkv_bias: bool = True,
57
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
58
+ act_layer: Type[nn.Module] = nn.GELU,
59
+ use_abs_pos: bool = True,
60
+ use_rel_pos: bool = False,
61
+ rel_pos_zero_init: bool = True,
62
+ window_size: int = 0,
63
+ global_attn_indexes: Tuple[int, ...] = (),
64
+ ) -> None:
65
+ """
66
+ Args:
67
+ img_size (int): Input image size.
68
+ patch_size (int): Patch size.
69
+ in_chans (int): Number of input image channels.
70
+ embed_dim (int): Patch embedding dimension.
71
+ depth (int): Depth of ViT.
72
+ num_heads (int): Number of attention heads in each ViT block.
73
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
74
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
75
+ norm_layer (nn.Module): Normalization layer.
76
+ act_layer (nn.Module): Activation layer.
77
+ use_abs_pos (bool): If True, use absolute positional embeddings.
78
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
79
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
80
+ window_size (int): Window size for window attention blocks.
81
+ global_attn_indexes (list): Indexes for blocks using global attention.
82
+ """
83
+ super().__init__()
84
+ self.img_size = img_size
85
+
86
+ self.patch_embed = PatchEmbed3D(
87
+ kernel_size=(patch_size, patch_size, patch_size),
88
+ stride=(patch_size, patch_size, patch_size),
89
+ in_chans=in_chans,
90
+ embed_dim=embed_dim,
91
+ )
92
+
93
+ self.pos_embed: Optional[nn.Parameter] = None
94
+ if use_abs_pos:
95
+ # Initialize absolute positional embedding with pretrain image size.
96
+ self.pos_embed = nn.Parameter(
97
+ torch.zeros(1, img_size // patch_size, img_size // patch_size, img_size // patch_size, embed_dim)
98
+ )
99
+
100
+ self.blocks = nn.ModuleList()
101
+ for i in range(depth):
102
+ block = Block3D(
103
+ dim=embed_dim,
104
+ num_heads=num_heads,
105
+ mlp_ratio=mlp_ratio,
106
+ qkv_bias=qkv_bias,
107
+ norm_layer=norm_layer,
108
+ act_layer=act_layer,
109
+ use_rel_pos=use_rel_pos,
110
+ rel_pos_zero_init=rel_pos_zero_init,
111
+ window_size=window_size if i not in global_attn_indexes else 0,
112
+ input_size=(img_size // patch_size, img_size // patch_size, img_size // patch_size),
113
+ )
114
+ self.blocks.append(block)
115
+
116
+ self.neck = nn.Sequential(
117
+ nn.Conv3d(
118
+ embed_dim,
119
+ out_chans,
120
+ kernel_size=1,
121
+ bias=False,
122
+ ),
123
+ # nn.LayerNorm(out_chans),
124
+ LayerNorm3d(out_chans),
125
+ nn.Conv3d(
126
+ out_chans,
127
+ out_chans,
128
+ kernel_size=3,
129
+ padding=1,
130
+ bias=False,
131
+ ),
132
+ LayerNorm3d(out_chans),
133
+ # nn.LayerNorm(out_chans),
134
+ )
135
+
136
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
137
+ # input_size = [1,1,256,256,256]
138
+ # import IPython; IPython.embed()
139
+ x = self.patch_embed(x)
140
+ # x = [1,16,16,16,768]
141
+ # import pdb; pdb.set_trace()
142
+ if self.pos_embed is not None:
143
+ x = x + self.pos_embed
144
+
145
+ for blk in self.blocks:
146
+ x = blk(x)
147
+ # x = [1,16,16,16,768]
148
+ x = self.neck(x.permute(0, 4, 1, 2, 3))
149
+
150
+ # output_size = [1,256,16,16,16]
151
+ return x
152
+
153
+
154
+ class Block3D(nn.Module):
155
+ """Transformer blocks with support of window attention and residual propagation blocks"""
156
+
157
+ def __init__(
158
+ self,
159
+ dim: int,
160
+ num_heads: int,
161
+ mlp_ratio: float = 4.0,
162
+ qkv_bias: bool = True,
163
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
164
+ act_layer: Type[nn.Module] = nn.GELU,
165
+ use_rel_pos: bool = False,
166
+ rel_pos_zero_init: bool = True,
167
+ window_size: int = 0,
168
+ input_size: Optional[Tuple[int, int, int]] = None,
169
+ ) -> None:
170
+ """
171
+ Args:
172
+ dim (int): Number of input channels.
173
+ num_heads (int): Number of attention heads in each ViT block.
174
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
175
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
176
+ norm_layer (nn.Module): Normalization layer.
177
+ act_layer (nn.Module): Activation layer.
178
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
179
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
180
+ window_size (int): Window size for window attention blocks. If it equals 0, then
181
+ use global attention.
182
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
183
+ positional parameter size.
184
+ """
185
+ super().__init__()
186
+ self.norm1 = norm_layer(dim)
187
+ self.attn = Attention(
188
+ dim,
189
+ num_heads=num_heads,
190
+ qkv_bias=qkv_bias,
191
+ use_rel_pos=use_rel_pos,
192
+ rel_pos_zero_init=rel_pos_zero_init,
193
+ input_size=input_size if window_size == 0 else (window_size, window_size, window_size),
194
+ )
195
+
196
+ self.norm2 = norm_layer(dim)
197
+ self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
198
+
199
+ self.window_size = window_size
200
+
201
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
202
+ shortcut = x
203
+ x = self.norm1(x)
204
+ # Window partition
205
+ if self.window_size > 0:
206
+ D, H, W = x.shape[1], x.shape[2], x.shape[3]
207
+ x, pad_dhw = window_partition3D(x, self.window_size)
208
+
209
+ x = self.attn(x)
210
+ # Reverse window partition
211
+ if self.window_size > 0:
212
+ x = window_unpartition3D(x, self.window_size, pad_dhw, (D, H, W))
213
+
214
+ x = shortcut + x
215
+ x = x + self.mlp(self.norm2(x))
216
+
217
+ return x
218
+
219
+
220
+ class Attention(nn.Module):
221
+ """Multi-head Attention block with relative position embeddings."""
222
+
223
+ def __init__(
224
+ self,
225
+ dim: int,
226
+ num_heads: int = 8,
227
+ qkv_bias: bool = True,
228
+ use_rel_pos: bool = False,
229
+ rel_pos_zero_init: bool = True,
230
+ input_size: Optional[Tuple[int, int, int]] = None,
231
+ ) -> None:
232
+ """
233
+ Args:
234
+ dim (int): Number of input channels.
235
+ num_heads (int): Number of attention heads.
236
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
237
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
238
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
239
+ input_size (tuple(int, int, int) or None): Input resolution for calculating the relative
240
+ positional parameter size.
241
+ """
242
+ super().__init__()
243
+ self.num_heads = num_heads
244
+ head_dim = dim // num_heads
245
+ self.scale = head_dim**-0.5
246
+
247
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
248
+ self.proj = nn.Linear(dim, dim)
249
+
250
+ self.use_rel_pos = use_rel_pos
251
+ if self.use_rel_pos:
252
+ assert (
253
+ input_size is not None
254
+ ), "Input size must be provided if using relative positional encoding."
255
+ # initialize relative positional embeddings
256
+ self.rel_pos_d = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
257
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
258
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[2] - 1, head_dim))
259
+
260
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
261
+ B, D, H, W, _ = x.shape
262
+ # qkv with shape (3, B, nHead, H * W, C)
263
+ qkv = self.qkv(x).reshape(B, D * H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
264
+ # q, k, v with shape (B * nHead, H * W, C)
265
+ q, k, v = qkv.reshape(3, B * self.num_heads, D * H * W, -1).unbind(0)
266
+
267
+ attn = (q * self.scale) @ k.transpose(-2, -1)
268
+
269
+ if self.use_rel_pos:
270
+ attn = add_decomposed_rel_pos(attn, q, self.rel_pos_d, self.rel_pos_h, self.rel_pos_w, (D, H, W), (D, H, W))
271
+
272
+ attn = attn.softmax(dim=-1)
273
+ x = (attn @ v).view(B, self.num_heads, D, H, W, -1).permute(0, 2, 3, 4, 1, 5).reshape(B, D, H, W, -1)
274
+ x = self.proj(x)
275
+
276
+ return x
277
+
278
+
279
+ def window_partition3D(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int, int]]:
280
+ """
281
+ Partition into non-overlapping windows with padding if needed.
282
+ Args:
283
+ x (tensor): input tokens with [B, D, H, W, C].
284
+ window_size (int): window size.
285
+
286
+ Returns:
287
+ windows: windows after partition with [B * num_windows, window_size, window_size, window_size, C].
288
+ (Dp, Hp, Wp): padded depth, height, and width before partition
289
+ """
290
+ B, D, H, W, C = x.shape
291
+
292
+ pad_d = (window_size - D % window_size) % window_size
293
+ pad_h = (window_size - H % window_size) % window_size
294
+ pad_w = (window_size - W % window_size) % window_size
295
+
296
+ if pad_h > 0 or pad_w > 0 or pad_d > 0:
297
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_d))
298
+ Hp, Wp, Dp = H + pad_h, W + pad_w, D + pad_d
299
+
300
+ x = x.view(B, Dp // window_size, window_size, Hp // window_size, window_size, Wp // window_size, window_size, C)
301
+ windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size, window_size, window_size, C)
302
+ return windows, (Dp, Hp, Wp)
303
+
304
+
305
+ def window_unpartition3D(
306
+ windows: torch.Tensor, window_size: int, pad_dhw: Tuple[int, int, int], dhw: Tuple[int, int, int]
307
+ ) -> torch.Tensor:
308
+ """
309
+ Window unpartition into original sequences and removing padding.
310
+ Args:
311
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, window_size, C].
312
+ window_size (int): window size.
313
+ pad_dhw (Tuple): padded depth, height, and width (Dp, Hp, Wp).
314
+ dhw (Tuple): original depth, height, and width (D, H, W) before padding.
315
+
316
+ Returns:
317
+ x: unpartitioned sequences with [B, D, H, W, C].
318
+ """
319
+ Dp, Hp, Wp = pad_dhw
320
+ D, H, W = dhw
321
+ B = windows.shape[0] // (Dp * Hp * Wp // window_size // window_size // window_size)
322
+ x = windows.view(B, Dp // window_size, Hp // window_size, Wp // window_size, window_size, window_size, window_size, -1)
323
+ x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, Dp, Hp, Wp, -1)
324
+
325
+ if Hp > H or Wp > W or Dp > D:
326
+ x = x[:, :D, :H, :W, :].contiguous()
327
+ return x
328
+
329
+
330
+ def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
331
+ """
332
+ Get relative positional embeddings according to the relative positions of
333
+ query and key sizes.
334
+ Args:
335
+ q_size (int): size of query q.
336
+ k_size (int): size of key k.
337
+ rel_pos (Tensor): relative position embeddings (L, C).
338
+
339
+ Returns:
340
+ Extracted positional embeddings according to relative positions.
341
+ """
342
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
343
+ # Interpolate rel pos if needed.
344
+ if rel_pos.shape[0] != max_rel_dist:
345
+ # Interpolate rel pos.
346
+ rel_pos_resized = F.interpolate(
347
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
348
+ size=max_rel_dist,
349
+ mode="linear",
350
+ )
351
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
352
+ else:
353
+ rel_pos_resized = rel_pos
354
+
355
+ # Scale the coords with short length if shapes for q and k are different.
356
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
357
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
358
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
359
+
360
+ return rel_pos_resized[relative_coords.long()]
361
+
362
+
363
+ def add_decomposed_rel_pos(
364
+ attn: torch.Tensor,
365
+ q: torch.Tensor,
366
+ rel_pos_d: torch.Tensor,
367
+ rel_pos_h: torch.Tensor,
368
+ rel_pos_w: torch.Tensor,
369
+ q_size: Tuple[int, int, int],
370
+ k_size: Tuple[int, int, int],
371
+ ) -> torch.Tensor:
372
+ """
373
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
374
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
375
+ Args:
376
+ attn (Tensor): attention map.
377
+ q (Tensor): query q in the attention layer with shape (B, q_d * q_h * q_w, C).
378
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
379
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
380
+ q_size (Tuple): spatial sequence size of query q with (q_d, q_h, q_w).
381
+ k_size (Tuple): spatial sequence size of key k with (k_d, k_h, k_w).
382
+
383
+ Returns:
384
+ attn (Tensor): attention map with added relative positional embeddings.
385
+ """
386
+ q_d, q_h, q_w = q_size
387
+ k_d, k_h, k_w = k_size
388
+
389
+ Rd = get_rel_pos(q_d, k_d, rel_pos_d)
390
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
391
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
392
+
393
+ B, _, dim = q.shape
394
+ r_q = q.reshape(B, q_d, q_h, q_w, dim)
395
+
396
+ rel_d = torch.einsum("bdhwc,dkc->bdhwk", r_q, Rd)
397
+ rel_h = torch.einsum("bdhwc,hkc->bdhwk", r_q, Rh)
398
+ rel_w = torch.einsum("bdhwc,wkc->bdhwk", r_q, Rw)
399
+
400
+
401
+
402
+ attn = (
403
+ attn.view(B, q_d, q_h, q_w, k_d, k_h, k_w) + rel_d[:, :, :, :, None, None] + rel_h[:, :, :, None, :, None] + rel_w[:, :, :,None,None, :]
404
+ ).view(B, q_d * q_h * q_w, k_d * k_h * k_w)
405
+
406
+ return attn
407
+
408
+
409
+ class PatchEmbed3D(nn.Module):
410
+ """
411
+ Image to Patch Embedding.
412
+ """
413
+
414
+ def __init__(
415
+ self,
416
+ kernel_size: Tuple[int, int, int] = (16, 16, 16),
417
+ stride: Tuple[int, int, int] = (16, 16, 16),
418
+ padding: Tuple[int, int, int] = (0, 0, 0),
419
+ in_chans: int = 1,
420
+ embed_dim: int = 768,
421
+ ) -> None:
422
+ """
423
+ Args:
424
+ kernel_size (Tuple): kernel size of the projection layer.
425
+ stride (Tuple): stride of the projection layer.
426
+ padding (Tuple): padding size of the projection layer.
427
+ in_chans (int): Number of input image channels.
428
+ embed_dim (int): Patch embedding dimension.
429
+ """
430
+ super().__init__()
431
+
432
+ self.proj = nn.Conv3d(
433
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
434
+ )
435
+
436
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
437
+ x = self.proj(x)
438
+ # B C X Y Z -> B X Y Z C
439
+ x = x.permute(0, 2, 3, 4, 1)
440
+ return x
441
+
442
+
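
The 3D window helpers above mirror the 2D SAM utilities: `window_partition3D` pads the token grid so each spatial dimension is divisible by the window size, and `window_unpartition3D` undoes both the reshuffle and the padding. Below is a minimal round-trip sketch; it assumes this module is importable as `segment_anything.modeling.image_encoder3D`, and the shapes are illustrative only.

```python
import torch

from segment_anything.modeling.image_encoder3D import (
    window_partition3D,
    window_unpartition3D,
)

# B x D x H x W x C token grid; 10 is deliberately not divisible by the window size
x = torch.randn(1, 10, 10, 10, 96)

windows, pad_dhw = window_partition3D(x, window_size=4)
# Each dimension is padded 10 -> 12, giving (12/4)^3 = 27 windows of 4x4x4 tokens
print(windows.shape, pad_dhw)   # torch.Size([27, 4, 4, 4, 96]) (12, 12, 12)

# Unpartitioning strips the padding and recovers the original token grid exactly
y = window_unpartition3D(windows, 4, pad_dhw, (10, 10, 10))
assert torch.equal(x, y)
```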
segment_anything/modeling/mask_decoder.py ADDED
@@ -0,0 +1,186 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import List, Tuple, Type
12
+
13
+ from .common import LayerNorm2d
14
+
15
+
16
+ class MaskDecoder(nn.Module):
17
+ def __init__(
18
+ self,
19
+ *,
20
+ transformer_dim: int,
21
+ transformer: nn.Module,
22
+ num_multimask_outputs: int = 3,
23
+ activation: Type[nn.Module] = nn.GELU,
24
+ iou_head_depth: int = 3,
25
+ iou_head_hidden_dim: int = 256,
26
+ ) -> None:
27
+ """
28
+ Predicts masks given an image and prompt embeddings, using a
29
+ transformer architecture.
30
+
31
+ Arguments:
32
+ transformer_dim (int): the channel dimension of the transformer
33
+ transformer (nn.Module): the transformer used to predict masks
34
+ num_multimask_outputs (int): the number of masks to predict
35
+ when disambiguating masks
36
+ activation (nn.Module): the type of activation to use when
37
+ upscaling masks
38
+ iou_head_depth (int): the depth of the MLP used to predict
39
+ mask quality
40
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
41
+ used to predict mask quality
42
+ """
43
+ super().__init__()
44
+ self.transformer_dim = transformer_dim
45
+ self.transformer = transformer
46
+
47
+ self.num_multimask_outputs = num_multimask_outputs
48
+
49
+ self.iou_token = nn.Embedding(1, transformer_dim)
50
+ self.num_mask_tokens = num_multimask_outputs + 1
51
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
52
+
53
+ self.output_upscaling = nn.Sequential(
54
+ nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
55
+ LayerNorm2d(transformer_dim // 4),
56
+ activation(),
57
+ nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
58
+ activation(),
59
+ )
60
+ self.output_hypernetworks_mlps = nn.ModuleList(
61
+ [
62
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
63
+ for i in range(self.num_mask_tokens)
64
+ ]
65
+ )
66
+
67
+ self.iou_prediction_head = MLP(
68
+ transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
69
+ ) #256 256 4 3
70
+
71
+ def forward(
72
+ self,
73
+ image_embeddings: torch.Tensor, #[B, 256, 64, 64]
74
+ image_pe: torch.Tensor, #[1, 256, 64, 64]
75
+ sparse_prompt_embeddings: torch.Tensor, #[B, 3, 256]
76
+ dense_prompt_embeddings: torch.Tensor, #[B, 256, 64, 64]
77
+ multimask_output: bool,
78
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
79
+ """
80
+ Predict masks given image and prompt embeddings.
81
+
82
+ Arguments:
83
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
84
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
85
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
86
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
87
+ multimask_output (bool): Whether to return multiple masks or a single
88
+ mask.
89
+
90
+ Returns:
91
+ torch.Tensor: batched predicted masks
92
+ torch.Tensor: batched predictions of mask quality
93
+ """
94
+
95
+ masks, iou_pred = self.predict_masks(
96
+ image_embeddings=image_embeddings,
97
+ image_pe=image_pe,
98
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
99
+ dense_prompt_embeddings=dense_prompt_embeddings,
100
+ )
101
+
102
+ # Select the correct mask or masks for output
103
+ if multimask_output:
104
+ mask_slice = slice(1, None)
105
+ else:
106
+ mask_slice = slice(0, 1)
107
+ masks = masks[:, mask_slice, :, :]
108
+ iou_pred = iou_pred[:, mask_slice]
109
+
110
+ # Prepare output
111
+ return masks, iou_pred
112
+
113
+ def predict_masks(
114
+ self,
115
+ image_embeddings: torch.Tensor,
116
+ image_pe: torch.Tensor,
117
+ sparse_prompt_embeddings: torch.Tensor,
118
+ dense_prompt_embeddings: torch.Tensor,
119
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
120
+ """Predicts masks. See 'forward' for more details."""
121
+ # Concatenate output tokens
122
+
123
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) #iou_token:[1,256] mask_tokens:[4,256]
124
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
125
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
126
+
127
+ # Expand per-image data in batch direction to be per-mask
128
+ # src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
129
+ src = image_embeddings
130
+ src = src + dense_prompt_embeddings
131
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
132
+ b, c, h, w = src.shape
133
+
134
+ # Run the transformer
135
+ hs, src = self.transformer(src, pos_src, tokens)
136
+ iou_token_out = hs[:, 0, :]
137
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
138
+
139
+ # Upscale mask embeddings and predict masks using the mask tokens
140
+ src = src.transpose(1, 2).view(b, c, h, w)
141
+ upscaled_embedding = self.output_upscaling(src)
142
+ hyper_in_list: List[torch.Tensor] = []
143
+ for i in range(self.num_mask_tokens):
144
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
145
+ hyper_in = torch.stack(hyper_in_list, dim=1) #[1,4,32]
146
+
147
+ b, c, h, w = upscaled_embedding.shape #[1, 32, 256, 256]
148
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
149
+
150
+ # Generate mask quality predictions
151
+ iou_pred = self.iou_prediction_head(iou_token_out)
152
+
153
+ return masks, iou_pred
154
+
155
+
156
+ # Lightly adapted from
157
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
158
+ class MLP(nn.Module):
159
+ def __init__(
160
+ self,
161
+ input_dim: int,
162
+ hidden_dim: int,
163
+ output_dim: int,
164
+ num_layers: int,
165
+ sigmoid_output: bool = False,
166
+ ) -> None:
167
+ super().__init__()
168
+ self.num_layers = num_layers
169
+ h = [hidden_dim] * (num_layers - 1)
170
+ self.layers = nn.ModuleList(
171
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
172
+ )
173
+ self.sigmoid_output = sigmoid_output
174
+ self.relu = nn.ReLU(inplace=False)
175
+ def forward(self, x):
176
+ for i, layer in enumerate(self.layers):
177
+ # x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
178
+ # x = self.relu(layer(x)) if i < self.num_layers - 1 else layer(x)  # original implementation
179
+ if i < self.num_layers - 1:
180
+ x = F.relu(layer(x))
181
+ else:
182
+ x = layer(x)
183
+
184
+ if self.sigmoid_output:
185
+ x = F.sigmoid(x)
186
+ return x
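
For orientation, here is a hedged usage sketch of the 2D `MaskDecoder` above. It assumes the `TwoWayTransformer` in this repo's `segment_anything/modeling/transformer.py` keeps the upstream SAM constructor; the tensor shapes follow the comments in `forward`.

```python
import torch

from segment_anything.modeling.mask_decoder import MaskDecoder
from segment_anything.modeling.transformer import TwoWayTransformer  # assumed upstream API

decoder = MaskDecoder(
    transformer_dim=256,
    transformer=TwoWayTransformer(depth=2, embedding_dim=256, mlp_dim=2048, num_heads=8),
)

image_emb = torch.randn(1, 256, 64, 64)   # image encoder output
image_pe = torch.randn(1, 256, 64, 64)    # dense positional encoding
sparse = torch.randn(1, 3, 256)           # point/box prompt tokens
dense = torch.randn(1, 256, 64, 64)       # mask prompt embedding

masks, iou = decoder(image_emb, image_pe, sparse, dense, multimask_output=True)
print(masks.shape, iou.shape)             # (1, 3, 256, 256), (1, 3)
```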
segment_anything/modeling/mask_decoder3D.py ADDED
@@ -0,0 +1,458 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import List, Tuple, Type
12
+ # from .transformer import TwoWayTransformer
13
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
14
+ # All rights reserved.
15
+
16
+ # This source code is licensed under the license found in the
17
+ # LICENSE file in the root directory of this source tree.
18
+
19
+ import torch
20
+ from torch import Tensor, nn
21
+
22
+ import math
23
+ from typing import Tuple, Type
24
+
25
+
26
+ class MLPBlock3D(nn.Module):
27
+ def __init__(
28
+ self,
29
+ embedding_dim: int,
30
+ mlp_dim: int,
31
+ act: Type[nn.Module] = nn.GELU,
32
+ ) -> None:
33
+ super().__init__()
34
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
35
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
36
+ self.act = act()
37
+
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ return self.lin2(self.act(self.lin1(x)))
40
+
41
+ class TwoWayTransformer3D(nn.Module):
42
+ def __init__(
43
+ self,
44
+ depth: int,
45
+ embedding_dim: int,
46
+ num_heads: int,
47
+ mlp_dim: int,
48
+ activation: Type[nn.Module] = nn.ReLU,
49
+ attention_downsample_rate: int = 2,
50
+ ) -> None:
51
+ """
52
+ A transformer decoder that attends to an input image using
53
+ queries whose positional embedding is supplied.
54
+
55
+ Args:
56
+ depth (int): number of layers in the transformer
57
+ embedding_dim (int): the channel dimension for the input embeddings
58
+ num_heads (int): the number of heads for multihead attention. Must
59
+ divide embedding_dim
60
+ mlp_dim (int): the channel dimension internal to the MLP block
61
+ activation (nn.Module): the activation to use in the MLP block
62
+ """
63
+ super().__init__()
64
+ self.depth = depth
65
+ self.embedding_dim = embedding_dim
66
+ self.num_heads = num_heads
67
+ self.mlp_dim = mlp_dim
68
+ self.layers = nn.ModuleList()
69
+
70
+ for i in range(depth):
71
+ self.layers.append(
72
+ TwoWayAttentionBlock3D(
73
+ embedding_dim=embedding_dim,
74
+ num_heads=num_heads,
75
+ mlp_dim=mlp_dim,
76
+ activation=activation,
77
+ attention_downsample_rate=attention_downsample_rate,
78
+ skip_first_layer_pe=(i == 0),
79
+ )
80
+ )
81
+
82
+ self.final_attn_token_to_image = Attention(
83
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
84
+ )
85
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
86
+
87
+ def forward(
88
+ self,
89
+ image_embedding: Tensor,
90
+ image_pe: Tensor,
91
+ point_embedding: Tensor,
92
+ ) -> Tuple[Tensor, Tensor]:
93
+ """
94
+ Args:
95
+ image_embedding (torch.Tensor): image to attend to. Should be shape
96
+ B x embedding_dim x d x h x w for any d, h and w.
97
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
98
+ have the same shape as image_embedding.
99
+ point_embedding (torch.Tensor): the embedding to add to the query points.
100
+ Must have shape B x N_points x embedding_dim for any N_points.
101
+
102
+ Returns:
103
+ torch.Tensor: the processed point_embedding
104
+ torch.Tensor: the processed image_embedding
105
+ """
106
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
107
+ bs, c, x, y, z = image_embedding.shape
108
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
109
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
110
+
111
+ # Prepare queries
112
+ queries = point_embedding
113
+ keys = image_embedding
114
+
115
+ # Apply transformer blocks and final layernorm
116
+ for layer in self.layers:
117
+ queries, keys = layer(
118
+ queries=queries,
119
+ keys=keys,
120
+ query_pe=point_embedding,
121
+ key_pe=image_pe,
122
+ )
123
+
124
+ # Apply the final attention layer from the points to the image
125
+ q = queries + point_embedding
126
+ k = keys + image_pe
127
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
128
+ queries = queries + attn_out
129
+ queries = self.norm_final_attn(queries)
130
+
131
+ return queries, keys
132
+
133
+
134
+ class TwoWayAttentionBlock3D(nn.Module):
135
+ def __init__(
136
+ self,
137
+ embedding_dim: int,
138
+ num_heads: int,
139
+ mlp_dim: int = 2048,
140
+ activation: Type[nn.Module] = nn.ReLU,
141
+ attention_downsample_rate: int = 2,
142
+ skip_first_layer_pe: bool = False,
143
+ ) -> None:
144
+ """
145
+ A transformer block with four layers: (1) self-attention of sparse
146
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
147
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
148
+ inputs.
149
+
150
+ Arguments:
151
+ embedding_dim (int): the channel dimension of the embeddings
152
+ num_heads (int): the number of heads in the attention layers
153
+ mlp_dim (int): the hidden dimension of the mlp block
154
+ activation (nn.Module): the activation of the mlp block
155
+ skip_first_layer_pe (bool): skip the PE on the first layer
156
+ """
157
+ super().__init__()
158
+ self.self_attn = Attention(embedding_dim, num_heads)
159
+ self.norm1 = nn.LayerNorm(embedding_dim)
160
+
161
+ self.cross_attn_token_to_image = Attention(
162
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
163
+ )
164
+ self.norm2 = nn.LayerNorm(embedding_dim)
165
+
166
+ self.mlp = MLPBlock3D(embedding_dim, mlp_dim, activation)
167
+ self.norm3 = nn.LayerNorm(embedding_dim)
168
+
169
+ self.norm4 = nn.LayerNorm(embedding_dim)
170
+ self.cross_attn_image_to_token = Attention(
171
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
172
+ )
173
+
174
+ self.skip_first_layer_pe = skip_first_layer_pe
175
+
176
+ def forward(
177
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
178
+ ) -> Tuple[Tensor, Tensor]:
179
+ # Self attention block
180
+ if self.skip_first_layer_pe:
181
+ queries = self.self_attn(q=queries, k=queries, v=queries)
182
+ else:
183
+ q = queries + query_pe
184
+ attn_out = self.self_attn(q=q, k=q, v=queries)
185
+ queries = queries + attn_out
186
+ queries = self.norm1(queries)
187
+
188
+ # Cross attention block, tokens attending to image embedding
189
+ q = queries + query_pe
190
+ k = keys + key_pe
191
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
192
+ queries = queries + attn_out
193
+ queries = self.norm2(queries)
194
+
195
+ # MLP block
196
+ mlp_out = self.mlp(queries)
197
+ queries = queries + mlp_out
198
+ queries = self.norm3(queries)
199
+
200
+ # Cross attention block, image embedding attending to tokens
201
+ q = queries + query_pe
202
+ k = keys + key_pe
203
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
204
+ keys = keys + attn_out
205
+ keys = self.norm4(keys)
206
+
207
+ return queries, keys
208
+
209
+
210
+ class Attention(nn.Module):
211
+ """
212
+ An attention layer that allows for downscaling the size of the embedding
213
+ after projection to queries, keys, and values.
214
+ """
215
+
216
+ def __init__(
217
+ self,
218
+ embedding_dim: int,
219
+ num_heads: int,
220
+ downsample_rate: int = 1,
221
+ ) -> None:
222
+ super().__init__()
223
+ self.embedding_dim = embedding_dim
224
+ self.internal_dim = embedding_dim // downsample_rate
225
+ self.num_heads = num_heads
226
+ assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
227
+
228
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
229
+ self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
230
+ self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
231
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
232
+
233
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
234
+ b, n, c = x.shape
235
+ x = x.reshape(b, n, num_heads, c // num_heads)
236
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
237
+
238
+ def _recombine_heads(self, x: Tensor) -> Tensor:
239
+ b, n_heads, n_tokens, c_per_head = x.shape
240
+ x = x.transpose(1, 2)
241
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
242
+
243
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
244
+ # Input projections
245
+ q = self.q_proj(q)
246
+ k = self.k_proj(k)
247
+ v = self.v_proj(v)
248
+
249
+ # Separate into heads
250
+ q = self._separate_heads(q, self.num_heads)
251
+ k = self._separate_heads(k, self.num_heads)
252
+ v = self._separate_heads(v, self.num_heads)
253
+
254
+ # Attention
255
+ _, _, _, c_per_head = q.shape
256
+ attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
257
+ attn = attn / math.sqrt(c_per_head)
258
+ attn = torch.softmax(attn, dim=-1)
259
+
260
+ # Get output
261
+ out = attn @ v
262
+ out = self._recombine_heads(out)
263
+ out = self.out_proj(out)
264
+
265
+ return out
266
+
267
+
268
+
269
+ class LayerNorm3d(nn.Module):
270
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
271
+ super().__init__()
272
+ self.weight = nn.Parameter(torch.ones(num_channels))
273
+ self.bias = nn.Parameter(torch.zeros(num_channels))
274
+ self.eps = eps
275
+
276
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
277
+ u = x.mean(1, keepdim=True)
278
+ s = (x - u).pow(2).mean(1, keepdim=True)
279
+ x = (x - u) / torch.sqrt(s + self.eps)
280
+ x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None]
281
+ return x
282
+
283
+
284
+ class MaskDecoder3D(nn.Module):
285
+ def __init__(
286
+ self,
287
+ *,
288
+ transformer_dim: int,
289
+ # transformer: nn.Module ,
290
+ num_multimask_outputs: int = 3,
291
+ activation: Type[nn.Module] = nn.GELU,
292
+ iou_head_depth: int = 3,
293
+ iou_head_hidden_dim: int = 256,
294
+ ) -> None:
295
+ """
296
+ Predicts masks given an image and prompt embeddings, using a
297
+ transformer architecture.
298
+
299
+ Arguments:
300
+ transformer_dim (int): the channel dimension of the transformer
301
+ transformer (nn.Module): the transformer used to predict masks
302
+ num_multimask_outputs (int): the number of masks to predict
303
+ when disambiguating masks
304
+ activation (nn.Module): the type of activation to use when
305
+ upscaling masks
306
+ iou_head_depth (int): the depth of the MLP used to predict
307
+ mask quality
308
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
309
+ used to predict mask quality
310
+ """
311
+ super().__init__()
312
+ self.transformer_dim = transformer_dim
313
+ # self.transformer = transformer
314
+ self.transformer = TwoWayTransformer3D(
315
+ depth=2,
316
+ embedding_dim=self.transformer_dim,
317
+ mlp_dim=2048,
318
+ num_heads=8,
319
+ )
320
+
321
+ self.num_multimask_outputs = num_multimask_outputs
322
+
323
+ self.iou_token = nn.Embedding(1, transformer_dim)
324
+ self.num_mask_tokens = num_multimask_outputs + 1
325
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
326
+
327
+ self.output_upscaling = nn.Sequential(
328
+ nn.ConvTranspose3d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
329
+ LayerNorm3d(transformer_dim // 4),
330
+ activation(),
331
+ nn.ConvTranspose3d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
332
+ activation(),
333
+ )
334
+ self.output_hypernetworks_mlps = nn.ModuleList(
335
+ [
336
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
337
+ for i in range(self.num_mask_tokens)
338
+ ]
339
+ )
340
+
341
+ self.iou_prediction_head = MLP(
342
+ transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
343
+ )
344
+
345
+ def forward(
346
+ self,
347
+ image_embeddings: torch.Tensor,
348
+ image_pe: torch.Tensor,
349
+ sparse_prompt_embeddings: torch.Tensor,
350
+ dense_prompt_embeddings: torch.Tensor,
351
+ multimask_output: bool,
352
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
353
+ """
354
+ Predict masks given image and prompt embeddings.
355
+
356
+ Arguments:
357
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
358
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
359
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
360
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
361
+ multimask_output (bool): Whether to return multiple masks or a single
362
+ mask.
363
+
364
+ Returns:
365
+ torch.Tensor: batched predicted masks
366
+ torch.Tensor: batched predictions of mask quality
367
+ """
368
+ masks, iou_pred = self.predict_masks(
369
+ image_embeddings=image_embeddings,
370
+ image_pe=image_pe,
371
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
372
+ dense_prompt_embeddings=dense_prompt_embeddings,
373
+ )
374
+
375
+ # Select the correct mask or masks for output
376
+ if multimask_output:
377
+ mask_slice = slice(1, None)
378
+ else:
379
+ mask_slice = slice(0, 1)
380
+ masks = masks[:, mask_slice, :, :]
381
+ iou_pred = iou_pred[:, mask_slice]
382
+
383
+ # Prepare output
384
+ return masks, iou_pred
385
+
386
+ def predict_masks(
387
+ self,
388
+ image_embeddings: torch.Tensor,
389
+ image_pe: torch.Tensor,
390
+ sparse_prompt_embeddings: torch.Tensor,
391
+ dense_prompt_embeddings: torch.Tensor,
392
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
393
+ """Predicts masks. See 'forward' for more details."""
394
+ # Concatenate output tokens
395
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
396
+ output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
397
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
398
+
399
+ # Expand per-image data in batch direction to be per-mask
400
+ if image_embeddings.shape[0] != tokens.shape[0]:
401
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
402
+ else:
403
+ src = image_embeddings
404
+ src = src + dense_prompt_embeddings
405
+ if image_pe.shape[0] != tokens.shape[0]:
406
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
407
+ else:
408
+ pos_src = image_pe
409
+ b, c, x, y, z = src.shape
410
+
411
+ # Run the transformer
412
+ # import IPython; IPython.embed()
413
+ hs, src = self.transformer(src, pos_src, tokens)
414
+ iou_token_out = hs[:, 0, :]
415
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
416
+
417
+ # Upscale mask embeddings and predict masks using the mask tokens
418
+ src = src.transpose(1, 2).view(b, c, x, y, z)
419
+ upscaled_embedding = self.output_upscaling(src)
420
+ hyper_in_list: List[torch.Tensor] = []
421
+ for i in range(self.num_mask_tokens):
422
+ hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
423
+ hyper_in = torch.stack(hyper_in_list, dim=1)
424
+ b, c, x, y, z = upscaled_embedding.shape
425
+ masks = (hyper_in @ upscaled_embedding.view(b, c, x * y * z)).view(b, -1, x, y, z)
426
+
427
+ # Generate mask quality predictions
428
+ iou_pred = self.iou_prediction_head(iou_token_out)
429
+
430
+ return masks, iou_pred
431
+
432
+
433
+ # Lightly adapted from
434
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
435
+ class MLP(nn.Module):
436
+ def __init__(
437
+ self,
438
+ input_dim: int,
439
+ hidden_dim: int,
440
+ output_dim: int,
441
+ num_layers: int,
442
+ sigmoid_output: bool = False,
443
+ ) -> None:
444
+ super().__init__()
445
+ self.num_layers = num_layers
446
+ h = [hidden_dim] * (num_layers - 1)
447
+ self.layers = nn.ModuleList(
448
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
449
+ )
450
+ self.sigmoid_output = sigmoid_output
451
+
452
+ def forward(self, x):
453
+ for i, layer in enumerate(self.layers):
454
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
455
+ if self.sigmoid_output:
456
+ x = F.sigmoid(x)
457
+ return x
458
+
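
`MaskDecoder3D` builds its own `TwoWayTransformer3D` internally, so only `transformer_dim` is required. A hedged shape-check sketch follows; the 384-dim embedding and 8x8x8 grid are illustrative, not prescribed values.

```python
import torch

from segment_anything.modeling.mask_decoder3D import MaskDecoder3D

decoder3d = MaskDecoder3D(transformer_dim=384)

image_emb = torch.randn(1, 384, 8, 8, 8)   # 3D image embedding
image_pe = torch.randn(1, 384, 8, 8, 8)    # 3D dense positional encoding
sparse = torch.randn(1, 2, 384)            # prompt tokens
dense = torch.randn(1, 384, 8, 8, 8)       # dense prompt embedding

masks, iou = decoder3d(image_emb, image_pe, sparse, dense, multimask_output=False)
# The two ConvTranspose3d stages upscale 8 -> 32 in each spatial dimension
print(masks.shape, iou.shape)              # (1, 1, 32, 32, 32), (1, 1)
```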
segment_anything/modeling/prompt_encoder.py ADDED
@@ -0,0 +1,227 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch import nn
10
+
11
+ from typing import Any, Optional, Tuple, Type
12
+
13
+ from .common import LayerNorm2d
14
+
15
+
16
+ class PromptEncoder(nn.Module):
17
+ def __init__(
18
+ self,
19
+ embed_dim: int,
20
+ image_embedding_size: Tuple[int, int],
21
+ input_image_size: Tuple[int, int],
22
+ mask_in_chans: int,
23
+ activation: Type[nn.Module] = nn.GELU,
24
+ ) -> None:
25
+ """
26
+ Encodes prompts for input to SAM's mask decoder.
27
+
28
+ Arguments:
29
+ embed_dim (int): The prompts' embedding dimension
30
+ image_embedding_size (tuple(int, int)): The spatial size of the
31
+ image embedding, as (H, W).
32
+ input_image_size (int): The padded size of the image as input
33
+ to the image encoder, as (H, W).
34
+ mask_in_chans (int): The number of hidden channels used for
35
+ encoding input masks.
36
+ activation (nn.Module): The activation to use when encoding
37
+ input masks.
38
+ """
39
+ super().__init__()
40
+ self.embed_dim = embed_dim
41
+ self.input_image_size = input_image_size
42
+ self.image_embedding_size = image_embedding_size
43
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
44
+
45
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
46
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
47
+ self.point_embeddings = nn.ModuleList(point_embeddings)
48
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
49
+
50
+ self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
51
+ self.mask_downscaling = nn.Sequential(
52
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
53
+ LayerNorm2d(mask_in_chans // 4),
54
+ activation(),
55
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
56
+ LayerNorm2d(mask_in_chans),
57
+ activation(),
58
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
59
+ )
60
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
61
+
62
+ def get_dense_pe(self) -> torch.Tensor:
63
+ """
64
+ Returns the positional encoding used to encode point prompts,
65
+ applied to a dense set of points the shape of the image encoding.
66
+
67
+ Returns:
68
+ torch.Tensor: Positional encoding with shape
69
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
70
+ """
71
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
72
+
73
+ def _embed_points(
74
+ self,
75
+ points: torch.Tensor,
76
+ labels: torch.Tensor,
77
+ pad: bool,
78
+ ) -> torch.Tensor:
79
+ """Embeds point prompts."""
80
+ points = points + 0.5 # Shift to center of pixel
81
+
82
+ if pad:
83
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
84
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
85
+ points = torch.cat([points, padding_point], dim=1) #B,N+1,2
86
+ labels = torch.cat([labels, padding_label], dim=1)
87
+
88
+
89
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) #B,N+1,256
90
+ point_embedding[labels == -1] = 0.0
91
+
92
+ self.not_a_point_embed.weight = torch.nn.Parameter(self.not_a_point_embed.weight.to(point_embedding.dtype), requires_grad=True) # todo
93
+ self.point_embeddings[0].weight = torch.nn.Parameter(self.point_embeddings[0].weight.to(point_embedding.dtype), requires_grad=True) #todo
94
+ self.point_embeddings[1].weight = torch.nn.Parameter(self.point_embeddings[1].weight.to(point_embedding.dtype), requires_grad=True) #todo
95
+
96
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
97
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
98
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
99
+ return point_embedding
100
+
101
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
102
+ """Embeds box prompts."""
103
+
104
+ boxes = boxes + 0.5 # Shift to center of pixel
105
+ coords = boxes.reshape(-1, 2, 2)
106
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
107
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
108
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
109
+ return corner_embedding
110
+
111
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
112
+ """Embeds mask inputs."""
113
+ mask_embedding = self.mask_downscaling(masks)
114
+ return mask_embedding
115
+
116
+ def _get_batch_size(
117
+ self,
118
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
119
+ boxes: Optional[torch.Tensor],
120
+ masks: Optional[torch.Tensor],
121
+ ) -> int:
122
+ """
123
+ Gets the batch size of the output given the batch size of the input prompts.
124
+ """
125
+ if points is not None:
126
+ return points[0].shape[0]
127
+ elif boxes is not None:
128
+ return boxes.shape[0]
129
+ elif masks is not None:
130
+ return masks.shape[0]
131
+ else:
132
+ return 1
133
+
134
+ def _get_device(self) -> torch.device:
135
+ return self.point_embeddings[0].weight.device
136
+
137
+ def forward(
138
+ self,
139
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
140
+ boxes: Optional[torch.Tensor],
141
+ masks: Optional[torch.Tensor],
142
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
143
+ """
144
+ Embeds different types of prompts, returning both sparse and dense
145
+ embeddings.
146
+
147
+ Arguments:
148
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
149
+ and labels to embed.
150
+ boxes (torch.Tensor or none): boxes to embed
151
+ masks (torch.Tensor or none): masks to embed
152
+
153
+ Returns:
154
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
155
+ BxNx(embed_dim), where N is determined by the number of input points
156
+ and boxes.
157
+ torch.Tensor: dense embeddings for the masks, in the shape
158
+ Bx(embed_dim)x(embed_H)x(embed_W)
159
+ """
160
+ bs = self._get_batch_size(points, boxes, masks)
161
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())  # B,0,256 empty placeholder
162
+
163
+ if points is not None:
164
+ coords, labels = points #coords:B,N,2 labels:B,N
165
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
166
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
167
+
168
+ if boxes is not None:
169
+ box_embeddings = self._embed_boxes(boxes)
170
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
171
+
172
+ if masks is not None:
173
+ dense_embeddings = self._embed_masks(masks)
174
+ else:
175
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
176
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
177
+ )
178
+
179
+ return sparse_embeddings, dense_embeddings
180
+
181
+
182
+ class PositionEmbeddingRandom(nn.Module):
183
+ """
184
+ Positional encoding using random spatial frequencies.
185
+ """
186
+
187
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
188
+ super().__init__()
189
+ if scale is None or scale <= 0.0:
190
+ scale = 1.0
191
+ self.register_buffer(
192
+ "positional_encoding_gaussian_matrix",
193
+ scale * torch.randn((2, num_pos_feats)),
194
+ )
195
+
196
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
197
+ """Positionally encode points that are normalized to [0,1]."""
198
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
199
+ coords = 2 * coords - 1
200
+ coords = coords @ self.positional_encoding_gaussian_matrix.to(torch.float32)
201
+ coords = 2 * np.pi * coords
202
+ # outputs d_1 x ... x d_n x C shape
203
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
204
+
205
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
206
+ """Generate positional encoding for a grid of the specified size."""
207
+ h, w = size
208
+
209
+ device: Any = self.positional_encoding_gaussian_matrix.device
210
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
211
+ y_embed = grid.cumsum(dim=0) - 0.5
212
+ x_embed = grid.cumsum(dim=1) - 0.5
213
+ y_embed = y_embed / h
214
+ x_embed = x_embed / w
215
+
216
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
217
+ return pe.permute(2, 0, 1) # C x H x W
218
+
219
+ def forward_with_coords(
220
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int]
221
+ ) -> torch.Tensor:
222
+ """Positionally encode points that are not normalized to [0,1]."""
223
+ coords = coords_input.clone()
224
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
225
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
226
+
227
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
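
A hedged sketch of the 2D `PromptEncoder` with a single foreground point. With no box prompt a padding point is appended, so the sparse output has two tokens; with no mask prompt the dense output is the broadcast `no_mask_embed`. The sizes below are illustrative.

```python
import torch

from segment_anything.modeling.prompt_encoder import PromptEncoder

pe = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)

coords = torch.tensor([[[512.0, 512.0]]])  # B x N x 2 point coordinates in the input frame
labels = torch.tensor([[1.0]])             # 1 = foreground, 0 = background

sparse, dense = pe(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)           # (1, 2, 256), (1, 256, 64, 64)
```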
segment_anything/modeling/prompt_encoder3D.py ADDED
@@ -0,0 +1,230 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+ from torch import nn
10
+
11
+ from typing import Any, Optional, Tuple, Type
12
+
13
+
14
+ class LayerNorm3d(nn.Module):
15
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
16
+ super().__init__()
17
+ self.weight = nn.Parameter(torch.ones(num_channels))
18
+ self.bias = nn.Parameter(torch.zeros(num_channels))
19
+ self.eps = eps
20
+
21
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
22
+ u = x.mean(1, keepdim=True)
23
+ s = (x - u).pow(2).mean(1, keepdim=True)
24
+ x = (x - u) / torch.sqrt(s + self.eps)
25
+ x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None]
26
+ return x
27
+
28
+
29
+ class PromptEncoder3D(nn.Module):
30
+ def __init__(
31
+ self,
32
+ embed_dim: int,
33
+ image_embedding_size: Tuple[int, int, int],
34
+ input_image_size: Tuple[int, int, int],
35
+ mask_in_chans: int,
36
+ activation: Type[nn.Module] = nn.GELU,
37
+ ) -> None:
38
+ """
39
+ Encodes prompts for input to SAM's mask decoder.
40
+
41
+ Arguments:
42
+ embed_dim (int): The prompts' embedding dimension
43
+ image_embedding_size (tuple(int, int, int)): The spatial size of the
44
+ image embedding, as (D, H, W).
45
+ input_image_size (tuple(int, int, int)): The padded size of the image as input
46
+ to the image encoder, as (D, H, W).
47
+ mask_in_chans (int): The number of hidden channels used for
48
+ encoding input masks.
49
+ activation (nn.Module): The activation to use when encoding
50
+ input masks.
51
+ """
52
+ super().__init__()
53
+ self.embed_dim = embed_dim
54
+ self.input_image_size = input_image_size
55
+ self.image_embedding_size = image_embedding_size
56
+ self.pe_layer = PositionEmbeddingRandom3D(embed_dim // 3)
57
+
58
+ self.num_point_embeddings: int = 2 # pos/neg point
59
+ point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
60
+ self.point_embeddings = nn.ModuleList(point_embeddings)
61
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
62
+
63
+ self.mask_input_size = (image_embedding_size[0], image_embedding_size[1], image_embedding_size[2])
64
+ self.mask_downscaling = nn.Sequential(
65
+ nn.Conv3d(1, mask_in_chans // 4, kernel_size=2, stride=2),
66
+ LayerNorm3d(mask_in_chans // 4),
67
+ activation(),
68
+ nn.Conv3d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
69
+ LayerNorm3d(mask_in_chans),
70
+ activation(),
71
+ nn.Conv3d(mask_in_chans, embed_dim, kernel_size=1),
72
+ )
73
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
74
+
75
+ def get_dense_pe(self) -> torch.Tensor:
76
+ """
77
+ Returns the positional encoding used to encode point prompts,
78
+ applied to a dense set of points the shape of the image encoding.
79
+
80
+ Returns:
81
+ torch.Tensor: Positional encoding with shape
82
+ 1x(embed_dim)x(embedding_x)x(embedding_y)x(embedding_z)
83
+ """
84
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0) # 1xXxYxZ
85
+
86
+ def _embed_points(
87
+ self,
88
+ points: torch.Tensor,
89
+ labels: torch.Tensor,
90
+ pad: bool,
91
+ ) -> torch.Tensor:
92
+ """Embeds point prompts."""
93
+ points = points + 0.5 # Shift to center of pixel
94
+ if pad:
95
+ padding_point = torch.zeros((points.shape[0], 1, 3), device=points.device)
96
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
97
+ points = torch.cat([points, padding_point], dim=1)
98
+ labels = torch.cat([labels, padding_label], dim=1)
99
+ point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
100
+ point_embedding[labels == -1] = 0.0
101
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
102
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
103
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
104
+ return point_embedding
105
+
106
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
107
+ """Embeds box prompts."""
108
+ boxes = boxes + 0.5 # Shift to center of pixel
109
+ coords = boxes.reshape(-1, 2, 2)
110
+ corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
111
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
112
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
113
+ return corner_embedding
114
+
115
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
116
+ """Embeds mask inputs."""
117
+ mask_embedding = self.mask_downscaling(masks)
118
+ return mask_embedding
119
+
120
+ def _get_batch_size(
121
+ self,
122
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
123
+ boxes: Optional[torch.Tensor],
124
+ masks: Optional[torch.Tensor],
125
+ ) -> int:
126
+ """
127
+ Gets the batch size of the output given the batch size of the input prompts.
128
+ """
129
+ if points is not None:
130
+ return points[0].shape[0]
131
+ elif boxes is not None:
132
+ return boxes.shape[0]
133
+ elif masks is not None:
134
+ return masks.shape[0]
135
+ else:
136
+ return 1
137
+
138
+ def _get_device(self) -> torch.device:
139
+ return self.point_embeddings[0].weight.device
140
+
141
+ def forward(
142
+ self,
143
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
144
+ boxes: Optional[torch.Tensor],
145
+ masks: Optional[torch.Tensor],
146
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
147
+ """
148
+ Embeds different types of prompts, returning both sparse and dense
149
+ embeddings.
150
+
151
+ Arguments:
152
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
153
+ and labels to embed.
154
+ boxes (torch.Tensor or none): boxes to embed
155
+ masks (torch.Tensor or none): masks to embed
156
+
157
+ Returns:
158
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
159
+ BxNx(embed_dim), where N is determined by the number of input points
160
+ and boxes.
161
+ torch.Tensor: dense embeddings for the masks, in the shape
162
+ Bx(embed_dim)x(embed_D)x(embed_H)x(embed_W)
163
+ """
164
+ bs = self._get_batch_size(points, boxes, masks)
165
+ sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
166
+ if points is not None:
167
+ coords, labels = points
168
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
169
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
170
+ if boxes is not None:
171
+ box_embeddings = self._embed_boxes(boxes)
172
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
173
+
174
+ if masks is not None:
175
+ dense_embeddings = self._embed_masks(masks)
176
+ else:
177
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1, 1).expand(
178
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1], self.image_embedding_size[2]
179
+ )
180
+
181
+ return sparse_embeddings, dense_embeddings
182
+
183
+
184
+ class PositionEmbeddingRandom3D(nn.Module):
185
+ """
186
+ Positional encoding using random spatial frequencies.
187
+ """
188
+
189
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
190
+ super().__init__()
191
+ if scale is None or scale <= 0.0:
192
+ scale = 1.0
193
+ self.register_buffer(
194
+ "positional_encoding_gaussian_matrix",
195
+ scale * torch.randn((3, num_pos_feats)),
196
+ )
197
+
198
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
199
+ """Positionally encode points that are normalized to [0,1]."""
200
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
201
+ coords = 2 * coords - 1
202
+ coords = coords @ self.positional_encoding_gaussian_matrix
203
+ coords = 2 * np.pi * coords
204
+ # outputs d_1 x ... x d_n x C shape
205
+ return torch.cat([torch.sin(coords), torch.cos(coords), torch.sin(coords)], dim=-1)
206
+
207
+ def forward(self, size: Tuple[int, int, int]) -> torch.Tensor:
208
+ """Generate positional encoding for a grid of the specified size."""
209
+ x, y, z = size
210
+ device: Any = self.positional_encoding_gaussian_matrix.device
211
+ grid = torch.ones((x, y, z), device=device, dtype=torch.float32)
212
+ y_embed = grid.cumsum(dim=0) - 0.5
213
+ x_embed = grid.cumsum(dim=1) - 0.5
214
+ z_embed = grid.cumsum(dim=2) - 0.5
215
+ y_embed = y_embed / y
216
+ x_embed = x_embed / x
217
+ z_embed = z_embed / z
218
+
219
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed, z_embed], dim=-1))
220
+ return pe.permute(3, 0, 1, 2) # C x X x Y x Z
221
+
222
+ def forward_with_coords(
223
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int, int]
224
+ ) -> torch.Tensor:
225
+ """Positionally encode points that are not normalized to [0,1]."""
226
+ coords = coords_input.clone()
227
+ coords[:, :, 0] = coords[:, :, 0] / image_size[0]
228
+ coords[:, :, 1] = coords[:, :, 1] / image_size[1]
229
+ coords[:, :, 2] = coords[:, :, 2] / image_size[2]
230
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
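
The 3D prompt encoder works the same way but with volumetric coordinates. Note that `embed_dim` should be divisible by 3, since the random positional encoding concatenates three `num_pos_feats`-wide blocks. A hedged sketch with one foreground voxel, using illustrative sizes:

```python
import torch

from segment_anything.modeling.prompt_encoder3D import PromptEncoder3D

pe3d = PromptEncoder3D(
    embed_dim=384,                      # divisible by 3 -> 3 * 128 positional features
    image_embedding_size=(8, 8, 8),
    input_image_size=(128, 128, 128),
    mask_in_chans=16,
)

coords = torch.tensor([[[64.0, 64.0, 64.0]]])  # B x N x 3 voxel coordinates
labels = torch.tensor([[1.0]])                 # 1 = foreground

sparse, dense = pe3d(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)               # (1, 2, 384), (1, 384, 8, 8, 8)
```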
segment_anything/modeling/sam.py ADDED
@@ -0,0 +1,174 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import Any, Dict, List, Tuple
12
+
13
+ from .image_encoder import ImageEncoderViT
14
+ from .mask_decoder import MaskDecoder
15
+ from .prompt_encoder import PromptEncoder
16
+
17
+
18
+ class Sam(nn.Module):
19
+ mask_threshold: float = 0.0
20
+ image_format: str = "RGB"
21
+
22
+ def __init__(
23
+ self,
24
+ image_encoder: ImageEncoderViT,
25
+ prompt_encoder: PromptEncoder,
26
+ mask_decoder: MaskDecoder,
27
+ pixel_mean: List[float] = [123.675, 116.28, 103.53],
28
+ pixel_std: List[float] = [58.395, 57.12, 57.375],
29
+ ) -> None:
30
+ """
31
+ SAM predicts object masks from an image and input prompts.
32
+
33
+ Arguments:
34
+ image_encoder (ImageEncoderViT): The backbone used to encode the
35
+ image into image embeddings that allow for efficient mask prediction.
36
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
37
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings
38
+ and encoded prompts.
39
+ pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
40
+ pixel_std (list(float)): Std values for normalizing pixels in the input image.
41
+ """
42
+ super().__init__()
43
+ self.image_encoder = image_encoder
44
+ self.prompt_encoder = prompt_encoder
45
+ self.mask_decoder = mask_decoder
46
+ self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
47
+ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
48
+
49
+ @property
50
+ def device(self) -> Any:
51
+ return self.pixel_mean.device
52
+
53
+ @torch.no_grad()
54
+ def forward(
55
+ self,
56
+ batched_input: List[Dict[str, Any]],
57
+ multimask_output: bool,
58
+ ) -> List[Dict[str, torch.Tensor]]:
59
+ """
60
+ Predicts masks end-to-end from provided images and prompts.
61
+ If prompts are not known in advance, using SamPredictor is
62
+ recommended over calling the model directly.
63
+
64
+ Arguments:
65
+ batched_input (list(dict)): A list over input images, each a
66
+ dictionary with the following keys. A prompt key can be
67
+ excluded if it is not present.
68
+ 'image': The image as a torch tensor in 3xHxW format,
69
+ already transformed for input to the model.
70
+ 'original_size': (tuple(int, int)) The original size of
71
+ the image before transformation, as (H, W).
72
+ 'point_coords': (torch.Tensor) Batched point prompts for
73
+ this image, with shape BxNx2. Already transformed to the
74
+ input frame of the model.
75
+ 'point_labels': (torch.Tensor) Batched labels for point prompts,
76
+ with shape BxN.
77
+ 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
78
+ Already transformed to the input frame of the model.
79
+ 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
80
+ in the form Bx1xHxW.
81
+ multimask_output (bool): Whether the model should predict multiple
82
+ disambiguating masks, or return a single mask.
83
+
84
+ Returns:
85
+ (list(dict)): A list over input images, where each element is
86
+ as dictionary with the following keys.
87
+ 'masks': (torch.Tensor) Batched binary mask predictions,
88
+ with shape BxCxHxW, where B is the number of input prompts,
89
+ C is determined by multimask_output, and (H, W) is the
90
+ original size of the image.
91
+ 'iou_predictions': (torch.Tensor) The model's predictions
92
+ of mask quality, in shape BxC.
93
+ 'low_res_logits': (torch.Tensor) Low resolution logits with
94
+ shape BxCxHxW, where H=W=256. Can be passed as mask input
95
+ to subsequent iterations of prediction.
96
+ """
97
+
98
+ input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
99
+ image_embeddings = self.image_encoder(input_images)
100
+
101
+ outputs = []
102
+ for image_record, curr_embedding in zip(batched_input, image_embeddings):
103
+ if "point_coords" in image_record:
104
+ points = (image_record["point_coords"], image_record["point_labels"])
105
+ else:
106
+ points = None
107
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
108
+ points=points,
109
+ boxes=image_record.get("boxes", None),
110
+ masks=image_record.get("mask_inputs", None),
111
+ )
112
+ low_res_masks, iou_predictions = self.mask_decoder(
113
+ image_embeddings=curr_embedding.unsqueeze(0),
114
+ image_pe=self.prompt_encoder.get_dense_pe(),
115
+ sparse_prompt_embeddings=sparse_embeddings,
116
+ dense_prompt_embeddings=dense_embeddings,
117
+ multimask_output=multimask_output,
118
+ )
119
+ masks = self.postprocess_masks(
120
+ low_res_masks,
121
+ input_size=image_record["image"].shape[-2:],
122
+ original_size=image_record["original_size"],
123
+ )
124
+ masks = masks > self.mask_threshold
125
+ outputs.append(
126
+ {
127
+ "masks": masks,
128
+ "iou_predictions": iou_predictions,
129
+ "low_res_logits": low_res_masks,
130
+ }
131
+ )
132
+ return outputs
133
+
134
+ def postprocess_masks(
135
+ self,
136
+ masks: torch.Tensor,
137
+ input_size: Tuple[int, ...],
138
+ original_size: Tuple[int, ...],
139
+ ) -> torch.Tensor:
140
+ """
141
+ Remove padding and upscale masks to the original image size.
142
+
143
+ Arguments:
144
+ masks (torch.Tensor): Batched masks from the mask_decoder,
145
+ in BxCxHxW format.
146
+ input_size (tuple(int, int)): The size of the image input to the
147
+ model, in (H, W) format. Used to remove padding.
148
+ original_size (tuple(int, int)): The original size of the image
149
+ before resizing for input to the model, in (H, W) format.
150
+
151
+ Returns:
152
+ (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
153
+ is given by original_size.
154
+ """
155
+ masks = F.interpolate(
156
+ masks,
157
+ (self.image_encoder.img_size, self.image_encoder.img_size),
158
+ mode="bilinear",
159
+ align_corners=False,
160
+ )
161
+ masks = masks[..., : input_size[0], : input_size[1]]
162
+ masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
163
+ return masks
164
+
165
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
166
+ """Normalize pixel values and pad to a square input."""
167
+ # Normalize colors
168
+ x = (x - self.pixel_mean) / self.pixel_std
169
+ # Pad
170
+ h, w = x.shape[-2:]
171
+ padh = self.image_encoder.img_size - h
172
+ padw = self.image_encoder.img_size - w
173
+ x = F.pad(x, (0, padw, 0, padh))
174
+ return x
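Since prompts are assumed known in advance here, the list-of-dicts forward interface above can be driven directly. A minimal sketch (the run_sam_batch helper, the box coordinates, and the image shapes are illustrative assumptions, not part of this repo):

import torch

def run_sam_batch(sam, image_hwc_uint8):
    # `sam` is assumed to be an already-built Sam instance; the image is assumed
    # to be resized so its long side equals sam.image_encoder.img_size.
    image = torch.as_tensor(image_hwc_uint8, device=sam.device).permute(2, 0, 1).float()
    h, w = image.shape[-2:]
    batched_input = [{
        "image": image,                    # 3xHxW, already transformed for the model
        "original_size": (h, w),           # size before the resize transform
        "boxes": torch.tensor([[10.0, 10.0, 100.0, 100.0]], device=sam.device),  # Bx4 XYXY, placeholder values
    }]
    outputs = sam(batched_input, multimask_output=False)
    return outputs[0]["masks"], outputs[0]["iou_predictions"]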
segment_anything/modeling/sam3D.py ADDED
@@ -0,0 +1,176 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import Any, Dict, List, Tuple
12
+
13
+ from .image_encoder3D import ImageEncoderViT3D
14
+ from .mask_decoder3D import MaskDecoder3D
15
+ from .prompt_encoder3D import PromptEncoder3D
16
+
17
+
18
+ class Sam3D(nn.Module):
19
+ mask_threshold: float = 0.0
20
+ image_format: str = "L"
21
+
22
+ def __init__(
23
+ self,
24
+ image_encoder: ImageEncoderViT3D,
25
+ prompt_encoder: PromptEncoder3D,
26
+ mask_decoder: MaskDecoder3D,
27
+ pixel_mean: List[float] = [123.675],
28
+ pixel_std: List[float] = [58.395],
29
+ ) -> None:
30
+ """
31
+ SAM predicts object masks from an image and input prompts.
32
+
33
+ Arguments:
34
+ image_encoder (ImageEncoderViT): The backbone used to encode the
35
+ image into image embeddings that allow for efficient mask prediction.
36
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
37
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings
38
+ and encoded prompts.
39
+ pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
40
+ pixel_std (list(float)): Std values for normalizing pixels in the input image.
41
+ """
42
+ super().__init__()
43
+ self.image_encoder = image_encoder
44
+ self.prompt_encoder = prompt_encoder
45
+ self.mask_decoder = mask_decoder
46
+ self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
47
+ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
48
+
49
+ @property
50
+ def device(self) -> Any:
51
+ return self.pixel_mean.device
52
+
53
+ @torch.no_grad()
54
+ def forward(
55
+ self,
56
+ batched_input: List[Dict[str, Any]],
57
+ multimask_output: bool,
58
+ ) -> List[Dict[str, torch.Tensor]]:
59
+ """
60
+ Predicts masks end-to-end from provided images and prompts.
61
+ If prompts are not known in advance, using SamPredictor is
62
+ recommended over calling the model directly.
63
+
64
+ Arguments:
65
+ batched_input (list(dict)): A list over input images, each a
66
+ dictionary with the following keys. A prompt key can be
67
+ excluded if it is not present.
68
+ 'image': The input volume as a torch tensor in 1xDxHxW format,
69
+ already transformed for input to the model.
70
+ 'original_size': (tuple(int, int, int)) The original size of
71
+ the volume before transformation, as (D, H, W).
72
+ 'point_coords': (torch.Tensor) Batched point prompts for
73
+ this volume, with shape BxNx3. Already transformed to the
74
+ input frame of the model.
75
+ 'point_labels': (torch.Tensor) Batched labels for point prompts,
76
+ with shape BxN.
77
+ 'boxes': (torch.Tensor) Batched box inputs, with shape Bx6.
78
+ Already transformed to the input frame of the model.
79
+ 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
80
+ in the form Bx1xDxHxW.
81
+ multimask_output (bool): Whether the model should predict multiple
82
+ disambiguating masks, or return a single mask.
83
+
84
+ Returns:
85
+ (list(dict)): A list over input images, where each element is
86
+ a dictionary with the following keys.
87
+ 'masks': (torch.Tensor) Batched binary mask predictions,
88
+ with shape BxCxDxHxW, where B is the number of input prompts,
89
+ C is determined by multimask_output, and (D, H, W) is the
90
+ original size of the volume.
91
+ 'iou_predictions': (torch.Tensor) The model's predictions
92
+ of mask quality, in shape BxC.
93
+ 'low_res_logits': (torch.Tensor) Low resolution logits with
94
+ shape BxCxDxHxW. Can be passed as mask input
95
+ to subsequent iterations of prediction.
96
+ """
97
+ input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
98
+ image_embeddings = self.image_encoder(input_images)
99
+
100
+ outputs = []
101
+ for image_record, curr_embedding in zip(batched_input, image_embeddings):
102
+ if "point_coords" in image_record:
103
+ points = (image_record["point_coords"], image_record["point_labels"])
104
+ else:
105
+ points = None
106
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
107
+ points=points,
108
+ boxes=image_record.get("boxes", None),
109
+ masks=image_record.get("mask_inputs", None),
110
+ )
111
+ low_res_masks, iou_predictions = self.mask_decoder(
112
+ image_embeddings=curr_embedding.unsqueeze(0),
113
+ image_pe=self.prompt_encoder.get_dense_pe(),
114
+ sparse_prompt_embeddings=sparse_embeddings,
115
+ dense_prompt_embeddings=dense_embeddings,
116
+ multimask_output=multimask_output,
117
+ )
118
+ masks = self.postprocess_masks(
119
+ low_res_masks,
120
+ input_size=image_record["image"].shape[-3:],
121
+ original_size=image_record["original_size"],
122
+ )
123
+ masks = masks > self.mask_threshold
124
+ outputs.append(
125
+ {
126
+ "masks": masks,
127
+ "iou_predictions": iou_predictions,
128
+ "low_res_logits": low_res_masks,
129
+ }
130
+ )
131
+ return outputs
132
+
133
+ def postprocess_masks(
134
+ self,
135
+ masks: torch.Tensor,
136
+ input_size: Tuple[int, ...],
137
+ original_size: Tuple[int, ...],
138
+ ) -> torch.Tensor:
139
+ """
140
+ Remove padding and upscale masks to the original image size.
141
+
142
+ Arguments:
143
+ masks (torch.Tensor): Batched masks from the mask_decoder,
144
+ in BxCxDxHxW format.
145
+ input_size (tuple(int, int, int)): The size of the volume input to the
146
+ model, in (D, H, W) format. Used to remove padding.
147
+ original_size (tuple(int, int, int)): The original size of the volume
148
+ before resizing for input to the model, in (D, H, W) format.
149
+
150
+ Returns:
151
+ (torch.Tensor): Batched masks in BxCxDxHxW format, where (D, H, W)
152
+ is given by original_size.
153
+ """
154
+ masks = F.interpolate(
155
+ masks,
156
+ (self.image_encoder.img_size, self.image_encoder.img_size, self.image_encoder.img_size),
157
+ mode="bilinear",
158
+ align_corners=False,
159
+ )
160
+ masks = masks[..., : input_size[0], : input_size[1], : input_size[2]]
161
+ masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
162
+ return masks
163
+
164
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
165
+ """Normalize pixel values and pad to a square input."""
166
+ # Normalize colors
167
+ x = (x - self.pixel_mean) / self.pixel_std
168
+
169
+ # Pad
170
+ d, h, w = x.shape[-3:]
171
+ padd = self.image_encoder.img_size - d
172
+ padh = self.image_encoder.img_size - h
173
+ padw = self.image_encoder.img_size - w
174
+ x = F.pad(x, (0, padw, 0, padh, 0, padd))
175
+ return x
176
+
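The volumetric preprocess above relies on F.pad's convention of taking (left, right) pairs starting from the last dimension. A small self-contained check of that ordering (the 128-voxel target size is only an example value, not read from this repo's configs):

import torch
import torch.nn.functional as F

img_size = 128                              # example target size
x = torch.zeros(1, 100, 110, 120)           # 1 x D x H x W volume
d, h, w = x.shape[-3:]
# (0, padw, 0, padh, 0, padd): W is padded first, then H, then D
x = F.pad(x, (0, img_size - w, 0, img_size - h, 0, img_size - d))
print(x.shape)                              # torch.Size([1, 128, 128, 128])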
segment_anything/modeling/sam_model.py ADDED
@@ -0,0 +1,106 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from typing import Any, Dict, List, Tuple
10
+ from .image_encoder import ImageEncoderViT
11
+ from .mask_decoder import MaskDecoder
12
+ from .prompt_encoder import PromptEncoder
13
+
14
+
15
+ class Sam(nn.Module):
16
+ mask_threshold: float = 0.0
17
+ image_format: str = "RGB"
18
+
19
+ def __init__(
20
+ self,
21
+ image_encoder: ImageEncoderViT,
22
+ prompt_encoder: PromptEncoder,
23
+ mask_decoder: MaskDecoder,
24
+ pixel_mean: List[float] = [123.675, 116.28, 103.53],
25
+ pixel_std: List[float] = [58.395, 57.12, 57.375],
26
+ ) -> None:
27
+ """
28
+ SAM predicts object masks from an image and input prompts.
29
+
30
+ Arguments:
31
+ image_encoder (ImageEncoderViT): The backbone used to encode the
32
+ image into image embeddings that allow for efficient mask prediction.
33
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
34
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings
35
+ and encoded prompts.
36
+ pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
37
+ pixel_std (list(float)): Std values for normalizing pixels in the input image.
38
+ """
39
+ super().__init__()
40
+ self.image_encoder = image_encoder
41
+ self.prompt_encoder = prompt_encoder
42
+ self.mask_decoder = mask_decoder
43
+ self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
44
+ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
45
+
46
+ @property
47
+ def device(self) -> Any:
48
+ return self.pixel_mean.device
49
+
50
+ def forward(self, batched_input: Dict[str, Any], multimask_output: bool) -> List[Dict[str, torch.Tensor]]:
51
+
52
+ input_images = batched_input.get("image")
53
+ image_embeddings = self.image_encoder(input_images)
54
+
55
+ if "point_coords" in batched_input and batched_input["point_coords"] != None:
56
+ points = (batched_input["point_coords"], batched_input["point_labels"])
57
+ else:
58
+ points = None
59
+
60
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
61
+ points=points,
62
+ boxes=batched_input.get("boxes", None),
63
+ masks=batched_input.get("mask_inputs", None),
64
+ ) # sparse_embeddings:[2, 3, 256], dense_embeddings:[2, 256, 64, 64]
65
+
66
+ low_res_masks, iou_predictions = self.mask_decoder(
67
+ image_embeddings=image_embeddings,
68
+ image_pe=self.prompt_encoder.get_dense_pe(), # 1x(256)x(64)x(64)
69
+ sparse_prompt_embeddings=sparse_embeddings,
70
+ dense_prompt_embeddings=dense_embeddings,
71
+ multimask_output=multimask_output,
72
+ )
73
+
74
+ masks = self.postprocess_masks(
75
+ low_res_masks,
76
+ input_size=batched_input["image"].shape[-2:],
77
+ original_size=batched_input["original_size"],
78
+ )
79
+
80
+ outputs = {
81
+ "masks": masks,
82
+ "iou_predictions": iou_predictions,
83
+ "low_res_logits": low_res_masks,
84
+ }
85
+
86
+ return outputs
87
+
88
+ def postprocess_masks(self, masks: torch.Tensor, input_size: Tuple[int, ...], original_size: Tuple[int, ...]) -> torch.Tensor:
89
+ masks = F.interpolate(
90
+ masks,
91
+ (self.image_encoder.img_size, self.image_encoder.img_size), mode="bilinear", align_corners=False,) #[1,1024,1024]
92
+
93
+ masks = masks[..., : input_size[0], : input_size[1]]
94
+ masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
95
+ return masks
96
+
97
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
98
+ """Normalize pixel values and pad to a square input."""
99
+ # Normalize colors
100
+ x = (x - self.pixel_mean) / self.pixel_std
101
+ # Pad
102
+ h, w = x.shape[-2:]
103
+ padh = self.image_encoder.img_size - h
104
+ padw = self.image_encoder.img_size - w
105
+ x = F.pad(x, (0, padw, 0, padh))
106
+ return x
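Unlike the list-of-dicts variant, this Sam.forward takes a single dict whose tensors are already batched and does not call preprocess. A sketch of a call (the helper name, shapes, and prompt values are illustrative assumptions):

import torch

def run_sam_dict(sam, images, boxes):
    # `sam` is assumed to be a built Sam instance and `images` a Bx3xHxW float
    # tensor that has already been normalized/padded, since forward() above
    # feeds it straight into the image encoder.
    batched_input = {
        "image": images,
        "original_size": tuple(images.shape[-2:]),  # (H, W); illustrative simplification
        "boxes": boxes,                              # Bx4 box prompts in XYXY
        "point_coords": None,                        # no point prompts in this sketch
    }
    out = sam(batched_input, multimask_output=False)
    return out["masks"], out["iou_predictions"]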
segment_anything/modeling/transformer.py ADDED
@@ -0,0 +1,244 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ from torch import Tensor, nn
9
+
10
+ import math
11
+ from typing import Tuple, Type
12
+
13
+ from .common import MLPBlock
14
+
15
+
16
+ class TwoWayTransformer(nn.Module):
17
+ def __init__(
18
+ self,
19
+ depth: int,
20
+ embedding_dim: int,
21
+ num_heads: int,
22
+ mlp_dim: int,
23
+ activation: Type[nn.Module] = nn.ReLU,
24
+ attention_downsample_rate: int = 2,
25
+ ) -> None:
26
+ """
27
+ A transformer decoder that attends to an input image using
28
+ queries whose positional embedding is supplied.
29
+
30
+ Args:
31
+ depth (int): number of layers in the transformer
32
+ embedding_dim (int): the channel dimension for the input embeddings
33
+ num_heads (int): the number of heads for multihead attention. Must
34
+ divide embedding_dim
35
+ mlp_dim (int): the channel dimension internal to the MLP block
36
+ activation (nn.Module): the activation to use in the MLP block
37
+ """
38
+ super().__init__()
39
+ self.depth = depth
40
+ self.embedding_dim = embedding_dim
41
+ self.num_heads = num_heads
42
+ self.mlp_dim = mlp_dim
43
+ self.layers = nn.ModuleList()
44
+
45
+ for i in range(depth):
46
+ self.layers.append(
47
+ TwoWayAttentionBlock(
48
+ embedding_dim=embedding_dim,
49
+ num_heads=num_heads,
50
+ mlp_dim=mlp_dim,
51
+ activation=activation,
52
+ attention_downsample_rate=attention_downsample_rate,
53
+ skip_first_layer_pe=(i == 0),
54
+ )
55
+ )
56
+
57
+ self.final_attn_token_to_image = Attention(
58
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
59
+ )
60
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
61
+
62
+ def forward(
63
+ self,
64
+ image_embedding: Tensor,
65
+ image_pe: Tensor,
66
+ point_embedding: Tensor,
67
+ ) -> Tuple[Tensor, Tensor]:
68
+ """
69
+ Args:
70
+ image_embedding (torch.Tensor): image to attend to. Should be shape
71
+ B x embedding_dim x h x w for any h and w.
72
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
73
+ have the same shape as image_embedding.
74
+ point_embedding (torch.Tensor): the embedding to add to the query points.
75
+ Must have shape B x N_points x embedding_dim for any N_points.
76
+
77
+ Returns:
78
+ torch.Tensor: the processed point_embedding
79
+ torch.Tensor: the processed image_embedding
80
+ """
81
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
82
+ bs, c, h, w = image_embedding.shape
83
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
84
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
85
+
86
+ # Prepare queries
87
+ queries = point_embedding
88
+ keys = image_embedding
89
+
90
+ # Apply transformer blocks and final layernorm
91
+ for layer in self.layers:
92
+ queries, keys = layer(
93
+ queries=queries,
94
+ keys=keys,
95
+ query_pe=point_embedding,
96
+ key_pe=image_pe,
97
+ )
98
+
99
+ # Apply the final attention layer from the points to the image
100
+ q = queries + point_embedding
101
+ k = keys + image_pe
102
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
103
+ queries = queries + attn_out
104
+ queries = self.norm_final_attn(queries)
105
+
106
+ return queries, keys
107
+
108
+
109
+ class TwoWayAttentionBlock(nn.Module):
110
+ def __init__(
111
+ self,
112
+ embedding_dim: int,
113
+ num_heads: int,
114
+ mlp_dim: int = 2048,
115
+ activation: Type[nn.Module] = nn.ReLU,
116
+ attention_downsample_rate: int = 2,
117
+ skip_first_layer_pe: bool = False,
118
+ ) -> None:
119
+ """
120
+ A transformer block with four layers: (1) self-attention of sparse
121
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
122
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
123
+ inputs.
124
+
125
+ Arguments:
126
+ embedding_dim (int): the channel dimension of the embeddings
127
+ num_heads (int): the number of heads in the attention layers
128
+ mlp_dim (int): the hidden dimension of the mlp block
129
+ activation (nn.Module): the activation of the mlp block
130
+ skip_first_layer_pe (bool): skip the PE on the first layer
131
+ """
132
+ super().__init__()
133
+ self.self_attn = Attention(embedding_dim, num_heads)
134
+ self.norm1 = nn.LayerNorm(embedding_dim)
135
+
136
+ self.cross_attn_token_to_image = Attention(
137
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
138
+ )
139
+ self.norm2 = nn.LayerNorm(embedding_dim)
140
+
141
+ self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
142
+ self.norm3 = nn.LayerNorm(embedding_dim)
143
+
144
+ self.norm4 = nn.LayerNorm(embedding_dim)
145
+ self.cross_attn_image_to_token = Attention(
146
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
147
+ )
148
+
149
+ self.skip_first_layer_pe = skip_first_layer_pe
150
+
151
+ def forward(
152
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
153
+ ) -> Tuple[Tensor, Tensor]:
154
+ # Self attention block
155
+ if self.skip_first_layer_pe:
156
+ queries = self.self_attn(q=queries, k=queries, v=queries)
157
+ else:
158
+ q = queries + query_pe
159
+ attn_out = self.self_attn(q=q, k=q, v=queries)
160
+ queries = queries + attn_out
161
+ queries = self.norm1(queries)
162
+
163
+ # Cross attention block, tokens attending to image embedding
164
+ q = queries + query_pe
165
+ k = keys + key_pe
166
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
167
+ queries = queries + attn_out
168
+ queries = self.norm2(queries)
169
+
170
+ # MLP block
171
+ mlp_out = self.mlp(queries)
172
+ queries = queries + mlp_out
173
+ queries = self.norm3(queries)
174
+
175
+ # Cross attention block, image embedding attending to tokens
176
+ q = queries + query_pe
177
+ k = keys + key_pe
178
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
179
+ keys = keys + attn_out
180
+ keys = self.norm4(keys)
181
+
182
+ return queries, keys
183
+
184
+
185
+ class Attention(nn.Module):
186
+ """
187
+ An attention layer that allows for downscaling the size of the embedding
188
+ after projection to queries, keys, and values.
189
+ """
190
+
191
+ def __init__(
192
+ self,
193
+ embedding_dim: int,
194
+ num_heads: int,
195
+ downsample_rate: int = 1,
196
+ ) -> None:
197
+ super().__init__()
198
+ self.embedding_dim = embedding_dim
199
+ self.internal_dim = embedding_dim // downsample_rate
200
+ self.num_heads = num_heads
201
+ assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
202
+
203
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
204
+ self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
205
+ self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
206
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
207
+
208
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
209
+ b, n, c = x.shape
210
+ x = x.reshape(b, n, num_heads, c // num_heads)
211
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
212
+
213
+ def _recombine_heads(self, x: Tensor) -> Tensor:
214
+ b, n_heads, n_tokens, c_per_head = x.shape
215
+ x = x.transpose(1, 2)
216
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
217
+
218
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
219
+ # Input projections
220
+ q = self.q_proj(q.to(self.q_proj.weight.dtype)) #todo
221
+ k = self.k_proj(k.to(self.k_proj.weight.dtype)) #todo
222
+ v = self.v_proj(v.to(self.v_proj.weight.dtype)) #todo
223
+
224
+ # q = self.q_proj(q)
225
+ # k = self.k_proj(k)
226
+ # v = self.v_proj(v)
227
+
228
+ # Separate into heads
229
+ q = self._separate_heads(q, self.num_heads)
230
+ k = self._separate_heads(k, self.num_heads)
231
+ v = self._separate_heads(v, self.num_heads)
232
+
233
+ # Attention
234
+ _, _, _, c_per_head = q.shape
235
+ attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
236
+ attn = attn / math.sqrt(c_per_head)
237
+ attn = torch.softmax(attn, dim=-1)
238
+
239
+ # Get output
240
+ out = attn @ v
241
+ out = self._recombine_heads(out)
242
+ out = self.out_proj(out)
243
+
244
+ return out
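A quick shape check of the downsampled Attention layer above (a sketch; it assumes the repo root is importable and the sizes are illustrative):

import torch
from segment_anything.modeling.transformer import Attention

attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)  # internal dim 128
q = torch.randn(2, 5, 256)      # B x N_tokens x C (e.g. sparse prompt tokens)
k = torch.randn(2, 4096, 256)   # B x N_image_tokens x C (a flattened 64x64 grid)
v = torch.randn(2, 4096, 256)
out = attn(q, k, v)
print(out.shape)                # torch.Size([2, 5, 256]) -- projected back to embedding_dim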
segment_anything/predictor.py ADDED
@@ -0,0 +1,271 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ from segment_anything.modeling import Sam
11
+
12
+ from typing import Optional, Tuple
13
+
14
+ from .utils.transforms import ResizeLongestSide
15
+
16
+
17
+ class SamPredictor:
18
+ def __init__(
19
+ self,
20
+ sam_model: Sam,
21
+ ) -> None:
22
+ """
23
+ Uses SAM to calculate the image embedding for an image, and then
24
+ allow repeated, efficient mask prediction given prompts.
25
+
26
+ Arguments:
27
+ sam_model (Sam): The model to use for mask prediction.
28
+ """
29
+ super().__init__()
30
+ self.model = sam_model
31
+ self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
32
+ self.reset_image()
33
+
34
+ def set_image(
35
+ self,
36
+ image: np.ndarray,
37
+ image_format: str = "RGB",
38
+ ) -> None:
39
+ """
40
+ Calculates the image embeddings for the provided image, allowing
41
+ masks to be predicted with the 'predict' method.
42
+
43
+ Arguments:
44
+ image (np.ndarray): The image for calculating masks. Expects an
45
+ image in HWC uint8 format, with pixel values in [0, 255].
46
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
47
+ """
48
+ assert image_format in [
49
+ "RGB",
50
+ "BGR",
51
+ ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
52
+ if image_format != self.model.image_format:
53
+ image = image[..., ::-1]
54
+
55
+ # Transform the image to the form expected by the model
56
+ input_image = self.transform.apply_image(image)
57
+ input_image_torch = torch.as_tensor(input_image, device=self.device)
58
+ input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
59
+
60
+ self.set_torch_image(input_image_torch, image.shape[:2])
61
+
62
+ @torch.no_grad()
63
+ def set_torch_image(
64
+ self,
65
+ transformed_image: torch.Tensor,
66
+ original_image_size: Tuple[int, ...],
67
+ ) -> None:
68
+ """
69
+ Calculates the image embeddings for the provided image, allowing
70
+ masks to be predicted with the 'predict' method. Expects the input
71
+ image to be already transformed to the format expected by the model.
72
+
73
+ Arguments:
74
+ transformed_image (torch.Tensor): The input image, with shape
75
+ 1x3xHxW, which has been transformed with ResizeLongestSide.
76
+ original_image_size (tuple(int, int)): The size of the image
77
+ before transformation, in (H, W) format.
78
+ """
79
+ assert (
80
+ len(transformed_image.shape) == 4
81
+ and transformed_image.shape[1] == 3
82
+ and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
83
+ ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
84
+ self.reset_image()
85
+
86
+ self.original_size = original_image_size
87
+ self.input_size = tuple(transformed_image.shape[-2:])
88
+ input_image = self.model.preprocess(transformed_image)
89
+ self.features = self.model.image_encoder(input_image)
90
+ self.is_image_set = True
91
+
92
+ def predict(
93
+ self,
94
+ point_coords: Optional[np.ndarray] = None,
95
+ point_labels: Optional[np.ndarray] = None,
96
+ box: Optional[np.ndarray] = None,
97
+ mask_input: Optional[np.ndarray] = None,
98
+ multimask_output: bool = True,
99
+ return_logits: bool = False,
100
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
101
+ """
102
+ Predict masks for the given input prompts, using the currently set image.
103
+
104
+ Arguments:
105
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
106
+ model. Each point is in (X,Y) in pixels.
107
+ point_labels (np.ndarray or None): A length N array of labels for the
108
+ point prompts. 1 indicates a foreground point and 0 indicates a
109
+ background point.
110
+ box (np.ndarray or None): A length 4 array given a box prompt to the
111
+ model, in XYXY format.
112
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
113
+ coming from a previous prediction iteration. Has form 1xHxW, where
114
+ for SAM, H=W=256.
115
+ multimask_output (bool): If true, the model will return three masks.
116
+ For ambiguous input prompts (such as a single click), this will often
117
+ produce better masks than a single prediction. If only a single
118
+ mask is needed, the model's predicted quality score can be used
119
+ to select the best mask. For non-ambiguous prompts, such as multiple
120
+ input prompts, multimask_output=False can give better results.
121
+ return_logits (bool): If true, returns un-thresholded masks logits
122
+ instead of a binary mask.
123
+
124
+ Returns:
125
+ (np.ndarray): The output masks in CxHxW format, where C is the
126
+ number of masks, and (H, W) is the original image size.
127
+ (np.ndarray): An array of length C containing the model's
128
+ predictions for the quality of each mask.
129
+ (np.ndarray): An array of shape CxHxW, where C is the number
130
+ of masks and H=W=256. These low resolution logits can be passed to
131
+ a subsequent iteration as mask input.
132
+ """
133
+ if not self.is_image_set:
134
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
135
+
136
+ # Transform input prompts
137
+ coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
138
+ if point_coords is not None:
139
+ assert (
140
+ point_labels is not None
141
+ ), "point_labels must be supplied if point_coords is supplied."
142
+
143
+ point_coords = self.transform.apply_coords(point_coords, self.original_size)
144
+ coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
145
+ labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
146
+ coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
147
+
148
+ if box is not None:
149
+ box = self.transform.apply_boxes(box, self.original_size)
150
+ box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
151
+ box_torch = box_torch[None, :]
152
+ if mask_input is not None:
153
+ mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
154
+ mask_input_torch = mask_input_torch[None, :, :, :]
155
+
156
+ masks, iou_predictions, low_res_masks = self.predict_torch(
157
+ coords_torch,
158
+ labels_torch,
159
+ box_torch,
160
+ mask_input_torch,
161
+ multimask_output,
162
+ return_logits=return_logits,
163
+ )
164
+
165
+ masks = masks[0].detach().cpu().numpy()
166
+ iou_predictions = iou_predictions[0].detach().cpu().numpy()
167
+ low_res_masks = low_res_masks[0].detach().cpu().numpy()
168
+ return masks, iou_predictions, low_res_masks
169
+
170
+ @torch.no_grad()
171
+ def predict_torch(
172
+ self,
173
+ point_coords: Optional[torch.Tensor],
174
+ point_labels: Optional[torch.Tensor],
175
+ boxes: Optional[torch.Tensor] = None,
176
+ mask_input: Optional[torch.Tensor] = None,
177
+ multimask_output: bool = True,
178
+ return_logits: bool = False,
179
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
180
+ """
181
+ Predict masks for the given input prompts, using the currently set image.
182
+ Input prompts are batched torch tensors and are expected to already be
183
+ transformed to the input frame using ResizeLongestSide.
184
+
185
+ Arguments:
186
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
187
+ model. Each point is in (X,Y) in pixels.
188
+ point_labels (torch.Tensor or None): A BxN array of labels for the
189
+ point prompts. 1 indicates a foreground point and 0 indicates a
190
+ background point.
191
+ boxes (np.ndarray or None): A Bx4 array given a box prompt to the
192
+ model, in XYXY format.
193
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
194
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
195
+ for SAM, H=W=256. Masks returned by a previous iteration of the
196
+ predict method do not need further transformation.
197
+ multimask_output (bool): If true, the model will return three masks.
198
+ For ambiguous input prompts (such as a single click), this will often
199
+ produce better masks than a single prediction. If only a single
200
+ mask is needed, the model's predicted quality score can be used
201
+ to select the best mask. For non-ambiguous prompts, such as multiple
202
+ input prompts, multimask_output=False can give better results.
203
+ return_logits (bool): If true, returns un-thresholded masks logits
204
+ instead of a binary mask.
205
+
206
+ Returns:
207
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
208
+ number of masks, and (H, W) is the original image size.
209
+ (torch.Tensor): An array of shape BxC containing the model's
210
+ predictions for the quality of each mask.
211
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
212
+ of masks and H=W=256. These low res logits can be passed to
213
+ a subsequent iteration as mask input.
214
+ """
215
+ if not self.is_image_set:
216
+ raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
217
+
218
+ if point_coords is not None:
219
+ points = (point_coords, point_labels)
220
+ else:
221
+ points = None
222
+
223
+ # Embed prompts
224
+ sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
225
+ points=points,
226
+ boxes=boxes,
227
+ masks=mask_input,
228
+ )
229
+
230
+ # Predict masks
231
+ low_res_masks, iou_predictions = self.model.mask_decoder(
232
+ image_embeddings=self.features,
233
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
234
+ sparse_prompt_embeddings=sparse_embeddings,
235
+ dense_prompt_embeddings=dense_embeddings,
236
+ multimask_output=multimask_output,
237
+ )
238
+
239
+ # Upscale the masks to the original image resolution
240
+ masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
241
+
242
+ if not return_logits:
243
+ masks = masks > self.model.mask_threshold
244
+
245
+ return masks, iou_predictions, low_res_masks
246
+
247
+ def get_image_embedding(self) -> torch.Tensor:
248
+ """
249
+ Returns the image embeddings for the currently set image, with
250
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
251
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
252
+ """
253
+ if not self.is_image_set:
254
+ raise RuntimeError(
255
+ "An image must be set with .set_image(...) to generate an embedding."
256
+ )
257
+ assert self.features is not None, "Features must exist if an image has been set."
258
+ return self.features
259
+
260
+ @property
261
+ def device(self) -> torch.device:
262
+ return self.model.device
263
+
264
+ def reset_image(self) -> None:
265
+ """Resets the currently set image."""
266
+ self.is_image_set = False
267
+ self.features = None
268
+ self.orig_h = None
269
+ self.orig_w = None
270
+ self.input_h = None
271
+ self.input_w = None
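The intended flow for the 2D predictor above is one set_image call followed by repeated predict calls. A sketch (assumes `sam` is a built 2D Sam instance and that the repo's ResizeLongestSide transforms module is available):

import numpy as np
from segment_anything.predictor import SamPredictor

def segment_with_click(sam, image, x, y):
    # `image` is assumed to be an HxWx3 uint8 RGB array; (x, y) is one
    # foreground click in pixel coordinates.
    predictor = SamPredictor(sam)
    predictor.set_image(image, image_format="RGB")   # embeds the image once
    masks, scores, low_res_logits = predictor.predict(
        point_coords=np.array([[x, y]]),
        point_labels=np.array([1]),                  # 1 = foreground point
        multimask_output=True,                       # three candidates for an ambiguous click
    )
    return masks[np.argmax(scores)]                  # keep the highest-scoring mask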
segment_anything/utils/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .transforms3D import *
segment_anything/utils/amg.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ import math
11
+ from copy import deepcopy
12
+ from itertools import product
13
+ from typing import Any, Dict, Generator, ItemsView, List, Tuple
14
+
15
+
16
+ class MaskData:
17
+ """
18
+ A structure for storing masks and their related data in batched format.
19
+ Implements basic filtering and concatenation.
20
+ """
21
+
22
+ def __init__(self, **kwargs) -> None:
23
+ for v in kwargs.values():
24
+ assert isinstance(
25
+ v, (list, np.ndarray, torch.Tensor)
26
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
27
+ self._stats = dict(**kwargs)
28
+
29
+ def __setitem__(self, key: str, item: Any) -> None:
30
+ assert isinstance(
31
+ item, (list, np.ndarray, torch.Tensor)
32
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
33
+ self._stats[key] = item
34
+
35
+ def __delitem__(self, key: str) -> None:
36
+ del self._stats[key]
37
+
38
+ def __getitem__(self, key: str) -> Any:
39
+ return self._stats[key]
40
+
41
+ def items(self) -> ItemsView[str, Any]:
42
+ return self._stats.items()
43
+
44
+ def filter(self, keep: torch.Tensor) -> None:
45
+ for k, v in self._stats.items():
46
+ if v is None:
47
+ self._stats[k] = None
48
+ elif isinstance(v, torch.Tensor):
49
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
50
+ elif isinstance(v, np.ndarray):
51
+ self._stats[k] = v[keep.detach().cpu().numpy()]
52
+ elif isinstance(v, list) and keep.dtype == torch.bool:
53
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
54
+ elif isinstance(v, list):
55
+ self._stats[k] = [v[i] for i in keep]
56
+ else:
57
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
58
+
59
+ def cat(self, new_stats: "MaskData") -> None:
60
+ for k, v in new_stats.items():
61
+ if k not in self._stats or self._stats[k] is None:
62
+ self._stats[k] = deepcopy(v)
63
+ elif isinstance(v, torch.Tensor):
64
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
65
+ elif isinstance(v, np.ndarray):
66
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
67
+ elif isinstance(v, list):
68
+ self._stats[k] = self._stats[k] + deepcopy(v)
69
+ else:
70
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
71
+
72
+ def to_numpy(self) -> None:
73
+ for k, v in self._stats.items():
74
+ if isinstance(v, torch.Tensor):
75
+ self._stats[k] = v.detach().cpu().numpy()
76
+
77
+
78
+ def is_box_near_crop_edge(
79
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
80
+ ) -> torch.Tensor:
81
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
82
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
83
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
84
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
85
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
86
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
87
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
88
+ return torch.any(near_crop_edge, dim=1)
89
+
90
+
91
+ def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
92
+ box_xywh = deepcopy(box_xyxy)
93
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
94
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
95
+ return box_xywh
96
+
97
+
98
+ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
99
+ assert len(args) > 0 and all(
100
+ len(a) == len(args[0]) for a in args
101
+ ), "Batched iteration must have inputs of all the same size."
102
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
103
+ for b in range(n_batches):
104
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
105
+
106
+
107
+ def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
108
+ """
109
+ Encodes masks to an uncompressed RLE, in the format expected by
110
+ pycoco tools.
111
+ """
112
+ # Put in fortran order and flatten h,w
113
+ b, h, w = tensor.shape
114
+ tensor = tensor.permute(0, 2, 1).flatten(1)
115
+
116
+ # Compute change indices
117
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
118
+ change_indices = diff.nonzero()
119
+
120
+ # Encode run length
121
+ out = []
122
+ for i in range(b):
123
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
124
+ cur_idxs = torch.cat(
125
+ [
126
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
127
+ cur_idxs + 1,
128
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
129
+ ]
130
+ )
131
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
132
+ counts = [] if tensor[i, 0] == 0 else [0]
133
+ counts.extend(btw_idxs.detach().cpu().tolist())
134
+ out.append({"size": [h, w], "counts": counts})
135
+ return out
136
+
137
+
138
+ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
139
+ """Compute a binary mask from an uncompressed RLE."""
140
+ h, w = rle["size"]
141
+ mask = np.empty(h * w, dtype=bool)
142
+ idx = 0
143
+ parity = False
144
+ for count in rle["counts"]:
145
+ mask[idx : idx + count] = parity
146
+ idx += count
147
+ parity ^= True
148
+ mask = mask.reshape(w, h)
149
+ return mask.transpose() # Put in C order
150
+
151
+
152
+ def area_from_rle(rle: Dict[str, Any]) -> int:
153
+ return sum(rle["counts"][1::2])
154
+
155
+
156
+ def calculate_stability_score(
157
+ masks: torch.Tensor, mask_threshold: float, threshold_offset: float
158
+ ) -> torch.Tensor:
159
+ """
160
+ Computes the stability score for a batch of masks. The stability
161
+ score is the IoU between the binary masks obtained by thresholding
162
+ the predicted mask logits at high and low values.
163
+ """
164
+ # One mask is always contained inside the other.
165
+ # Save memory by preventing unnecessary cast to torch.int64
166
+ intersections = (
167
+ (masks > (mask_threshold + threshold_offset))
168
+ .sum(-1, dtype=torch.int16)
169
+ .sum(-1, dtype=torch.int32)
170
+ )
171
+ unions = (
172
+ (masks > (mask_threshold - threshold_offset))
173
+ .sum(-1, dtype=torch.int16)
174
+ .sum(-1, dtype=torch.int32)
175
+ )
176
+ return intersections / unions
177
+
178
+
179
+ def build_point_grid(n_per_side: int) -> np.ndarray:
180
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
181
+ offset = 1 / (2 * n_per_side)
182
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
183
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
184
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
185
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
186
+ return points
187
+
188
+
189
+ def build_all_layer_point_grids(
190
+ n_per_side: int, n_layers: int, scale_per_layer: int
191
+ ) -> List[np.ndarray]:
192
+ """Generates point grids for all crop layers."""
193
+ points_by_layer = []
194
+ for i in range(n_layers + 1):
195
+ n_points = int(n_per_side / (scale_per_layer**i))
196
+ points_by_layer.append(build_point_grid(n_points))
197
+ return points_by_layer
198
+
199
+
200
+ def generate_crop_boxes(
201
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
202
+ ) -> Tuple[List[List[int]], List[int]]:
203
+ """
204
+ Generates a list of crop boxes of different sizes. Each layer
205
+ has (2**i)**2 boxes for the ith layer.
206
+ """
207
+ crop_boxes, layer_idxs = [], []
208
+ im_h, im_w = im_size
209
+ short_side = min(im_h, im_w)
210
+
211
+ # Original image
212
+ crop_boxes.append([0, 0, im_w, im_h])
213
+ layer_idxs.append(0)
214
+
215
+ def crop_len(orig_len, n_crops, overlap):
216
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
217
+
218
+ for i_layer in range(n_layers):
219
+ n_crops_per_side = 2 ** (i_layer + 1)
220
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
221
+
222
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
223
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
224
+
225
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
226
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
227
+
228
+ # Crops in XYWH format
229
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
230
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
231
+ crop_boxes.append(box)
232
+ layer_idxs.append(i_layer + 1)
233
+
234
+ return crop_boxes, layer_idxs
235
+
236
+
237
+ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
238
+ x0, y0, _, _ = crop_box
239
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
240
+ # Check if boxes has a channel dimension
241
+ if len(boxes.shape) == 3:
242
+ offset = offset.unsqueeze(1)
243
+ return boxes + offset
244
+
245
+
246
+ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
247
+ x0, y0, _, _ = crop_box
248
+ offset = torch.tensor([[x0, y0]], device=points.device)
249
+ # Check if points has a channel dimension
250
+ if len(points.shape) == 3:
251
+ offset = offset.unsqueeze(1)
252
+ return points + offset
253
+
254
+
255
+ def uncrop_masks(
256
+ masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
257
+ ) -> torch.Tensor:
258
+ x0, y0, x1, y1 = crop_box
259
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
260
+ return masks
261
+ # Coordinate transform masks
262
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
263
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
264
+ return torch.nn.functional.pad(masks, pad, value=0)
265
+
266
+
267
+ def remove_small_regions(
268
+ mask: np.ndarray, area_thresh: float, mode: str
269
+ ) -> Tuple[np.ndarray, bool]:
270
+ """
271
+ Removes small disconnected regions and holes in a mask. Returns the
272
+ mask and an indicator of if the mask has been modified.
273
+ """
274
+ import cv2 # type: ignore
275
+
276
+ assert mode in ["holes", "islands"]
277
+ correct_holes = mode == "holes"
278
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
279
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
280
+ sizes = stats[:, -1][1:] # Row 0 is background label
281
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
282
+ if len(small_regions) == 0:
283
+ return mask, False
284
+ fill_labels = [0] + small_regions
285
+ if not correct_holes:
286
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
287
+ # If every region is below threshold, keep largest
288
+ if len(fill_labels) == 0:
289
+ fill_labels = [int(np.argmax(sizes)) + 1]
290
+ mask = np.isin(regions, fill_labels)
291
+ return mask, True
292
+
293
+
294
+ def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
295
+ from pycocotools import mask as mask_utils # type: ignore
296
+
297
+ h, w = uncompressed_rle["size"]
298
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
299
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
300
+ return rle
301
+
302
+
303
+ def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
304
+ """
305
+ Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
306
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
307
+ """
308
+ # torch.max below raises an error on empty inputs, just skip in this case
309
+ if torch.numel(masks) == 0:
310
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
311
+
312
+ # Normalize shape to CxHxW
313
+ shape = masks.shape
314
+ h, w = shape[-2:]
315
+ if len(shape) > 2:
316
+ masks = masks.flatten(0, -3)
317
+ else:
318
+ masks = masks.unsqueeze(0)
319
+
320
+ # Get top and bottom edges
321
+ in_height, _ = torch.max(masks, dim=-1)
322
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
323
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
324
+ in_height_coords = in_height_coords + h * (~in_height)
325
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
326
+
327
+ # Get left and right edges
328
+ in_width, _ = torch.max(masks, dim=-2)
329
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
330
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
331
+ in_width_coords = in_width_coords + w * (~in_width)
332
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
333
+
334
+ # If the mask is empty the right edge will be to the left of the left edge.
335
+ # Replace these boxes with [0, 0, 0, 0]
336
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
337
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
338
+ out = out * (~empty_filter).unsqueeze(-1)
339
+
340
+ # Return to original shape
341
+ if len(shape) > 2:
342
+ out = out.reshape(*shape[:-2], 4)
343
+ else:
344
+ out = out[0]
345
+
346
+ return out
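The uncompressed RLE helpers above can be sanity-checked with a tiny round trip (self-contained apart from the repo import):

import torch
from segment_anything.utils.amg import mask_to_rle_pytorch, rle_to_mask, area_from_rle

mask = torch.zeros(1, 4, 4, dtype=torch.bool)
mask[0, 1:3, 1:3] = True                              # a 2x2 foreground block
rle = mask_to_rle_pytorch(mask)[0]                    # counts alternate background/foreground runs
print(area_from_rle(rle))                             # 4
print((rle_to_mask(rle) == mask[0].numpy()).all())    # True: decoding recovers the mask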
segment_anything/utils/onnx.py ADDED
@@ -0,0 +1,144 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.nn import functional as F
10
+
11
+ from typing import Tuple
12
+
13
+ from ..modeling import Sam
14
+ from .amg import calculate_stability_score
15
+
16
+
17
+ class SamOnnxModel(nn.Module):
18
+ """
19
+ This model should not be called directly, but is used in ONNX export.
20
+ It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
21
+ with some functions modified to enable model tracing. Also supports extra
22
+ options controlling what information is returned. See the ONNX export script for details.
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ model: Sam,
28
+ return_single_mask: bool,
29
+ use_stability_score: bool = False,
30
+ return_extra_metrics: bool = False,
31
+ ) -> None:
32
+ super().__init__()
33
+ self.mask_decoder = model.mask_decoder
34
+ self.model = model
35
+ self.img_size = model.image_encoder.img_size
36
+ self.return_single_mask = return_single_mask
37
+ self.use_stability_score = use_stability_score
38
+ self.stability_score_offset = 1.0
39
+ self.return_extra_metrics = return_extra_metrics
40
+
41
+ @staticmethod
42
+ def resize_longest_image_size(
43
+ input_image_size: torch.Tensor, longest_side: int
44
+ ) -> torch.Tensor:
45
+ input_image_size = input_image_size.to(torch.float32)
46
+ scale = longest_side / torch.max(input_image_size)
47
+ transformed_size = scale * input_image_size
48
+ transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
49
+ return transformed_size
50
+
51
+ def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
52
+ point_coords = point_coords + 0.5
53
+ point_coords = point_coords / self.img_size
54
+ point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
55
+ point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
56
+
57
+ point_embedding = point_embedding * (point_labels != -1)
58
+ point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
59
+ point_labels == -1
60
+ )
61
+
62
+ for i in range(self.model.prompt_encoder.num_point_embeddings):
63
+ point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
64
+ i
65
+ ].weight * (point_labels == i)
66
+
67
+ return point_embedding
68
+
69
+ def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
70
+ mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
71
+ mask_embedding = mask_embedding + (
72
+ 1 - has_mask_input
73
+ ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
74
+ return mask_embedding
75
+
76
+ def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
77
+ masks = F.interpolate(
78
+ masks,
79
+ size=(self.img_size, self.img_size),
80
+ mode="bilinear",
81
+ align_corners=False,
82
+ )
83
+
84
+ prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64)
85
+ masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
86
+
87
+ orig_im_size = orig_im_size.to(torch.int64)
88
+ h, w = orig_im_size[0], orig_im_size[1]
89
+ masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
90
+ return masks
91
+
92
+ def select_masks(
93
+ self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
94
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
95
+ # Determine if we should return the multiclick mask or not from the number of points.
96
+ # The reweighting is used to avoid control flow.
97
+ score_reweight = torch.tensor(
98
+ [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
99
+ ).to(iou_preds.device)
100
+ score = iou_preds + (num_points - 2.5) * score_reweight
101
+ best_idx = torch.argmax(score, dim=1)
102
+ masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
103
+ iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
104
+
105
+ return masks, iou_preds
106
+
107
+ @torch.no_grad()
108
+ def forward(
109
+ self,
110
+ image_embeddings: torch.Tensor,
111
+ point_coords: torch.Tensor,
112
+ point_labels: torch.Tensor,
113
+ mask_input: torch.Tensor,
114
+ has_mask_input: torch.Tensor,
115
+ orig_im_size: torch.Tensor,
116
+ ):
117
+ sparse_embedding = self._embed_points(point_coords, point_labels)
118
+ dense_embedding = self._embed_masks(mask_input, has_mask_input)
119
+
120
+ masks, scores = self.model.mask_decoder.predict_masks(
121
+ image_embeddings=image_embeddings,
122
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
123
+ sparse_prompt_embeddings=sparse_embedding,
124
+ dense_prompt_embeddings=dense_embedding,
125
+ )
126
+
127
+ if self.use_stability_score:
128
+ scores = calculate_stability_score(
129
+ masks, self.model.mask_threshold, self.stability_score_offset
130
+ )
131
+
132
+ if self.return_single_mask:
133
+ masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
134
+
135
+ upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
136
+
137
+ if self.return_extra_metrics:
138
+ stability_scores = calculate_stability_score(
139
+ upscaled_masks, self.model.mask_threshold, self.stability_score_offset
140
+ )
141
+ areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
142
+ return upscaled_masks, scores, stability_scores, areas, masks
143
+
144
+ return upscaled_masks, scores, masks
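resize_longest_image_size is a static method, so its behavior (rescale so the longer side equals longest_side, matching the ResizeLongestSide preprocessing) can be checked in isolation; the 600x800 input size is just an example:

import torch
from segment_anything.utils.onnx import SamOnnxModel

size = SamOnnxModel.resize_longest_image_size(torch.tensor([600, 800]), longest_side=1024)
print(size)   # tensor([ 768, 1024])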