CgvKodai committed · Commit 66003a2 · verified · 1 Parent(s): c6a03f4

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +90 -35
  2. .gitignore +149 -0
  3. .gradio/certificate.pem +31 -0
  4. CODE_OF_CONDUCT.md +80 -0
  5. CONTRIBUTING.md +31 -0
  6. LICENSE.txt +115 -0
  7. README.md +291 -7
  8. demo_colmap.py +330 -0
  9. demo_gradio.py +691 -0
  10. demo_viser.py +402 -0
  11. docs/package.md +45 -0
  12. examples/kitchen/images/00.png +3 -0
  13. examples/kitchen/images/01.png +3 -0
  14. examples/kitchen/images/02.png +3 -0
  15. examples/kitchen/images/03.png +3 -0
  16. examples/kitchen/images/04.png +3 -0
  17. examples/kitchen/images/05.png +3 -0
  18. examples/kitchen/images/06.png +3 -0
  19. examples/kitchen/images/07.png +3 -0
  20. examples/kitchen/images/08.png +3 -0
  21. examples/kitchen/images/09.png +3 -0
  22. examples/kitchen/images/10.png +3 -0
  23. examples/kitchen/images/11.png +3 -0
  24. examples/kitchen/images/12.png +3 -0
  25. examples/kitchen/images/13.png +3 -0
  26. examples/kitchen/images/14.png +3 -0
  27. examples/kitchen/images/15.png +3 -0
  28. examples/kitchen/images/16.png +3 -0
  29. examples/kitchen/images/17.png +3 -0
  30. examples/kitchen/images/18.png +3 -0
  31. examples/kitchen/images/19.png +3 -0
  32. examples/kitchen/images/20.png +3 -0
  33. examples/kitchen/images/21.png +3 -0
  34. examples/kitchen/images/22.png +3 -0
  35. examples/kitchen/images/23.png +3 -0
  36. examples/kitchen/images/24.png +3 -0
  37. examples/llff_fern/images/000.png +3 -0
  38. examples/llff_fern/images/001.png +3 -0
  39. examples/llff_fern/images/002.png +3 -0
  40. examples/llff_fern/images/003.png +3 -0
  41. examples/llff_fern/images/004.png +3 -0
  42. examples/llff_fern/images/005.png +3 -0
  43. examples/llff_fern/images/006.png +3 -0
  44. examples/llff_fern/images/007.png +3 -0
  45. examples/llff_fern/images/008.png +3 -0
  46. examples/llff_fern/images/009.png +3 -0
  47. examples/llff_fern/images/010.png +3 -0
  48. examples/llff_fern/images/011.png +3 -0
  49. examples/llff_fern/images/012.png +3 -0
  50. examples/llff_fern/images/013.png +3 -0
.gitattributes CHANGED
@@ -1,35 +1,90 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # SCM syntax highlighting & preventing 3-way merges
+ pixi.lock merge=binary linguist-language=YAML linguist-generated=true
+ examples/kitchen/images/00.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/01.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/02.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/03.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/04.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/05.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/06.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/07.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/08.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/09.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/10.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/11.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/12.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/13.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/14.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/15.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/16.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/17.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/18.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/19.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/20.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/21.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/22.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/23.png filter=lfs diff=lfs merge=lfs -text
+ examples/kitchen/images/24.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/000.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/001.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/002.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/003.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/004.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/005.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/006.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/007.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/008.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/009.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/010.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/011.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/012.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/013.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/014.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/015.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/016.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/017.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/018.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_fern/images/019.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/000.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/001.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/002.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/003.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/004.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/005.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/006.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/007.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/008.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/009.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/010.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/011.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/012.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/013.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/014.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/015.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/016.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/017.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/018.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/019.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/020.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/021.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/022.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/023.png filter=lfs diff=lfs merge=lfs -text
+ examples/llff_flower/images/024.png filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_1.png filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_2.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_3.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_4.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_5.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_6.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_7.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/room/images/no_overlap_8.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/single_cartoon/images/model_was_never_trained_on_single_image_or_cartoon.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/single_oil_painting/images/model_was_never_trained_on_single_image_or_oil_painting.png filter=lfs diff=lfs merge=lfs -text
+ examples/videos/Colosseum.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/fern.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/great_wall.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/kitchen.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/pyramid.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/room.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/single_cartoon.mp4 filter=lfs diff=lfs merge=lfs -text
+ examples/videos/single_oil_painting.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,149 @@
+ .hydra/
+ output/
+ ckpt/
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ **/__pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Profiling data
+ .prof
+
+ # Folder specific to your needs
+ **/tmp/
+ **/outputs/skyseg.onnx
+ skyseg.onnx
+
+ # pixi environments
+ .pixi
+ *.egg-info
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,80 @@
+ # Code of Conduct
+
+ ## Our Pledge
+
+ In the interest of fostering an open and welcoming environment, we as
+ contributors and maintainers pledge to make participation in our project and
+ our community a harassment-free experience for everyone, regardless of age, body
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
+ level of experience, education, socio-economic status, nationality, personal
+ appearance, race, religion, or sexual identity and orientation.
+
+ ## Our Standards
+
+ Examples of behavior that contributes to creating a positive environment
+ include:
+
+ * Using welcoming and inclusive language
+ * Being respectful of differing viewpoints and experiences
+ * Gracefully accepting constructive criticism
+ * Focusing on what is best for the community
+ * Showing empathy towards other community members
+
+ Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+ * Trolling, insulting/derogatory comments, and personal or political attacks
+ * Public or private harassment
+ * Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+ * Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+ ## Our Responsibilities
+
+ Project maintainers are responsible for clarifying the standards of acceptable
+ behavior and are expected to take appropriate and fair corrective action in
+ response to any instances of unacceptable behavior.
+
+ Project maintainers have the right and responsibility to remove, edit, or
+ reject comments, commits, code, wiki edits, issues, and other contributions
+ that are not aligned to this Code of Conduct, or to ban temporarily or
+ permanently any contributor for other behaviors that they deem inappropriate,
+ threatening, offensive, or harmful.
+
+ ## Scope
+
+ This Code of Conduct applies within all project spaces, and it also applies when
+ an individual is representing the project or its community in public spaces.
+ Examples of representing a project or community include using an official
+ project e-mail address, posting via an official social media account, or acting
+ as an appointed representative at an online or offline event. Representation of
+ a project may be further defined and clarified by project maintainers.
+
+ This Code of Conduct also applies outside the project spaces when there is a
+ reasonable belief that an individual's behavior may have a negative impact on
+ the project or its community.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
+ reported by contacting the project team at <opensource-conduct@meta.com>. All
+ complaints will be reviewed and investigated and will result in a response that
+ is deemed necessary and appropriate to the circumstances. The project team is
+ obligated to maintain confidentiality with regard to the reporter of an incident.
+ Further details of specific enforcement policies may be posted separately.
+
+ Project maintainers who do not follow or enforce the Code of Conduct in good
+ faith may face temporary or permanent repercussions as determined by other
+ members of the project's leadership.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+ [homepage]: https://www.contributor-covenant.org
+
+ For answers to common questions about this code of conduct, see
+ https://www.contributor-covenant.org/faq
CONTRIBUTING.md ADDED
@@ -0,0 +1,31 @@
+ # Contributing to vggt
+ We want to make contributing to this project as easy and transparent as
+ possible.
+
+ ## Pull Requests
+ We actively welcome your pull requests.
+
+ 1. Fork the repo and create your branch from `main`.
+ 2. If you've added code that should be tested, add tests.
+ 3. If you've changed APIs, update the documentation.
+ 4. Ensure the test suite passes.
+ 5. Make sure your code lints.
+ 6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+ ## Contributor License Agreement ("CLA")
+ In order to accept your pull request, we need you to submit a CLA. You only need
+ to do this once to work on any of Facebook's open source projects.
+
+ Complete your CLA here: <https://code.facebook.com/cla>
+
+ ## Issues
+ We use GitHub issues to track public bugs. Please ensure your description is
+ clear and has sufficient instructions to be able to reproduce the issue.
+
+ Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+ disclosure of security bugs. In those cases, please go through the process
+ outlined on that page and do not file a public issue.
+
+ ## License
+ By contributing to vggt, you agree that your contributions will be licensed
+ under the LICENSE file in the root directory of this source tree.
LICENSE.txt ADDED
@@ -0,0 +1,115 @@
1
+ VGGT License
2
+
3
+ v1 Last Updated: July 29, 2025
4
+
5
+ “Acceptable Use Policy” means the Acceptable Use Policy, applicable to Research Materials, that is incorporated into this Agreement.
6
+
7
+ “Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Research Materials set forth herein.
8
+
9
+
10
+ “Documentation” means the specifications, manuals and documentation accompanying
11
+ Research Materials distributed by Meta.
12
+
13
+
14
+ “Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
15
+
16
+ “Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
17
+ “Research Materials” means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, demonstration materials and other elements of the foregoing distributed by Meta and made available under this Agreement.
18
+
19
+ By clicking “I Accept” below or by using or distributing any portion or element of the Research Materials, you agree to be bound by this Agreement.
20
+
21
+
22
+ 1. License Rights and Redistribution.
23
+
24
+
25
+ a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the Research Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Research Materials.
26
+
27
+ b. Redistribution and Use.
28
+
29
+
30
+ i. Distribution of Research Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the Research Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement. You shall also provide a copy of this Agreement to such third party.
31
+
32
+
33
+ ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with Research Materials, you must acknowledge the use of Research Materials in your publication.
34
+
35
+
36
+ iii. Your use of the Research Materials must comply with applicable laws and regulations (including Trade Control Laws) and adhere to the Acceptable Use Policy, which is hereby incorporated by reference into this Agreement.
37
+ 2. User Support. Your use of the Research Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the Research Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind.
38
+
39
+
40
+ 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE RESEARCH MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE RESEARCH MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE RESEARCH MATERIALS AND ANY OUTPUT AND RESULTS.
41
+
42
+ 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
43
+
44
+ 5. Intellectual Property.
45
+
46
+
47
+ a. Subject to Meta’s ownership of Research Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Research Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
48
+
49
+ b. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Research Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Research Materials.
50
+
51
+ 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Research Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Research Materials. Sections 5, 6 and 9 shall survive the termination of this Agreement.
52
+
53
+ 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
54
+
55
+
56
+ 8. Modifications and Amendments. Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the Research Materials after any modification to this Agreement constitutes your agreement to such modification. Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta.
57
+
58
+
59
+ Acceptable Use Policy
60
+
61
+ Meta seeks to further understanding of new and existing research domains with the mission of advancing the state-of-the-art in artificial intelligence through open research for the benefit of all.
62
+
63
+ As part of this mission, Meta makes certain research materials available for use in accordance with this Agreement (including the Acceptable Use Policy). Meta is committed to promoting the safe and responsible use of such research materials.
64
+
65
+ Prohibited Uses
66
+
67
+ You agree you will not use, or allow others to use, Research Materials to:
68
+
69
+ Violate the law or others’ rights, including to:
70
+ Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
71
+ Violence or terrorism
72
+ Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
73
+ Human trafficking, exploitation, and sexual violence
74
+ The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
75
+ Sexual solicitation
76
+ Any other criminal activity
77
+
78
+ Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
79
+
80
+ Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
81
+
82
+ Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
83
+
84
+ Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
85
+
86
+ Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any technology using Research Materials
87
+
88
+ Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
89
+
90
+ 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of research artifacts related to the following:
91
+
92
+ Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
93
+
94
+ Guns and illegal weapons (including weapon development)
95
+
96
+ Illegal drugs and regulated/controlled substances
97
+ Operation of critical infrastructure, transportation technologies, or heavy machinery
98
+
99
+ Self-harm or harm to others, including suicide, cutting, and eating disorders
100
+ Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
101
+
102
+ 3. Intentionally deceive or mislead others, including use of Research Materials related to the following:
103
+
104
+ Generating, promoting, or furthering fraud or the creation or promotion of disinformation
105
+ Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
106
+
107
+ Generating, promoting, or further distributing spam
108
+
109
+ Impersonating another individual without consent, authorization, or legal right
110
+
111
+ Representing that outputs of research materials or outputs from technology using Research Materials are human-generated
112
+
113
+ Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
114
+
115
+ 4. Fail to appropriately disclose to end users any known dangers of your Research Materials.
README.md CHANGED
@@ -1,12 +1,296 @@
  ---
- title: ' Vggt'
- emoji: 📚
- colorFrom: purple
- colorTo: blue
  sdk: gradio
  sdk_version: 5.49.1
- app_file: app.py
- pinned: false
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: _vggt
+ app_file: demo_gradio.py
  sdk: gradio
  sdk_version: 5.49.1
  ---
+ <div align="center">
+ <h1>VGGT: Visual Geometry Grounded Transformer</h1>
+
+ <a href="https://jytime.github.io/data/VGGT_CVPR25.pdf" target="_blank" rel="noopener noreferrer">
+ <img src="https://img.shields.io/badge/Paper-VGGT" alt="Paper PDF">
+ </a>
+ <a href="https://arxiv.org/abs/2503.11651"><img src="https://img.shields.io/badge/arXiv-2503.11651-b31b1b" alt="arXiv"></a>
+ <a href="https://vgg-t.github.io/"><img src="https://img.shields.io/badge/Project_Page-green" alt="Project Page"></a>
+ <a href="https://huggingface.co/spaces/facebook/vggt"><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Demo-blue'></a>
+
+ **[Visual Geometry Group, University of Oxford](https://www.robots.ox.ac.uk/~vgg/)**; **[Meta AI](https://ai.facebook.com/research/)**
+
+ [Jianyuan Wang](https://jytime.github.io/), [Minghao Chen](https://silent-chen.github.io/), [Nikita Karaev](https://nikitakaraevv.github.io/), [Andrea Vedaldi](https://www.robots.ox.ac.uk/~vedaldi/), [Christian Rupprecht](https://chrirupp.github.io/), [David Novotny](https://d-novotny.github.io/)
+ </div>
+
+ ```bibtex
+ @inproceedings{wang2025vggt,
+   title={VGGT: Visual Geometry Grounded Transformer},
+   author={Wang, Jianyuan and Chen, Minghao and Karaev, Nikita and Vedaldi, Andrea and Rupprecht, Christian and Novotny, David},
+   booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+   year={2025}
+ }
+ ```
+
+ ## Updates
+
+ - [July 29, 2025] We've updated the license for VGGT to permit **commercial use** (excluding military applications). All code in this repository is now under a commercial-use-friendly license. However, only the newly released checkpoint [**VGGT-1B-Commercial**](https://huggingface.co/facebook/VGGT-1B-Commercial) is licensed for commercial usage — the original checkpoint remains non-commercial. Full license details are available [here](https://github.com/facebookresearch/vggt/blob/main/LICENSE.txt). Access to the checkpoint requires completing an application form, which is processed automatically by a system similar to LLaMA's approval workflow. The new checkpoint delivers similar performance to the original model. Please submit an issue if you notice a significant performance discrepancy.
+
+ - [July 6, 2025] Training code is now available in the `training` folder, including an example to finetune VGGT on a custom dataset.
+
+ - [June 13, 2025] Honored to receive the Best Paper Award at CVPR 2025! Apologies if I'm slow to respond to queries or GitHub issues these days. If you're interested, our oral presentation is available [here](https://docs.google.com/presentation/d/1JVuPnuZx6RgAy-U5Ezobg73XpBi7FrOh/edit?usp=sharing&ouid=107115712143490405606&rtpof=true&sd=true). Another long presentation can be found [here](https://docs.google.com/presentation/d/1aSv0e5PmH1mnwn2MowlJIajFUYZkjqgw/edit?usp=sharing&ouid=107115712143490405606&rtpof=true&sd=true) (Note: it's shared in .pptx format with animations — quite large, but feel free to use it as a template if helpful.)
+
+ - [June 2, 2025] Added a script to run VGGT and save predictions in COLMAP format, with optional bundle adjustment support. The saved COLMAP files can be directly used with [gsplat](https://github.com/nerfstudio-project/gsplat) or other NeRF/Gaussian splatting libraries.
+
+ - [May 3, 2025] Evaluation code for reproducing our camera pose estimation results on Co3D is now available in the [evaluation](https://github.com/facebookresearch/vggt/tree/evaluation) branch.
+
+ ## Overview
+
+ Visual Geometry Grounded Transformer (VGGT, CVPR 2025) is a feed-forward neural network that directly infers all key 3D attributes of a scene, including extrinsic and intrinsic camera parameters, point maps, depth maps, and 3D point tracks, **from one, a few, or hundreds of its views, within seconds**.
+
+ ## Quick Start
+
+ First, clone this repository to your local machine and install the dependencies (torch, torchvision, numpy, Pillow, and huggingface_hub).
+
+ ```bash
+ git clone git@github.com:facebookresearch/vggt.git
+ cd vggt
+ pip install -r requirements.txt
+ ```
+
+ Alternatively, you can install VGGT as a package (<a href="docs/package.md">click here</a> for details).
+
+ Now, try the model with just a few lines of code:
+
+ ```python
+ import torch
+ from vggt.models.vggt import VGGT
+ from vggt.utils.load_fn import load_and_preprocess_images
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ # bfloat16 is supported on Ampere GPUs (Compute Capability 8.0+)
+ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
+
+ # Initialize the model and load the pretrained weights.
+ # This will automatically download the model weights the first time it's run, which may take a while.
+ model = VGGT.from_pretrained("facebook/VGGT-1B").to(device)
+
+ # Load and preprocess example images (replace with your own image paths)
+ image_names = ["path/to/imageA.png", "path/to/imageB.png", "path/to/imageC.png"]
+ images = load_and_preprocess_images(image_names).to(device)
+
+ with torch.no_grad():
+     with torch.cuda.amp.autocast(dtype=dtype):
+         # Predict attributes including cameras, depth maps, and point maps.
+         predictions = model(images)
+ ```
+
+ The model weights will be automatically downloaded from Hugging Face. If you encounter issues such as slow loading, you can manually download them [here](https://huggingface.co/facebook/VGGT-1B/blob/main/model.pt) and load them locally, or:
+
+ ```python
+ model = VGGT()
+ _URL = "https://huggingface.co/facebook/VGGT-1B/resolve/main/model.pt"
+ model.load_state_dict(torch.hub.load_state_dict_from_url(_URL))
+ ```
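A minimal sketch of the manual route, assuming you have already downloaded the checkpoint and saved it as `model.pt` next to your script (the local path is only a placeholder):

```python
import torch
from vggt.models.vggt import VGGT

model = VGGT()
# "model.pt" is a placeholder path to the manually downloaded checkpoint
state_dict = torch.load("model.pt", map_location="cpu")
model.load_state_dict(state_dict)
model = model.to(device).eval()
```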
+
+ ## Detailed Usage
+
+ <details>
+ <summary>Click to expand</summary>
+
+ You can also optionally choose which attributes (branches) to predict, as shown below. This achieves the same result as the example above. This example uses a batch size of 1 (processing a single scene), but it naturally works for multiple scenes.
+
+ ```python
+ from vggt.utils.pose_enc import pose_encoding_to_extri_intri
+ from vggt.utils.geometry import unproject_depth_map_to_point_map
+
+ with torch.no_grad():
+     with torch.cuda.amp.autocast(dtype=dtype):
+         images = images[None]  # add batch dimension
+         aggregated_tokens_list, ps_idx = model.aggregator(images)
+
+     # Predict Cameras
+     pose_enc = model.camera_head(aggregated_tokens_list)[-1]
+     # Extrinsic and intrinsic matrices, following OpenCV convention (camera from world)
+     extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc, images.shape[-2:])
+
+     # Predict Depth Maps
+     depth_map, depth_conf = model.depth_head(aggregated_tokens_list, images, ps_idx)
+
+     # Predict Point Maps
+     point_map, point_conf = model.point_head(aggregated_tokens_list, images, ps_idx)
+
+     # Construct 3D Points from Depth Maps and Cameras
+     # which usually leads to more accurate 3D points than the point map branch
+     point_map_by_unprojection = unproject_depth_map_to_point_map(depth_map.squeeze(0),
+                                                                  extrinsic.squeeze(0),
+                                                                  intrinsic.squeeze(0))
+
+     # Predict Tracks
+     # choose your own points to track, with shape (N, 2) for one scene
+     query_points = torch.FloatTensor([[100.0, 200.0],
+                                       [60.72, 259.94]]).to(device)
+     track_list, vis_score, conf_score = model.track_head(aggregated_tokens_list, images, ps_idx, query_points=query_points[None])
+ ```
+
+ Furthermore, if certain pixels in the input frames are unwanted (e.g., reflective surfaces, sky, or water), you can simply mask them out by setting the corresponding pixel values to 0 or 1. Precise segmentation masks aren't necessary; simple bounding-box masks work effectively (check this [issue](https://github.com/facebookresearch/vggt/issues/47) for an example).
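A rough sketch of that masking idea, where the box coordinates are arbitrary placeholders and `images` is the (S, 3, H, W) tensor produced by `load_and_preprocess_images` in the Quick Start:

```python
# Zero out a rough bounding box (e.g., around a reflective window) in frame 0
# before running the model; an approximate region is enough.
y0, y1, x0, x1 = 100, 220, 50, 300  # placeholder pixel coordinates
images[0, :, y0:y1, x0:x1] = 0.0

with torch.no_grad():
    with torch.cuda.amp.autocast(dtype=dtype):
        predictions = model(images)
```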
+
+ </details>
+
+ ## Interactive Demo
+
+ We provide multiple ways to visualize your 3D reconstructions. Before using these visualization tools, install the required dependencies:
+
+ ```bash
+ pip install -r requirements_demo.txt
+ ```
+
+ ### Interactive 3D Visualization
+
+ **Please note:** VGGT typically reconstructs a scene in less than 1 second. However, visualizing 3D points may take tens of seconds due to third-party rendering, independent of VGGT's processing time. The visualization is especially slow when the number of images is large.
+
+ #### Gradio Web Interface
+
+ Our Gradio-based interface allows you to upload images/videos, run reconstruction, and interactively explore the 3D scene in your browser. You can launch it on your local machine or try it on [Hugging Face](https://huggingface.co/spaces/facebook/vggt).
+
+ ```bash
+ python demo_gradio.py
+ ```
+
+ <details>
+ <summary>Click to preview the Gradio interactive interface</summary>
+
+ ![Gradio Web Interface Preview](https://jytime.github.io/data/vggt_hf_demo_screen.png)
+ </details>
+
+ #### Viser 3D Viewer
+
+ Run the following command to run reconstruction and visualize the point clouds in viser. Note that this script requires a path to a folder containing images, and it assumes the folder contains only image files. You can set `--use_point_map` to use the point cloud from the point map branch instead of the depth-based point cloud.
+
+ ```bash
+ python demo_viser.py --image_folder path/to/your/images/folder
+ ```
+
+ ## Exporting to COLMAP Format
+
+ We also support exporting VGGT's predictions directly to COLMAP format:
+
+ ```bash
+ # Feedforward prediction only
+ python demo_colmap.py --scene_dir=/YOUR/SCENE_DIR/
+
+ # With bundle adjustment
+ python demo_colmap.py --scene_dir=/YOUR/SCENE_DIR/ --use_ba
+
+ # Run with bundle adjustment using reduced parameters for faster processing
+ # Reduces max_query_pts from 4096 (default) to 2048 and query_frame_num from 8 (default) to 5
+ # Trade-off: Faster execution but potentially less robust reconstruction in complex scenes (you may consider setting query_frame_num equal to your total number of images)
+ # See demo_colmap.py for additional bundle adjustment configuration options
+ python demo_colmap.py --scene_dir=/YOUR/SCENE_DIR/ --use_ba --max_query_pts=2048 --query_frame_num=5
+ ```
+
+ Please ensure that the images are stored in `/YOUR/SCENE_DIR/images/`. This folder should contain only the images. Check the examples folder for the desired data structure.
+
+ The reconstruction result (camera parameters and 3D points) will be automatically saved under `/YOUR/SCENE_DIR/sparse/` in the COLMAP format, such as:
+
+ ```
+ SCENE_DIR/
+ ├── images/
+ └── sparse/
+     ├── cameras.bin
+     ├── images.bin
+     └── points3D.bin
+ ```
+
+ ## Integration with Gaussian Splatting
+
+ The exported COLMAP files can be directly used with [gsplat](https://github.com/nerfstudio-project/gsplat) for Gaussian Splatting training. Install `gsplat` following their official instructions (we recommend `gsplat==1.3.0`).
+
+ An example command to train the model is:
+ ```
+ cd gsplat
+ python examples/simple_trainer.py default --data_factor 1 --data_dir /YOUR/SCENE_DIR/ --result_dir /YOUR/RESULT_DIR/
+ ```
+
+ ## Zero-shot Single-view Reconstruction
+
+ Our model shows surprisingly good performance on single-view reconstruction, although it was never trained for this task. The model does not need to duplicate a single view into a pair; instead, it can directly infer the 3D structure from the tokens of the single view. Feel free to try it with our demos above, which naturally work for single-view reconstruction.
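As a quick illustration, reusing the names from the Quick Start snippet (the image path is a placeholder), single-view inference is simply the same call with a one-element image list:

```python
images = load_and_preprocess_images(["path/to/single_image.png"]).to(device)

with torch.no_grad():
    with torch.cuda.amp.autocast(dtype=dtype):
        predictions = model(images)  # cameras, depth, and point map for the single view
```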
+
+ We did not quantitatively test monocular depth estimation performance ourselves, but [@kabouzeid](https://github.com/kabouzeid) generously provided a comparison of VGGT to recent methods [here](https://github.com/facebookresearch/vggt/issues/36). VGGT shows competitive or better results compared to state-of-the-art monocular approaches such as DepthAnything v2 or MoGe, despite never being explicitly trained for single-view tasks.
+
+ ## Runtime and GPU Memory
+
+ We benchmark the runtime and GPU memory usage of VGGT's aggregator on a single NVIDIA H100 GPU across various input sizes.
+
+ | **Input Frames** | 1 | 2 | 4 | 8 | 10 | 20 | 50 | 100 | 200 |
+ |:----------------:|:-:|:-:|:-:|:-:|:--:|:--:|:--:|:---:|:---:|
+ | **Time (s)** | 0.04 | 0.05 | 0.07 | 0.11 | 0.14 | 0.31 | 1.04 | 3.12 | 8.75 |
+ | **Memory (GB)** | 1.88 | 2.07 | 2.45 | 3.23 | 3.63 | 5.58 | 11.41 | 21.15 | 40.63 |
+
+ Note that these results were obtained using Flash Attention 3, which is faster than the default Flash Attention 2 implementation while maintaining almost the same memory usage. Feel free to compile Flash Attention 3 from source to get better performance.
+
+ ## Research Progression
+
+ Our work builds upon a series of previous research projects. If you're interested in understanding how our research evolved, check out our previous works:
+
+ <table border="0" cellspacing="0" cellpadding="0">
+ <tr>
+ <td align="left">
+ <a href="https://github.com/jytime/Deep-SfM-Revisited">Deep SfM Revisited</a>
+ </td>
+ <td style="white-space: pre;">──┐</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td align="left">
+ <a href="https://github.com/facebookresearch/PoseDiffusion">PoseDiffusion</a>
+ </td>
+ <td style="white-space: pre;">─────►</td>
+ <td>
+ <a href="https://github.com/facebookresearch/vggsfm">VGGSfM</a> ──►
+ <a href="https://github.com/facebookresearch/vggt">VGGT</a>
+ </td>
+ </tr>
+ <tr>
+ <td align="left">
+ <a href="https://github.com/facebookresearch/co-tracker">CoTracker</a>
+ </td>
+ <td style="white-space: pre;">──┘</td>
+ <td></td>
+ </tr>
+ </table>
+
+ ## Acknowledgements
+
+ Thanks to these great repositories: [PoseDiffusion](https://github.com/facebookresearch/PoseDiffusion), [VGGSfM](https://github.com/facebookresearch/vggsfm), [CoTracker](https://github.com/facebookresearch/co-tracker), [DINOv2](https://github.com/facebookresearch/dinov2), [Dust3r](https://github.com/naver/dust3r), [Moge](https://github.com/microsoft/moge), [PyTorch3D](https://github.com/facebookresearch/pytorch3d), [Sky Segmentation](https://github.com/xiongzhu666/Sky-Segmentation-and-Post-processing), [Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2), [Metric3D](https://github.com/YvanYin/Metric3D), and many other inspiring works in the community.
+
+ ## Checklist
+
+ - [x] Release the training code
+ - [ ] Release VGGT-500M and VGGT-200M
+
+ ## License
+ See the [LICENSE](./LICENSE.txt) file for details about the license under which this code is made available.
+
+ Please note that only this [model checkpoint](https://huggingface.co/facebook/VGGT-1B-Commercial) allows commercial usage. The new checkpoint achieves the same performance level as the original one (and may be slightly better), e.g., AUC@30: 90.37 vs. 89.98 on the Co3D dataset.
demo_colmap.py ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import random
8
+ import numpy as np
9
+ import glob
10
+ import os
11
+ import copy
12
+ import torch
13
+ import torch.nn.functional as F
14
+
15
+ # Configure CUDA settings
16
+ torch.backends.cudnn.enabled = True
17
+ torch.backends.cudnn.benchmark = True
18
+ torch.backends.cudnn.deterministic = False
19
+
20
+ import argparse
21
+ from pathlib import Path
22
+ import trimesh
23
+ import pycolmap
24
+
25
+
26
+ from vggt.models.vggt import VGGT
27
+ from vggt.utils.load_fn import load_and_preprocess_images_square
28
+ from vggt.utils.pose_enc import pose_encoding_to_extri_intri
29
+ from vggt.utils.geometry import unproject_depth_map_to_point_map
30
+ from vggt.utils.helper import create_pixel_coordinate_grid, randomly_limit_trues
31
+ from vggt.dependency.track_predict import predict_tracks
32
+ from vggt.dependency.np_to_pycolmap import batch_np_matrix_to_pycolmap, batch_np_matrix_to_pycolmap_wo_track
33
+
34
+
35
+ # TODO: add support for masks
36
+ # TODO: add iterative BA
37
+ # TODO: add support for radial distortion, which needs extra_params
38
+ # TODO: test with more cases
39
+ # TODO: test different camera types
40
+
41
+
42
+ def parse_args():
43
+ parser = argparse.ArgumentParser(description="VGGT Demo")
44
+ parser.add_argument("--scene_dir", type=str, required=True, help="Directory containing the scene images")
45
+ parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
46
+ parser.add_argument("--use_ba", action="store_true", default=False, help="Use BA for reconstruction")
47
+ ######### BA parameters #########
48
+ parser.add_argument(
49
+ "--max_reproj_error", type=float, default=8.0, help="Maximum reprojection error for reconstruction"
50
+ )
51
+ parser.add_argument("--shared_camera", action="store_true", default=False, help="Use shared camera for all images")
52
+ parser.add_argument("--camera_type", type=str, default="SIMPLE_PINHOLE", help="Camera type for reconstruction")
53
+ parser.add_argument("--vis_thresh", type=float, default=0.2, help="Visibility threshold for tracks")
54
+ parser.add_argument("--query_frame_num", type=int, default=8, help="Number of frames to query")
55
+ parser.add_argument("--max_query_pts", type=int, default=4096, help="Maximum number of query points")
56
+ parser.add_argument(
57
+ "--fine_tracking", action="store_true", default=True, help="Use fine tracking (slower but more accurate)"
58
+ )
59
+ parser.add_argument(
60
+ "--conf_thres_value", type=float, default=5.0, help="Confidence threshold value for depth filtering (wo BA)"
61
+ )
62
+ return parser.parse_args()
63
+
64
+
65
+ def run_VGGT(model, images, dtype, resolution=518):
66
+ # images: [B, 3, H, W]
67
+
68
+ assert len(images.shape) == 4
69
+ assert images.shape[1] == 3
70
+
71
+ # hard-coded to use 518 for VGGT
72
+ images = F.interpolate(images, size=(resolution, resolution), mode="bilinear", align_corners=False)
73
+
74
+ with torch.no_grad():
75
+ with torch.cuda.amp.autocast(dtype=dtype):
76
+ images = images[None] # add batch dimension
77
+ aggregated_tokens_list, ps_idx = model.aggregator(images)
78
+
79
+ # Predict Cameras
80
+ pose_enc = model.camera_head(aggregated_tokens_list)[-1]
81
+ # Extrinsic and intrinsic matrices, following OpenCV convention (camera from world)
82
+ extrinsic, intrinsic = pose_encoding_to_extri_intri(pose_enc, images.shape[-2:])
83
+ # Predict Depth Maps
84
+ depth_map, depth_conf = model.depth_head(aggregated_tokens_list, images, ps_idx)
85
+
86
+ extrinsic = extrinsic.squeeze(0).cpu().numpy()
87
+ intrinsic = intrinsic.squeeze(0).cpu().numpy()
88
+ depth_map = depth_map.squeeze(0).cpu().numpy()
89
+ depth_conf = depth_conf.squeeze(0).cpu().numpy()
90
+ return extrinsic, intrinsic, depth_map, depth_conf
91
+
92
+
93
+ def demo_fn(args):
94
+ # Print configuration
95
+ print("Arguments:", vars(args))
96
+
97
+ # Set seed for reproducibility
98
+ np.random.seed(args.seed)
99
+ torch.manual_seed(args.seed)
100
+ random.seed(args.seed)
101
+ if torch.cuda.is_available():
102
+ torch.cuda.manual_seed(args.seed)
103
+ torch.cuda.manual_seed_all(args.seed) # for multi-GPU
104
+ print(f"Setting seed as: {args.seed}")
105
+
106
+ # Set device and dtype
107
+ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
108
+ device = "cuda" if torch.cuda.is_available() else "cpu"
109
+ print(f"Using device: {device}")
110
+ print(f"Using dtype: {dtype}")
111
+
112
+ # Run VGGT for camera and depth estimation
113
+ model = VGGT()
114
+ _URL = "https://huggingface.co/facebook/VGGT-1B/resolve/main/model.pt"
115
+ model.load_state_dict(torch.hub.load_state_dict_from_url(_URL))
116
+ model.eval()
117
+ model = model.to(device)
118
+ print(f"Model loaded")
119
+
120
+ # Get image paths and preprocess them
121
+ image_dir = os.path.join(args.scene_dir, "images")
122
+ image_path_list = glob.glob(os.path.join(image_dir, "*"))
123
+ if len(image_path_list) == 0:
124
+ raise ValueError(f"No images found in {image_dir}")
125
+ base_image_path_list = [os.path.basename(path) for path in image_path_list]
126
+
127
+ # Load images and original coordinates
128
+ # Load Image in 1024, while running VGGT with 518
129
+ vggt_fixed_resolution = 518
130
+ img_load_resolution = 1024
131
+
132
+ images, original_coords = load_and_preprocess_images_square(image_path_list, img_load_resolution)
133
+ images = images.to(device)
134
+ original_coords = original_coords.to(device)
135
+ print(f"Loaded {len(images)} images from {image_dir}")
136
+
137
+ # Run VGGT to estimate camera and depth
138
+ # Run with 518x518 images
139
+ extrinsic, intrinsic, depth_map, depth_conf = run_VGGT(model, images, dtype, vggt_fixed_resolution)
140
+ points_3d = unproject_depth_map_to_point_map(depth_map, extrinsic, intrinsic)
141
+
142
+ if args.use_ba:
143
+ image_size = np.array(images.shape[-2:])
144
+ scale = img_load_resolution / vggt_fixed_resolution
145
+ shared_camera = args.shared_camera
146
+
147
+ with torch.cuda.amp.autocast(dtype=dtype):
148
+ # Predicting Tracks
149
+ # Using VGGSfM tracker instead of VGGT tracker for efficiency
150
+ # The VGGT tracker requires multiple backbone runs to query different frames (a limitation introduced by the training process)
151
+ # Will be fixed in VGGT v2
152
+
153
+ # You can also change the pred_tracks to tracks from any other methods
154
+ # e.g., from COLMAP, from CoTracker, or by chaining 2D matches from Lightglue/LoFTR.
155
+ pred_tracks, pred_vis_scores, pred_confs, points_3d, points_rgb = predict_tracks(
156
+ images,
157
+ conf=depth_conf,
158
+ points_3d=points_3d,
159
+ masks=None,
160
+ max_query_pts=args.max_query_pts,
161
+ query_frame_num=args.query_frame_num,
162
+ keypoint_extractor="aliked+sp",
163
+ fine_tracking=args.fine_tracking,
164
+ )
165
+
166
+ torch.cuda.empty_cache()
167
+
168
+ # rescale the intrinsic matrix from 518 to 1024
169
+ intrinsic[:, :2, :] *= scale
170
+ track_mask = pred_vis_scores > args.vis_thresh
171
+
172
+ # TODO: radial distortion, iterative BA, masks
173
+ reconstruction, valid_track_mask = batch_np_matrix_to_pycolmap(
174
+ points_3d,
175
+ extrinsic,
176
+ intrinsic,
177
+ pred_tracks,
178
+ image_size,
179
+ masks=track_mask,
180
+ max_reproj_error=args.max_reproj_error,
181
+ shared_camera=shared_camera,
182
+ camera_type=args.camera_type,
183
+ points_rgb=points_rgb,
184
+ )
185
+
186
+ if reconstruction is None:
187
+ raise ValueError("No reconstruction can be built with BA")
188
+
189
+ # Bundle Adjustment
190
+ ba_options = pycolmap.BundleAdjustmentOptions()
191
+ pycolmap.bundle_adjustment(reconstruction, ba_options)
192
+
193
+ reconstruction_resolution = img_load_resolution
194
+ else:
195
+ conf_thres_value = args.conf_thres_value
196
+ max_points_for_colmap = 100000 # randomly sample 3D points
197
+ shared_camera = False # shared cameras are not supported in feed-forward mode
198
+ camera_type = "PINHOLE" # in the feedforward manner, we only support PINHOLE camera
199
+
200
+ image_size = np.array([vggt_fixed_resolution, vggt_fixed_resolution])
201
+ num_frames, height, width, _ = points_3d.shape
202
+
203
+ points_rgb = F.interpolate(
204
+ images, size=(vggt_fixed_resolution, vggt_fixed_resolution), mode="bilinear", align_corners=False
205
+ )
206
+ points_rgb = (points_rgb.cpu().numpy() * 255).astype(np.uint8)
207
+ points_rgb = points_rgb.transpose(0, 2, 3, 1)
208
+
209
+ # (S, H, W, 3), with x, y coordinates and frame indices
210
+ points_xyf = create_pixel_coordinate_grid(num_frames, height, width)
211
+
212
+ conf_mask = depth_conf >= conf_thres_value
213
+ # write at most 100,000 3D points to the COLMAP reconstruction object
214
+ conf_mask = randomly_limit_trues(conf_mask, max_points_for_colmap)
215
+
216
+ points_3d = points_3d[conf_mask]
217
+ points_xyf = points_xyf[conf_mask]
218
+ points_rgb = points_rgb[conf_mask]
219
+
220
+ print("Converting to COLMAP format")
221
+ reconstruction = batch_np_matrix_to_pycolmap_wo_track(
222
+ points_3d,
223
+ points_xyf,
224
+ points_rgb,
225
+ extrinsic,
226
+ intrinsic,
227
+ image_size,
228
+ shared_camera=shared_camera,
229
+ camera_type=camera_type,
230
+ )
231
+
232
+ reconstruction_resolution = vggt_fixed_resolution
233
+
234
+ reconstruction = rename_colmap_recons_and_rescale_camera(
235
+ reconstruction,
236
+ base_image_path_list,
237
+ original_coords.cpu().numpy(),
238
+ img_size=reconstruction_resolution,
239
+ shift_point2d_to_original_res=True,
240
+ shared_camera=shared_camera,
241
+ )
242
+
243
+ print(f"Saving reconstruction to {args.scene_dir}/sparse")
244
+ sparse_reconstruction_dir = os.path.join(args.scene_dir, "sparse")
245
+ os.makedirs(sparse_reconstruction_dir, exist_ok=True)
246
+ reconstruction.write(sparse_reconstruction_dir)
247
+
248
+ # Save point cloud for fast visualization
249
+ trimesh.PointCloud(points_3d, colors=points_rgb).export(os.path.join(args.scene_dir, "sparse/points.ply"))
250
+
251
+ return True
252
+
253
+
254
+ def rename_colmap_recons_and_rescale_camera(
255
+ reconstruction, image_paths, original_coords, img_size, shift_point2d_to_original_res=False, shared_camera=False
256
+ ):
257
+ rescale_camera = True
258
+
259
+ for pyimageid in reconstruction.images:
260
+ # Map the padded & resized image back to its original size
261
+ # Rename the images to the original names
262
+ pyimage = reconstruction.images[pyimageid]
263
+ pycamera = reconstruction.cameras[pyimage.camera_id]
264
+ pyimage.name = image_paths[pyimageid - 1]
265
+
266
+ if rescale_camera:
267
+ # Rescale the camera parameters
268
+ pred_params = copy.deepcopy(pycamera.params)
269
+
270
+ real_image_size = original_coords[pyimageid - 1, -2:]
271
+ resize_ratio = max(real_image_size) / img_size
272
+ pred_params = pred_params * resize_ratio
273
+ real_pp = real_image_size / 2
274
+ pred_params[-2:] = real_pp # center of the image
275
+
276
+ pycamera.params = pred_params
277
+ pycamera.width = real_image_size[0]
278
+ pycamera.height = real_image_size[1]
279
+
280
+ if shift_point2d_to_original_res:
281
+ # Also shift the point2D to original resolution
282
+ top_left = original_coords[pyimageid - 1, :2]
283
+
284
+ for point2D in pyimage.points2D:
285
+ point2D.xy = (point2D.xy - top_left) * resize_ratio
286
+
287
+ if shared_camera:
288
+ # If shared_camera, all images share the same camera
289
+ # so there is no need to rescale again
290
+ rescale_camera = False
291
+
292
+ return reconstruction
293
+
294
+
295
+ if __name__ == "__main__":
296
+ args = parse_args()
297
+ with torch.no_grad():
298
+ demo_fn(args)
299
+
300
+
301
+ # Work in Progress (WIP)
302
+
303
+ """
304
+ VGGT Runner Script
305
+ =================
306
+
307
+ A script to run the VGGT model for 3D reconstruction from image sequences.
308
+
309
+ Directory Structure
310
+ ------------------
311
+ Input:
312
+ input_folder/
313
+ └── images/ # Source images for reconstruction
314
+
315
+ Output:
316
+ output_folder/
317
+ ├── images/
318
+ ├── sparse/ # Reconstruction results
319
+ │ ├── cameras.bin # Camera parameters (COLMAP format)
320
+ │ ├── images.bin # Pose for each image (COLMAP format)
321
+ │ ├── points3D.bin # 3D points (COLMAP format)
322
+ │ └── points.ply # Point cloud visualization file
323
+ └── visuals/ # Visualization outputs TODO
324
+
325
+ Key Features
326
+ -----------
327
+ • Dual-mode Support: Run reconstructions using either VGGT or VGGT+BA
328
+ • Resolution Preservation: Maintains original image resolution in camera parameters and tracks
329
+ • COLMAP Compatibility: Exports results in standard COLMAP sparse reconstruction format
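+ 
+ Example Usage (illustrative)
+ ----------------------------
+ The invocations below are a sketch only; the flag names are assumed to mirror the
+ argparse destinations used in demo_fn above (scene_dir, use_ba, ...), so check
+ parse_args for the exact options. Feed-forward mode writes the confidence-filtered
+ point cloud directly, while adding BA refines the cameras and tracks first:
+ 
+     # feed-forward (no bundle adjustment)
+     python demo_colmap.py --scene_dir=/path/to/scene
+ 
+     # with bundle adjustment on top of the VGGT predictions
+     python demo_colmap.py --scene_dir=/path/to/scene --use_ba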
330
+ """
demo_gradio.py ADDED
@@ -0,0 +1,691 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import os
8
+ import cv2
9
+ import torch
10
+ import numpy as np
11
+ import gradio as gr
12
+ import sys
13
+ import shutil
14
+ from datetime import datetime
15
+ import glob
16
+ import gc
17
+ import time
18
+
19
+ sys.path.append("vggt/")
20
+
21
+ from visual_util import predictions_to_glb
22
+ from vggt.models.vggt import VGGT
23
+ from vggt.utils.load_fn import load_and_preprocess_images
24
+ from vggt.utils.pose_enc import pose_encoding_to_extri_intri
25
+ from vggt.utils.geometry import unproject_depth_map_to_point_map
26
+
27
+ device = "cuda" if torch.cuda.is_available() else "cpu"
28
+
29
+ print("Initializing and loading VGGT model...")
30
+ # model = VGGT.from_pretrained("facebook/VGGT-1B") # another way to load the model
31
+
32
+ model = VGGT()
33
+ _URL = "https://huggingface.co/facebook/VGGT-1B/resolve/main/model.pt"
34
+ model.load_state_dict(torch.hub.load_state_dict_from_url(_URL))
35
+
36
+
37
+ model.eval()
38
+ model = model.to(device)
39
+
40
+
41
+ # -------------------------------------------------------------------------
42
+ # 1) Core model inference
43
+ # -------------------------------------------------------------------------
44
+ def run_model(target_dir, model) -> dict:
45
+ """
46
+ Run the VGGT model on images in the 'target_dir/images' folder and return predictions.
47
+ """
48
+ print(f"Processing images from {target_dir}")
49
+
50
+ # Device check
51
+ device = "cuda" if torch.cuda.is_available() else "cpu"
52
+ if not torch.cuda.is_available():
53
+ raise ValueError("CUDA is not available. Check your environment.")
54
+
55
+ # Move model to device
56
+ model = model.to(device)
57
+ model.eval()
58
+
59
+ # Load and preprocess images
60
+ image_names = glob.glob(os.path.join(target_dir, "images", "*"))
61
+ image_names = sorted(image_names)
62
+ print(f"Found {len(image_names)} images")
63
+ if len(image_names) == 0:
64
+ raise ValueError("No images found. Check your upload.")
65
+
66
+ images = load_and_preprocess_images(image_names).to(device)
67
+ print(f"Preprocessed images shape: {images.shape}")
68
+
69
+ # Run inference
70
+ print("Running inference...")
71
+ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
72
+
73
+ with torch.no_grad():
74
+ with torch.cuda.amp.autocast(dtype=dtype):
75
+ predictions = model(images)
76
+
77
+ # Convert pose encoding to extrinsic and intrinsic matrices
78
+ print("Converting pose encoding to extrinsic and intrinsic matrices...")
79
+ extrinsic, intrinsic = pose_encoding_to_extri_intri(predictions["pose_enc"], images.shape[-2:])
80
+ predictions["extrinsic"] = extrinsic
81
+ predictions["intrinsic"] = intrinsic
82
+
83
+ # Convert tensors to numpy
84
+ for key in predictions.keys():
85
+ if isinstance(predictions[key], torch.Tensor):
86
+ predictions[key] = predictions[key].cpu().numpy().squeeze(0) # remove batch dimension
87
+ predictions['pose_enc_list'] = None # remove pose_enc_list
88
+
89
+ # Generate world points from depth map
90
+ print("Computing world points from depth map...")
91
+ depth_map = predictions["depth"] # (S, H, W, 1)
92
+ world_points = unproject_depth_map_to_point_map(depth_map, predictions["extrinsic"], predictions["intrinsic"])
93
+ predictions["world_points_from_depth"] = world_points
94
+
95
+ # Clean up
96
+ torch.cuda.empty_cache()
97
+ return predictions
98
+
99
+
100
+ # -------------------------------------------------------------------------
101
+ # 2) Handle uploaded video/images --> produce target_dir + images
102
+ # -------------------------------------------------------------------------
103
+ def handle_uploads(input_video, input_images):
104
+ """
105
+ Create a new 'target_dir' + 'images' subfolder, and place user-uploaded
106
+ images or extracted frames from video into it. Return (target_dir, image_paths).
107
+ """
108
+ start_time = time.time()
109
+ gc.collect()
110
+ torch.cuda.empty_cache()
111
+
112
+ # Create a unique folder name
113
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
114
+ target_dir = f"input_images_{timestamp}"
115
+ target_dir_images = os.path.join(target_dir, "images")
116
+
117
+ # Clean up if somehow that folder already exists
118
+ if os.path.exists(target_dir):
119
+ shutil.rmtree(target_dir)
120
+ os.makedirs(target_dir)
121
+ os.makedirs(target_dir_images)
122
+
123
+ image_paths = []
124
+
125
+ # --- Handle images ---
126
+ if input_images is not None:
127
+ for file_data in input_images:
128
+ if isinstance(file_data, dict) and "name" in file_data:
129
+ file_path = file_data["name"]
130
+ else:
131
+ file_path = file_data
132
+ dst_path = os.path.join(target_dir_images, os.path.basename(file_path))
133
+ shutil.copy(file_path, dst_path)
134
+ image_paths.append(dst_path)
135
+
136
+ # --- Handle video ---
137
+ if input_video is not None:
138
+ if isinstance(input_video, dict) and "name" in input_video:
139
+ video_path = input_video["name"]
140
+ else:
141
+ video_path = input_video
142
+
143
+ vs = cv2.VideoCapture(video_path)
144
+ fps = vs.get(cv2.CAP_PROP_FPS)
145
+ frame_interval = int(fps * 1) # 1 frame/sec
146
+
147
+ count = 0
148
+ video_frame_num = 0
149
+ while True:
150
+ gotit, frame = vs.read()
151
+ if not gotit:
152
+ break
153
+ count += 1
154
+ if count % frame_interval == 0:
155
+ image_path = os.path.join(target_dir_images, f"{video_frame_num:06}.png")
156
+ cv2.imwrite(image_path, frame)
157
+ image_paths.append(image_path)
158
+ video_frame_num += 1
159
+
160
+ # Sort final images for gallery
161
+ image_paths = sorted(image_paths)
162
+
163
+ end_time = time.time()
164
+ print(f"Files copied to {target_dir_images}; took {end_time - start_time:.3f} seconds")
165
+ return target_dir, image_paths
166
+
167
+
168
+ # -------------------------------------------------------------------------
169
+ # 3) Update gallery on upload
170
+ # -------------------------------------------------------------------------
171
+ def update_gallery_on_upload(input_video, input_images):
172
+ """
173
+ Whenever user uploads or changes files, immediately handle them
174
+ and show them in the gallery. Returns (None, target_dir, image_paths, log message).
175
+ If nothing is uploaded, returns None for every output.
176
+ """
177
+ if not input_video and not input_images:
178
+ return None, None, None, None
179
+ target_dir, image_paths = handle_uploads(input_video, input_images)
180
+ return None, target_dir, image_paths, "Upload complete. Click 'Reconstruct' to begin 3D processing."
181
+
182
+
183
+ # -------------------------------------------------------------------------
184
+ # 4) Reconstruction: uses the target_dir plus any viz parameters
185
+ # -------------------------------------------------------------------------
186
+ def gradio_demo(
187
+ target_dir,
188
+ conf_thres=3.0,
189
+ frame_filter="All",
190
+ mask_black_bg=False,
191
+ mask_white_bg=False,
192
+ show_cam=True,
193
+ mask_sky=False,
194
+ prediction_mode="Pointmap Regression",
195
+ ):
196
+ """
197
+ Perform reconstruction using the already-created target_dir/images.
198
+ """
199
+ if not os.path.isdir(target_dir) or target_dir == "None":
200
+ return None, "No valid target directory found. Please upload first.", None, None
201
+
202
+ start_time = time.time()
203
+ gc.collect()
204
+ torch.cuda.empty_cache()
205
+
206
+ # Prepare frame_filter dropdown
207
+ target_dir_images = os.path.join(target_dir, "images")
208
+ all_files = sorted(os.listdir(target_dir_images)) if os.path.isdir(target_dir_images) else []
209
+ all_files = [f"{i}: {filename}" for i, filename in enumerate(all_files)]
210
+ frame_filter_choices = ["All"] + all_files
211
+
212
+ print("Running run_model...")
213
+ with torch.no_grad():
214
+ predictions = run_model(target_dir, model)
215
+
216
+ # Save predictions
217
+ prediction_save_path = os.path.join(target_dir, "predictions.npz")
218
+ np.savez(prediction_save_path, **predictions)
219
+
220
+ # Handle None frame_filter
221
+ if frame_filter is None:
222
+ frame_filter = "All"
223
+
224
+ # Build a GLB file name
225
+ glbfile = os.path.join(
226
+ target_dir,
227
+ f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_maskb{mask_black_bg}_maskw{mask_white_bg}_cam{show_cam}_sky{mask_sky}_pred{prediction_mode.replace(' ', '_')}.glb",
228
+ )
229
+
230
+ # Convert predictions to GLB
231
+ glbscene = predictions_to_glb(
232
+ predictions,
233
+ conf_thres=conf_thres,
234
+ filter_by_frames=frame_filter,
235
+ mask_black_bg=mask_black_bg,
236
+ mask_white_bg=mask_white_bg,
237
+ show_cam=show_cam,
238
+ mask_sky=mask_sky,
239
+ target_dir=target_dir,
240
+ prediction_mode=prediction_mode,
241
+ )
242
+ glbscene.export(file_obj=glbfile)
243
+
244
+ # Cleanup
245
+ del predictions
246
+ gc.collect()
247
+ torch.cuda.empty_cache()
248
+
249
+ end_time = time.time()
250
+ print(f"Total time: {end_time - start_time:.2f} seconds (including IO)")
251
+ log_msg = f"Reconstruction Success ({len(all_files)} frames). Waiting for visualization."
252
+
253
+ return glbfile, log_msg, gr.Dropdown(choices=frame_filter_choices, value=frame_filter, interactive=True)
254
+
255
+
256
+ # -------------------------------------------------------------------------
257
+ # 5) Helper functions for UI resets + re-visualization
258
+ # -------------------------------------------------------------------------
259
+ def clear_fields():
260
+ """
261
+ Clears the 3D viewer output before a new reconstruction run.
262
+ """
263
+ return None
264
+
265
+
266
+ def update_log():
267
+ """
268
+ Display a quick log message while waiting.
269
+ """
270
+ return "Loading and Reconstructing..."
271
+
272
+
273
+ def update_visualization(
274
+ target_dir, conf_thres, frame_filter, mask_black_bg, mask_white_bg, show_cam, mask_sky, prediction_mode, is_example
275
+ ):
276
+ """
277
+ Reload saved predictions from npz, create (or reuse) the GLB for new parameters,
278
+ and return it for the 3D viewer. If is_example == "True", skip.
279
+ """
280
+
281
+ # If it's an example click, skip as requested
282
+ if is_example == "True":
283
+ return None, "No reconstruction available. Please click the Reconstruct button first."
284
+
285
+ if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
286
+ return None, "No reconstruction available. Please click the Reconstruct button first."
287
+
288
+ predictions_path = os.path.join(target_dir, "predictions.npz")
289
+ if not os.path.exists(predictions_path):
290
+ return None, f"No reconstruction available at {predictions_path}. Please run 'Reconstruct' first."
291
+
292
+ key_list = [
293
+ "pose_enc",
294
+ "depth",
295
+ "depth_conf",
296
+ "world_points",
297
+ "world_points_conf",
298
+ "images",
299
+ "extrinsic",
300
+ "intrinsic",
301
+ "world_points_from_depth",
302
+ ]
303
+
304
+ loaded = np.load(predictions_path)
305
+ predictions = {key: np.array(loaded[key]) for key in key_list}
306
+
307
+ glbfile = os.path.join(
308
+ target_dir,
309
+ f"glbscene_{conf_thres}_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_maskb{mask_black_bg}_maskw{mask_white_bg}_cam{show_cam}_sky{mask_sky}_pred{prediction_mode.replace(' ', '_')}.glb",
310
+ )
311
+
312
+ if not os.path.exists(glbfile):
313
+ glbscene = predictions_to_glb(
314
+ predictions,
315
+ conf_thres=conf_thres,
316
+ filter_by_frames=frame_filter,
317
+ mask_black_bg=mask_black_bg,
318
+ mask_white_bg=mask_white_bg,
319
+ show_cam=show_cam,
320
+ mask_sky=mask_sky,
321
+ target_dir=target_dir,
322
+ prediction_mode=prediction_mode,
323
+ )
324
+ glbscene.export(file_obj=glbfile)
325
+
326
+ return glbfile, "Updating Visualization"
327
+
328
+
329
+ # -------------------------------------------------------------------------
330
+ # Example images
331
+ # -------------------------------------------------------------------------
332
+
333
+ great_wall_video = "examples/videos/great_wall.mp4"
334
+ colosseum_video = "examples/videos/Colosseum.mp4"
335
+ room_video = "examples/videos/room.mp4"
336
+ kitchen_video = "examples/videos/kitchen.mp4"
337
+ fern_video = "examples/videos/fern.mp4"
338
+ single_cartoon_video = "examples/videos/single_cartoon.mp4"
339
+ single_oil_painting_video = "examples/videos/single_oil_painting.mp4"
340
+ pyramid_video = "examples/videos/pyramid.mp4"
341
+
342
+
343
+ # -------------------------------------------------------------------------
344
+ # 6) Build Gradio UI
345
+ # -------------------------------------------------------------------------
346
+ theme = gr.themes.Ocean()
347
+ theme.set(
348
+ checkbox_label_background_fill_selected="*button_primary_background_fill",
349
+ checkbox_label_text_color_selected="*button_primary_text_color",
350
+ )
351
+
352
+ with gr.Blocks(
353
+ theme=theme,
354
+ css="""
355
+ .custom-log * {
356
+ font-style: italic;
357
+ font-size: 22px !important;
358
+ background-image: linear-gradient(120deg, #0ea5e9 0%, #6ee7b7 60%, #34d399 100%);
359
+ -webkit-background-clip: text;
360
+ background-clip: text;
361
+ font-weight: bold !important;
362
+ color: transparent !important;
363
+ text-align: center !important;
364
+ }
365
+
366
+ .example-log * {
367
+ font-style: italic;
368
+ font-size: 16px !important;
369
+ background-image: linear-gradient(120deg, #0ea5e9 0%, #6ee7b7 60%, #34d399 100%);
370
+ -webkit-background-clip: text;
371
+ background-clip: text;
372
+ color: transparent !important;
373
+ }
374
+
375
+ #my_radio .wrap {
376
+ display: flex;
377
+ flex-wrap: nowrap;
378
+ justify-content: center;
379
+ align-items: center;
380
+ }
381
+
382
+ #my_radio .wrap label {
383
+ display: flex;
384
+ width: 50%;
385
+ justify-content: center;
386
+ align-items: center;
387
+ margin: 0;
388
+ padding: 10px 0;
389
+ box-sizing: border-box;
390
+ }
391
+ """,
392
+ ) as demo:
393
+ # Instead of gr.State, we use a hidden Textbox:
394
+ is_example = gr.Textbox(label="is_example", visible=False, value="None")
395
+ num_images = gr.Textbox(label="num_images", visible=False, value="None")
396
+
397
+ gr.HTML(
398
+ """
399
+ <h1>🏛️ VGGT: Visual Geometry Grounded Transformer</h1>
400
+ <p>
401
+ <a href="https://github.com/facebookresearch/vggt">🐙 GitHub Repository</a> |
402
+ <a href="#">Project Page</a>
403
+ </p>
404
+
405
+ <div style="font-size: 16px; line-height: 1.5;">
406
+ <p>Upload a video or a set of images to create a 3D reconstruction of a scene or object. VGGT takes these images and generates a 3D point cloud, along with estimated camera poses.</p>
407
+
408
+ <h3>Getting Started:</h3>
409
+ <ol>
410
+ <li><strong>Upload Your Data:</strong> Use the "Upload Video" or "Upload Images" buttons on the left to provide your input. Videos will be automatically split into individual frames (one frame per second).</li>
411
+ <li><strong>Preview:</strong> Your uploaded images will appear in the gallery on the left.</li>
412
+ <li><strong>Reconstruct:</strong> Click the "Reconstruct" button to start the 3D reconstruction process.</li>
413
+ <li><strong>Visualize:</strong> The 3D reconstruction will appear in the viewer on the right. You can rotate, pan, and zoom to explore the model, and download the GLB file. Note the visualization of 3D points may be slow for a large number of input images.</li>
414
+ <li>
415
+ <strong>Adjust Visualization (Optional):</strong>
416
+ After reconstruction, you can fine-tune the visualization using the options below
417
+ <details style="display:inline;">
418
+ <summary style="display:inline;">(<strong>click to expand</strong>):</summary>
419
+ <ul>
420
+ <li><em>Confidence Threshold:</em> Adjust the filtering of points based on confidence.</li>
421
+ <li><em>Show Points from Frame:</em> Select specific frames to display in the point cloud.</li>
422
+ <li><em>Show Camera:</em> Toggle the display of estimated camera positions.</li>
423
+ <li><em>Filter Sky / Filter Black Background:</em> Remove sky or black-background points.</li>
424
+ <li><em>Select a Prediction Mode:</em> Choose between "Depthmap and Camera Branch" or "Pointmap Branch."</li>
425
+ </ul>
426
+ </details>
427
+ </li>
428
+ </ol>
429
+ <p><strong style="color: #0ea5e9;">Please note:</strong> <span style="color: #0ea5e9; font-weight: bold;">VGGT typically reconstructs a scene in less than 1 second. However, visualizing 3D points may take tens of seconds due to third-party rendering, which are independent of VGGT's processing time. </span></p>
430
+ </div>
431
+ """
432
+ )
433
+
434
+ target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")
435
+
436
+ with gr.Row():
437
+ with gr.Column(scale=2):
438
+ input_video = gr.Video(label="Upload Video", interactive=True)
439
+ input_images = gr.File(file_count="multiple", label="Upload Images", interactive=True)
440
+
441
+ image_gallery = gr.Gallery(
442
+ label="Preview",
443
+ columns=4,
444
+ height="300px",
445
+ show_download_button=True,
446
+ object_fit="contain",
447
+ preview=True,
448
+ )
449
+
450
+ with gr.Column(scale=4):
451
+ with gr.Column():
452
+ gr.Markdown("**3D Reconstruction (Point Cloud and Camera Poses)**")
453
+ log_output = gr.Markdown(
454
+ "Please upload a video or images, then click Reconstruct.", elem_classes=["custom-log"]
455
+ )
456
+ reconstruction_output = gr.Model3D(height=520, zoom_speed=0.5, pan_speed=0.5)
457
+
458
+ with gr.Row():
459
+ submit_btn = gr.Button("Reconstruct", scale=1, variant="primary")
460
+ clear_btn = gr.ClearButton(
461
+ [input_video, input_images, reconstruction_output, log_output, target_dir_output, image_gallery],
462
+ scale=1,
463
+ )
464
+
465
+ with gr.Row():
466
+ prediction_mode = gr.Radio(
467
+ ["Depthmap and Camera Branch", "Pointmap Branch"],
468
+ label="Select a Prediction Mode",
469
+ value="Depthmap and Camera Branch",
470
+ scale=1,
471
+ elem_id="my_radio",
472
+ )
473
+
474
+ with gr.Row():
475
+ conf_thres = gr.Slider(minimum=0, maximum=100, value=50, step=0.1, label="Confidence Threshold (%)")
476
+ frame_filter = gr.Dropdown(choices=["All"], value="All", label="Show Points from Frame")
477
+ with gr.Column():
478
+ show_cam = gr.Checkbox(label="Show Camera", value=True)
479
+ mask_sky = gr.Checkbox(label="Filter Sky", value=False)
480
+ mask_black_bg = gr.Checkbox(label="Filter Black Background", value=False)
481
+ mask_white_bg = gr.Checkbox(label="Filter White Background", value=False)
482
+
483
+ # ---------------------- Examples section ----------------------
484
+ examples = [
485
+ [colosseum_video, "22", None, 20.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
486
+ [pyramid_video, "30", None, 35.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
487
+ [single_cartoon_video, "1", None, 15.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
488
+ [single_oil_painting_video, "1", None, 20.0, False, False, True, True, "Depthmap and Camera Branch", "True"],
489
+ [room_video, "8", None, 5.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
490
+ [kitchen_video, "25", None, 50.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
491
+ [fern_video, "20", None, 45.0, False, False, True, False, "Depthmap and Camera Branch", "True"],
492
+ ]
493
+
494
+ def example_pipeline(
495
+ input_video,
496
+ num_images_str,
497
+ input_images,
498
+ conf_thres,
499
+ mask_black_bg,
500
+ mask_white_bg,
501
+ show_cam,
502
+ mask_sky,
503
+ prediction_mode,
504
+ is_example_str,
505
+ ):
506
+ """
507
+ 1) Copy example images to new target_dir
508
+ 2) Reconstruct
509
+ 3) Return model3D + logs + new_dir + updated dropdown + gallery
510
+ We do NOT return is_example. It's just an input.
511
+ """
512
+ target_dir, image_paths = handle_uploads(input_video, input_images)
513
+ # Always use "All" for frame_filter in examples
514
+ frame_filter = "All"
515
+ glbfile, log_msg, dropdown = gradio_demo(
516
+ target_dir, conf_thres, frame_filter, mask_black_bg, mask_white_bg, show_cam, mask_sky, prediction_mode
517
+ )
518
+ return glbfile, log_msg, target_dir, dropdown, image_paths
519
+
520
+ gr.Markdown("Click any row to load an example.", elem_classes=["example-log"])
521
+
522
+ gr.Examples(
523
+ examples=examples,
524
+ inputs=[
525
+ input_video,
526
+ num_images,
527
+ input_images,
528
+ conf_thres,
529
+ mask_black_bg,
530
+ mask_white_bg,
531
+ show_cam,
532
+ mask_sky,
533
+ prediction_mode,
534
+ is_example,
535
+ ],
536
+ outputs=[reconstruction_output, log_output, target_dir_output, frame_filter, image_gallery],
537
+ fn=example_pipeline,
538
+ cache_examples=False,
539
+ examples_per_page=50,
540
+ )
541
+
542
+ # -------------------------------------------------------------------------
543
+ # "Reconstruct" button logic:
544
+ # - Clear fields
545
+ # - Update log
546
+ # - gradio_demo(...) with the existing target_dir
547
+ # - Then set is_example = "False"
548
+ # -------------------------------------------------------------------------
549
+ submit_btn.click(fn=clear_fields, inputs=[], outputs=[reconstruction_output]).then(
550
+ fn=update_log, inputs=[], outputs=[log_output]
551
+ ).then(
552
+ fn=gradio_demo,
553
+ inputs=[
554
+ target_dir_output,
555
+ conf_thres,
556
+ frame_filter,
557
+ mask_black_bg,
558
+ mask_white_bg,
559
+ show_cam,
560
+ mask_sky,
561
+ prediction_mode,
562
+ ],
563
+ outputs=[reconstruction_output, log_output, frame_filter],
564
+ ).then(
565
+ fn=lambda: "False", inputs=[], outputs=[is_example] # set is_example to "False"
566
+ )
567
+
568
+ # -------------------------------------------------------------------------
569
+ # Real-time Visualization Updates
570
+ # -------------------------------------------------------------------------
571
+ conf_thres.change(
572
+ update_visualization,
573
+ [
574
+ target_dir_output,
575
+ conf_thres,
576
+ frame_filter,
577
+ mask_black_bg,
578
+ mask_white_bg,
579
+ show_cam,
580
+ mask_sky,
581
+ prediction_mode,
582
+ is_example,
583
+ ],
584
+ [reconstruction_output, log_output],
585
+ )
586
+ frame_filter.change(
587
+ update_visualization,
588
+ [
589
+ target_dir_output,
590
+ conf_thres,
591
+ frame_filter,
592
+ mask_black_bg,
593
+ mask_white_bg,
594
+ show_cam,
595
+ mask_sky,
596
+ prediction_mode,
597
+ is_example,
598
+ ],
599
+ [reconstruction_output, log_output],
600
+ )
601
+ mask_black_bg.change(
602
+ update_visualization,
603
+ [
604
+ target_dir_output,
605
+ conf_thres,
606
+ frame_filter,
607
+ mask_black_bg,
608
+ mask_white_bg,
609
+ show_cam,
610
+ mask_sky,
611
+ prediction_mode,
612
+ is_example,
613
+ ],
614
+ [reconstruction_output, log_output],
615
+ )
616
+ mask_white_bg.change(
617
+ update_visualization,
618
+ [
619
+ target_dir_output,
620
+ conf_thres,
621
+ frame_filter,
622
+ mask_black_bg,
623
+ mask_white_bg,
624
+ show_cam,
625
+ mask_sky,
626
+ prediction_mode,
627
+ is_example,
628
+ ],
629
+ [reconstruction_output, log_output],
630
+ )
631
+ show_cam.change(
632
+ update_visualization,
633
+ [
634
+ target_dir_output,
635
+ conf_thres,
636
+ frame_filter,
637
+ mask_black_bg,
638
+ mask_white_bg,
639
+ show_cam,
640
+ mask_sky,
641
+ prediction_mode,
642
+ is_example,
643
+ ],
644
+ [reconstruction_output, log_output],
645
+ )
646
+ mask_sky.change(
647
+ update_visualization,
648
+ [
649
+ target_dir_output,
650
+ conf_thres,
651
+ frame_filter,
652
+ mask_black_bg,
653
+ mask_white_bg,
654
+ show_cam,
655
+ mask_sky,
656
+ prediction_mode,
657
+ is_example,
658
+ ],
659
+ [reconstruction_output, log_output],
660
+ )
661
+ prediction_mode.change(
662
+ update_visualization,
663
+ [
664
+ target_dir_output,
665
+ conf_thres,
666
+ frame_filter,
667
+ mask_black_bg,
668
+ mask_white_bg,
669
+ show_cam,
670
+ mask_sky,
671
+ prediction_mode,
672
+ is_example,
673
+ ],
674
+ [reconstruction_output, log_output],
675
+ )
676
+
677
+ # -------------------------------------------------------------------------
678
+ # Auto-update gallery whenever user uploads or changes their files
679
+ # -------------------------------------------------------------------------
680
+ input_video.change(
681
+ fn=update_gallery_on_upload,
682
+ inputs=[input_video, input_images],
683
+ outputs=[reconstruction_output, target_dir_output, image_gallery, log_output],
684
+ )
685
+ input_images.change(
686
+ fn=update_gallery_on_upload,
687
+ inputs=[input_video, input_images],
688
+ outputs=[reconstruction_output, target_dir_output, image_gallery, log_output],
689
+ )
690
+
691
+ demo.queue(max_size=20).launch(show_error=True, share=True)
demo_viser.py ADDED
@@ -0,0 +1,402 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import os
8
+ import glob
9
+ import time
10
+ import threading
11
+ import argparse
12
+ from typing import List, Optional
13
+
14
+ import numpy as np
15
+ import torch
16
+ from tqdm.auto import tqdm
17
+ import viser
18
+ import viser.transforms as viser_tf
19
+ import cv2
20
+
21
+
22
+ try:
23
+ import onnxruntime
24
+ except ImportError:
25
+ print("onnxruntime not found. Sky segmentation may not work.")
26
+
27
+ from visual_util import segment_sky, download_file_from_url
28
+ from vggt.models.vggt import VGGT
29
+ from vggt.utils.load_fn import load_and_preprocess_images
30
+ from vggt.utils.geometry import closed_form_inverse_se3, unproject_depth_map_to_point_map
31
+ from vggt.utils.pose_enc import pose_encoding_to_extri_intri
32
+
33
+
34
+ def viser_wrapper(
35
+ pred_dict: dict,
36
+ port: int = 8080,
37
+ init_conf_threshold: float = 50.0, # represents percentage (e.g., 50 means filter lowest 50%)
38
+ use_point_map: bool = False,
39
+ background_mode: bool = False,
40
+ mask_sky: bool = False,
41
+ image_folder: str = None,
42
+ ):
43
+ """
44
+ Visualize predicted 3D points and camera poses with viser.
45
+
46
+ Args:
47
+ pred_dict (dict):
48
+ {
49
+ "images": (S, 3, H, W) - Input images,
50
+ "world_points": (S, H, W, 3),
51
+ "world_points_conf": (S, H, W),
52
+ "depth": (S, H, W, 1),
53
+ "depth_conf": (S, H, W),
54
+ "extrinsic": (S, 3, 4),
55
+ "intrinsic": (S, 3, 3),
56
+ }
57
+ port (int): Port number for the viser server.
58
+ init_conf_threshold (float): Initial percentage of low-confidence points to filter out.
59
+ use_point_map (bool): Whether to visualize world_points or use depth-based points.
60
+ background_mode (bool): Whether to run the server in background thread.
61
+ mask_sky (bool): Whether to apply sky segmentation to filter out sky points.
62
+ image_folder (str): Path to the folder containing input images.
63
+ """
64
+ print(f"Starting viser server on port {port}")
65
+
66
+ server = viser.ViserServer(host="0.0.0.0", port=port)
67
+ server.gui.configure_theme(titlebar_content=None, control_layout="collapsible")
68
+
69
+ # Unpack prediction dict
70
+ images = pred_dict["images"] # (S, 3, H, W)
71
+ world_points_map = pred_dict["world_points"] # (S, H, W, 3)
72
+ conf_map = pred_dict["world_points_conf"] # (S, H, W)
73
+
74
+ depth_map = pred_dict["depth"] # (S, H, W, 1)
75
+ depth_conf = pred_dict["depth_conf"] # (S, H, W)
76
+
77
+ extrinsics_cam = pred_dict["extrinsic"] # (S, 3, 4)
78
+ intrinsics_cam = pred_dict["intrinsic"] # (S, 3, 3)
79
+
80
+ # Compute world points from depth if not using the precomputed point map
81
+ if not use_point_map:
82
+ world_points = unproject_depth_map_to_point_map(depth_map, extrinsics_cam, intrinsics_cam)
83
+ conf = depth_conf
84
+ else:
85
+ world_points = world_points_map
86
+ conf = conf_map
87
+
88
+ # Apply sky segmentation if enabled
89
+ if mask_sky and image_folder is not None:
90
+ conf = apply_sky_segmentation(conf, image_folder)
91
+
92
+ # Convert images from (S, 3, H, W) to (S, H, W, 3)
93
+ # Then flatten everything for the point cloud
94
+ colors = images.transpose(0, 2, 3, 1) # now (S, H, W, 3)
95
+ S, H, W, _ = world_points.shape
96
+
97
+ # Flatten
98
+ points = world_points.reshape(-1, 3)
99
+ colors_flat = (colors.reshape(-1, 3) * 255).astype(np.uint8)
100
+ conf_flat = conf.reshape(-1)
101
+
102
+ cam_to_world_mat = closed_form_inverse_se3(extrinsics_cam) # shape (S, 4, 4) typically
103
+ # For convenience, we store only (3,4) portion
104
+ cam_to_world = cam_to_world_mat[:, :3, :]
105
+
106
+ # Compute scene center and recenter
107
+ scene_center = np.mean(points, axis=0)
108
+ points_centered = points - scene_center
109
+ cam_to_world[..., -1] -= scene_center
110
+
111
+ # Store frame indices so we can filter by frame
112
+ frame_indices = np.repeat(np.arange(S), H * W)
113
+
114
+ # Build the viser GUI
115
+ gui_show_frames = server.gui.add_checkbox("Show Cameras", initial_value=True)
116
+
117
+ # Now the slider represents percentage of points to filter out
118
+ gui_points_conf = server.gui.add_slider(
119
+ "Confidence Percent", min=0, max=100, step=0.1, initial_value=init_conf_threshold
120
+ )
121
+
122
+ gui_frame_selector = server.gui.add_dropdown(
123
+ "Show Points from Frames", options=["All"] + [str(i) for i in range(S)], initial_value="All"
124
+ )
125
+
126
+ # Create the main point cloud handle
127
+ # Compute the threshold value as the given percentile
128
+ init_threshold_val = np.percentile(conf_flat, init_conf_threshold)
129
+ init_conf_mask = (conf_flat >= init_threshold_val) & (conf_flat > 0.1)
130
+ point_cloud = server.scene.add_point_cloud(
131
+ name="viser_pcd",
132
+ points=points_centered[init_conf_mask],
133
+ colors=colors_flat[init_conf_mask],
134
+ point_size=0.001,
135
+ point_shape="circle",
136
+ )
137
+
138
+ # We will store references to frames & frustums so we can toggle visibility
139
+ frames: List[viser.FrameHandle] = []
140
+ frustums: List[viser.CameraFrustumHandle] = []
141
+
142
+ def visualize_frames(extrinsics: np.ndarray, images_: np.ndarray) -> None:
143
+ """
144
+ Add camera frames and frustums to the scene.
145
+ extrinsics: (S, 3, 4)
146
+ images_: (S, 3, H, W)
147
+ """
148
+ # Clear any existing frames or frustums
149
+ for f in frames:
150
+ f.remove()
151
+ frames.clear()
152
+ for fr in frustums:
153
+ fr.remove()
154
+ frustums.clear()
155
+
156
+ # Optionally attach a callback that sets the viewpoint to the chosen camera
157
+ def attach_callback(frustum: viser.CameraFrustumHandle, frame: viser.FrameHandle) -> None:
158
+ @frustum.on_click
159
+ def _(_) -> None:
160
+ for client in server.get_clients().values():
161
+ client.camera.wxyz = frame.wxyz
162
+ client.camera.position = frame.position
163
+
164
+ img_ids = range(S)
165
+ for img_id in tqdm(img_ids):
166
+ cam2world_3x4 = extrinsics[img_id]
167
+ T_world_camera = viser_tf.SE3.from_matrix(cam2world_3x4)
168
+
169
+ # Add a small frame axis
170
+ frame_axis = server.scene.add_frame(
171
+ f"frame_{img_id}",
172
+ wxyz=T_world_camera.rotation().wxyz,
173
+ position=T_world_camera.translation(),
174
+ axes_length=0.05,
175
+ axes_radius=0.002,
176
+ origin_radius=0.002,
177
+ )
178
+ frames.append(frame_axis)
179
+
180
+ # Convert the image for the frustum
181
+ img = images_[img_id] # shape (3, H, W)
182
+ img = (img.transpose(1, 2, 0) * 255).astype(np.uint8)
183
+ h, w = img.shape[:2]
184
+
185
+ # If you want correct FOV from intrinsics, do something like:
186
+ # fx = intrinsics_cam[img_id, 0, 0]
187
+ # fov = 2 * np.arctan2(h/2, fx)
188
+ # For demonstration, we pick a simple approximate FOV:
189
+ fy = 1.1 * h
190
+ fov = 2 * np.arctan2(h / 2, fy)
191
+
192
+ # Add the frustum
193
+ frustum_cam = server.scene.add_camera_frustum(
194
+ f"frame_{img_id}/frustum", fov=fov, aspect=w / h, scale=0.05, image=img, line_width=1.0
195
+ )
196
+ frustums.append(frustum_cam)
197
+ attach_callback(frustum_cam, frame_axis)
198
+
199
+ def update_point_cloud() -> None:
200
+ """Update the point cloud based on current GUI selections."""
201
+ # Here we compute the threshold value based on the current percentage
202
+ current_percentage = gui_points_conf.value
203
+ threshold_val = np.percentile(conf_flat, current_percentage)
204
+
205
+ print(f"Threshold absolute value: {threshold_val}, percentage: {current_percentage}%")
206
+
207
+ conf_mask = (conf_flat >= threshold_val) & (conf_flat > 1e-5)
208
+
209
+ if gui_frame_selector.value == "All":
210
+ frame_mask = np.ones_like(conf_mask, dtype=bool)
211
+ else:
212
+ selected_idx = int(gui_frame_selector.value)
213
+ frame_mask = frame_indices == selected_idx
214
+
215
+ combined_mask = conf_mask & frame_mask
216
+ point_cloud.points = points_centered[combined_mask]
217
+ point_cloud.colors = colors_flat[combined_mask]
218
+
219
+ @gui_points_conf.on_update
220
+ def _(_) -> None:
221
+ update_point_cloud()
222
+
223
+ @gui_frame_selector.on_update
224
+ def _(_) -> None:
225
+ update_point_cloud()
226
+
227
+ @gui_show_frames.on_update
228
+ def _(_) -> None:
229
+ """Toggle visibility of camera frames and frustums."""
230
+ for f in frames:
231
+ f.visible = gui_show_frames.value
232
+ for fr in frustums:
233
+ fr.visible = gui_show_frames.value
234
+
235
+ # Add the camera frames to the scene
236
+ visualize_frames(cam_to_world, images)
237
+
238
+ print("Starting viser server...")
239
+ # If background_mode is True, spawn a daemon thread so the main thread can continue.
240
+ if background_mode:
241
+
242
+ def server_loop():
243
+ while True:
244
+ time.sleep(0.001)
245
+
246
+ thread = threading.Thread(target=server_loop, daemon=True)
247
+ thread.start()
248
+ else:
249
+ while True:
250
+ time.sleep(0.01)
251
+
252
+ return server
253
+
254
+
255
+ # Helper functions for sky segmentation
256
+
257
+
258
+ def apply_sky_segmentation(conf: np.ndarray, image_folder: str) -> np.ndarray:
259
+ """
260
+ Apply sky segmentation to confidence scores.
261
+
262
+ Args:
263
+ conf (np.ndarray): Confidence scores with shape (S, H, W)
264
+ image_folder (str): Path to the folder containing input images
265
+
266
+ Returns:
267
+ np.ndarray: Updated confidence scores with sky regions masked out
268
+ """
269
+ S, H, W = conf.shape
270
+ sky_masks_dir = image_folder.rstrip("/") + "_sky_masks"
271
+ os.makedirs(sky_masks_dir, exist_ok=True)
272
+
273
+ # Download skyseg.onnx if it doesn't exist
274
+ if not os.path.exists("skyseg.onnx"):
275
+ print("Downloading skyseg.onnx...")
276
+ download_file_from_url("https://huggingface.co/JianyuanWang/skyseg/resolve/main/skyseg.onnx", "skyseg.onnx")
277
+
278
+ skyseg_session = onnxruntime.InferenceSession("skyseg.onnx")
279
+ image_files = sorted(glob.glob(os.path.join(image_folder, "*")))
280
+ sky_mask_list = []
281
+
282
+ print("Generating sky masks...")
283
+ for i, image_path in enumerate(tqdm(image_files[:S])): # Limit to the number of images in the batch
284
+ image_name = os.path.basename(image_path)
285
+ mask_filepath = os.path.join(sky_masks_dir, image_name)
286
+
287
+ if os.path.exists(mask_filepath):
288
+ sky_mask = cv2.imread(mask_filepath, cv2.IMREAD_GRAYSCALE)
289
+ else:
290
+ sky_mask = segment_sky(image_path, skyseg_session, mask_filepath)
291
+
292
+ # Resize mask to match H×W if needed
293
+ if sky_mask.shape[0] != H or sky_mask.shape[1] != W:
294
+ sky_mask = cv2.resize(sky_mask, (W, H))
295
+
296
+ sky_mask_list.append(sky_mask)
297
+
298
+ # Convert list to numpy array with shape S×H×W
299
+ sky_mask_array = np.array(sky_mask_list)
300
+ # Apply sky mask to confidence scores
301
+ sky_mask_binary = (sky_mask_array > 0.1).astype(np.float32)
302
+ conf = conf * sky_mask_binary
303
+
304
+ print("Sky segmentation applied successfully")
305
+ return conf
306
+
307
+
308
+ parser = argparse.ArgumentParser(description="VGGT demo with viser for 3D visualization")
309
+ parser.add_argument(
310
+ "--image_folder", type=str, default="examples/kitchen/images/", help="Path to folder containing images"
311
+ )
312
+ parser.add_argument("--use_point_map", action="store_true", help="Use point map instead of depth-based points")
313
+ parser.add_argument("--background_mode", action="store_true", help="Run the viser server in background mode")
314
+ parser.add_argument("--port", type=int, default=8080, help="Port number for the viser server")
315
+ parser.add_argument(
316
+ "--conf_threshold", type=float, default=25.0, help="Initial percentage of low-confidence points to filter out"
317
+ )
318
+ parser.add_argument("--mask_sky", action="store_true", help="Apply sky segmentation to filter out sky points")
319
+
320
+
321
+ def main():
322
+ """
323
+ Main function for the VGGT demo with viser for 3D visualization.
324
+
325
+ This function:
326
+ 1. Loads the VGGT model
327
+ 2. Processes input images from the specified folder
328
+ 3. Runs inference to generate 3D points and camera poses
329
+ 4. Optionally applies sky segmentation to filter out sky points
330
+ 5. Visualizes the results using viser
331
+
332
+ Command-line arguments:
333
+ --image_folder: Path to folder containing input images
334
+ --use_point_map: Use point map instead of depth-based points
335
+ --background_mode: Run the viser server in background mode
336
+ --port: Port number for the viser server
337
+ --conf_threshold: Initial percentage of low-confidence points to filter out
338
+ --mask_sky: Apply sky segmentation to filter out sky points
339
+ """
340
+ args = parser.parse_args()
341
+ device = "cuda" if torch.cuda.is_available() else "cpu"
342
+ print(f"Using device: {device}")
343
+
344
+ print("Initializing and loading VGGT model...")
345
+ # model = VGGT.from_pretrained("facebook/VGGT-1B")
346
+
347
+ model = VGGT()
348
+ _URL = "https://huggingface.co/facebook/VGGT-1B/resolve/main/model.pt"
349
+ model.load_state_dict(torch.hub.load_state_dict_from_url(_URL))
350
+
351
+ model.eval()
352
+ model = model.to(device)
353
+
354
+ # Use the provided image folder path
355
+ print(f"Loading images from {args.image_folder}...")
356
+ image_names = glob.glob(os.path.join(args.image_folder, "*"))
357
+ print(f"Found {len(image_names)} images")
358
+
359
+ images = load_and_preprocess_images(image_names).to(device)
360
+ print(f"Preprocessed images shape: {images.shape}")
361
+
362
+ print("Running inference...")
363
+ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
364
+
365
+ with torch.no_grad():
366
+ with torch.cuda.amp.autocast(dtype=dtype):
367
+ predictions = model(images)
368
+
369
+ print("Converting pose encoding to extrinsic and intrinsic matrices...")
370
+ extrinsic, intrinsic = pose_encoding_to_extri_intri(predictions["pose_enc"], images.shape[-2:])
371
+ predictions["extrinsic"] = extrinsic
372
+ predictions["intrinsic"] = intrinsic
373
+
374
+ print("Processing model outputs...")
375
+ for key in predictions.keys():
376
+ if isinstance(predictions[key], torch.Tensor):
377
+ predictions[key] = predictions[key].cpu().numpy().squeeze(0) # remove batch dimension and convert to numpy
378
+
379
+ if args.use_point_map:
380
+ print("Visualizing 3D points from point map")
381
+ else:
382
+ print("Visualizing 3D points by unprojecting depth map by cameras")
383
+
384
+ if args.mask_sky:
385
+ print("Sky segmentation enabled - will filter out sky points")
386
+
387
+ print("Starting viser visualization...")
388
+
389
+ viser_server = viser_wrapper(
390
+ predictions,
391
+ port=args.port,
392
+ init_conf_threshold=args.conf_threshold,
393
+ use_point_map=args.use_point_map,
394
+ background_mode=args.background_mode,
395
+ mask_sky=args.mask_sky,
396
+ image_folder=args.image_folder,
397
+ )
398
+ print("Visualization complete")
399
+
400
+
401
+ if __name__ == "__main__":
402
+ main()
docs/package.md ADDED
@@ -0,0 +1,45 @@
1
+ # Alternative Installation Methods
2
+
3
+ This document explains how to install VGGT as a package using different package managers.
4
+
5
+ ## Prerequisites
6
+
7
+ Before installing VGGT as a package, you need to install PyTorch and torchvision. We don't list these as dependencies to avoid CUDA version mismatches. Install them first, for example:
8
+
9
+ ```bash
10
+ # install pytorch 2.3.1 with cuda 12.1
11
+ pip install torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu121
12
+ ```
13
+
14
+ ## Installation Options
15
+
16
+ ### Install with pip
17
+
18
+ The simplest way to install VGGT is using pip:
19
+
20
+ ```bash
21
+ pip install -e .
22
+ ```
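+ 
+ After installation you can import the package directly. The snippet below is a minimal,
+ illustrative sketch (not an official API reference): it assumes a CUDA-capable GPU, the
+ `facebook/VGGT-1B` checkpoint on the Hugging Face Hub, and that you run it from the
+ repository root so the bundled example image resolves.
+ 
+ ```python
+ import torch
+ from vggt.models.vggt import VGGT
+ from vggt.utils.load_fn import load_and_preprocess_images
+ 
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ # Alternatively, load weights from the checkpoint URL as the demo scripts do.
+ model = VGGT.from_pretrained("facebook/VGGT-1B").to(device).eval()
+ 
+ images = load_and_preprocess_images(["examples/kitchen/images/00.png"]).to(device)
+ with torch.no_grad():
+     predictions = model(images)  # dict with pose encodings, depth maps, etc.
+ ```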
23
+
24
+ ### Install and run with pixi
25
+
26
+ [Pixi](https://pixi.sh) is a package management tool for creating reproducible environments.
27
+
28
+ 1. First, [download and install pixi](https://pixi.sh/latest/get_started/)
29
+ 2. Then run:
30
+
31
+ ```bash
32
+ pixi run -e python demo_gradio.py
33
+ ```
34
+
35
+ ### Install and run with uv
36
+
37
+ [uv](https://docs.astral.sh/uv/) is a fast Python package installer and resolver.
38
+
39
+ 1. First, [install uv](https://docs.astral.sh/uv/getting-started/installation/)
40
+ 2. Then run:
41
+
42
+ ```bash
43
+ uv run --extra demo demo_gradio.py
44
+ ```
45
+
examples/kitchen/images/00.png ADDED

Git LFS Details

  • SHA256: 54527a575988094058cdc1975b421c48e0f446726473d0ac21ea55ecb24e96a7
  • Pointer size: 131 Bytes
  • Size of remote file: 691 kB
examples/kitchen/images/01.png ADDED

Git LFS Details

  • SHA256: 0ad4c6d74c16661ed427f8100124aaf53e7fd0577b32c362f13559dfad7027a7
  • Pointer size: 131 Bytes
  • Size of remote file: 726 kB
examples/kitchen/images/02.png ADDED

Git LFS Details

  • SHA256: 596bd54d26f889fc80cedee81d95dda709fa134d86ac199b6509337e413246d5
  • Pointer size: 131 Bytes
  • Size of remote file: 789 kB
examples/kitchen/images/03.png ADDED

Git LFS Details

  • SHA256: 78193756310d9abaf81fa28902cf0b284260a0a916b085a7c08a4723eead1dd6
  • Pointer size: 131 Bytes
  • Size of remote file: 828 kB
examples/kitchen/images/04.png ADDED

Git LFS Details

  • SHA256: ca551254002a318228e19e46982813f3e489828796e98547ff632043f3002f9d
  • Pointer size: 131 Bytes
  • Size of remote file: 724 kB
examples/kitchen/images/05.png ADDED

Git LFS Details

  • SHA256: a8dcd116d782d32b404d7e4aa69f462abbd048a0d8727440ec37f18cc4548ee4
  • Pointer size: 131 Bytes
  • Size of remote file: 759 kB
examples/kitchen/images/06.png ADDED

Git LFS Details

  • SHA256: 2fcc2b871c6fef6f3a3e0f06a3ffc1f0eee3e40afa2461f7c7c665057decb3e6
  • Pointer size: 131 Bytes
  • Size of remote file: 674 kB
examples/kitchen/images/07.png ADDED

Git LFS Details

  • SHA256: 28d21898de0e6370790839a40f7f45d84fbb3e6ff5809f0a0e14bd01bdef730e
  • Pointer size: 131 Bytes
  • Size of remote file: 856 kB
examples/kitchen/images/08.png ADDED

Git LFS Details

  • SHA256: 0137a2bb3eb3e691d8d8b1f8884a9c8f99748888b1db770091d7acdf35fe8efa
  • Pointer size: 131 Bytes
  • Size of remote file: 677 kB
examples/kitchen/images/09.png ADDED

Git LFS Details

  • SHA256: 1ab59c1ef85d8169b404463f01b7ae4d287da12677126b68a3dce407ca2b9077
  • Pointer size: 131 Bytes
  • Size of remote file: 797 kB
examples/kitchen/images/10.png ADDED

Git LFS Details

  • SHA256: f180cbf110bc65b89ad616328ad7d076dc3901a18def4b1337a134cdf65233a0
  • Pointer size: 131 Bytes
  • Size of remote file: 730 kB
examples/kitchen/images/11.png ADDED

Git LFS Details

  • SHA256: 781196eadae8d907928e877e073289c0998e2b9e513d4f7580e147d15d1ae571
  • Pointer size: 131 Bytes
  • Size of remote file: 799 kB
examples/kitchen/images/12.png ADDED

Git LFS Details

  • SHA256: dd59b24dc8962ba0fc7fbb37b53a6d76fec9730c74e7e3235a06902b250e7d44
  • Pointer size: 131 Bytes
  • Size of remote file: 707 kB
examples/kitchen/images/13.png ADDED

Git LFS Details

  • SHA256: b4cd39f22c766477bad741ff37a1ee5f71aecde8bb6762d869b4c9dca1ceacfb
  • Pointer size: 131 Bytes
  • Size of remote file: 755 kB
examples/kitchen/images/14.png ADDED

Git LFS Details

  • SHA256: 5df1f398efc144271e342d7b65447e022a100b93b3850a755fbc66aff5fca0f2
  • Pointer size: 131 Bytes
  • Size of remote file: 642 kB
examples/kitchen/images/15.png ADDED

Git LFS Details

  • SHA256: 325262829ddb11d1c7df1a8f1fef79a297332dad51870ab0d40a73f1dd6869b1
  • Pointer size: 131 Bytes
  • Size of remote file: 639 kB
examples/kitchen/images/16.png ADDED

Git LFS Details

  • SHA256: 9779a78d72fc25f2118a270f060afeacbcef149a4f012119ff041effa8727cbf
  • Pointer size: 131 Bytes
  • Size of remote file: 754 kB
examples/kitchen/images/17.png ADDED

Git LFS Details

  • SHA256: 2549f4f505ea021eebe0bf579b969b6c162d2dee18b0c8e9d7a3c043d200e45b
  • Pointer size: 131 Bytes
  • Size of remote file: 774 kB
examples/kitchen/images/18.png ADDED

Git LFS Details

  • SHA256: e1c21131c4732756d5774dd732af86c1d39dea96fd2d613afd570633b3a76ef6
  • Pointer size: 131 Bytes
  • Size of remote file: 829 kB
examples/kitchen/images/19.png ADDED

Git LFS Details

  • SHA256: d17680e77c6cb326eb4604e29f9e532db34769ca20b938e944ab53e8bd3798e2
  • Pointer size: 131 Bytes
  • Size of remote file: 678 kB
examples/kitchen/images/20.png ADDED

Git LFS Details

  • SHA256: 5e9c835a0e0c1bc162a8bff6b93677c58cb53afaadca260b0ca2a388565b4cc2
  • Pointer size: 131 Bytes
  • Size of remote file: 718 kB
examples/kitchen/images/21.png ADDED

Git LFS Details

  • SHA256: 0747b2d1b44ef538a9aa40a067881ef9d3ed5cacbf954c926a2bdf5f29c114e6
  • Pointer size: 131 Bytes
  • Size of remote file: 787 kB
examples/kitchen/images/22.png ADDED

Git LFS Details

  • SHA256: 77a0014d7c7d5802ce23cda4e102759274fd8f4c150271a3b61cbb2fe33b69b6
  • Pointer size: 131 Bytes
  • Size of remote file: 675 kB
examples/kitchen/images/23.png ADDED

Git LFS Details

  • SHA256: 1a9415e9b8f08ff298829ffac779bb1e8dedccb3bf36060d59a7da2a35c4f790
  • Pointer size: 131 Bytes
  • Size of remote file: 652 kB
examples/kitchen/images/24.png ADDED

Git LFS Details

  • SHA256: 5199003307466bf4706a0898f139bf3590946f255d08c6b11d5aa9eede54c83a
  • Pointer size: 131 Bytes
  • Size of remote file: 800 kB
examples/llff_fern/images/000.png ADDED

Git LFS Details

  • SHA256: 47f447d31a84d53494045087cbb8a40b877a68a76f549af14f6bb6f490a5b05d
  • Pointer size: 131 Bytes
  • Size of remote file: 671 kB
examples/llff_fern/images/001.png ADDED

Git LFS Details

  • SHA256: 05402df1d7247e794768461571c188737dcae5fcb34400990f5751244a3e41c0
  • Pointer size: 131 Bytes
  • Size of remote file: 666 kB
examples/llff_fern/images/002.png ADDED

Git LFS Details

  • SHA256: e17135aa9b506fac24a9529ee56c37ef5a52c55498998d3de64cf3e46210dccc
  • Pointer size: 131 Bytes
  • Size of remote file: 652 kB
examples/llff_fern/images/003.png ADDED

Git LFS Details

  • SHA256: 3285c7cc6b4b75703a68f510072c5eca81cff9b983044426cbe2ca27d4e526c5
  • Pointer size: 131 Bytes
  • Size of remote file: 653 kB
examples/llff_fern/images/004.png ADDED

Git LFS Details

  • SHA256: 9976282206f9aff0fc3eaa3daa182ba93e0c6734c69bdf31f40989641b4f8fea
  • Pointer size: 131 Bytes
  • Size of remote file: 609 kB
examples/llff_fern/images/005.png ADDED

Git LFS Details

  • SHA256: e7ce2bcabcd2b2972c505e2649eb0f5b9efb30adcd455d93f5014370d53f2653
  • Pointer size: 131 Bytes
  • Size of remote file: 633 kB
examples/llff_fern/images/006.png ADDED

Git LFS Details

  • SHA256: 57dfa176d28662655adda9c0c04d6949424ddb3c702f533ddb332543dd1dcbdb
  • Pointer size: 131 Bytes
  • Size of remote file: 634 kB
examples/llff_fern/images/007.png ADDED

Git LFS Details

  • SHA256: da42e3dd198bc0951591b2a5e41bb15fbff18c2aef194d17a6acbf128487749e
  • Pointer size: 131 Bytes
  • Size of remote file: 632 kB
examples/llff_fern/images/008.png ADDED

Git LFS Details

  • SHA256: a8b8ac9860697b9cb1bfe41b358b03aef7a97ecb2fa9af61bc6e11210d99e8be
  • Pointer size: 131 Bytes
  • Size of remote file: 633 kB
examples/llff_fern/images/009.png ADDED

Git LFS Details

  • SHA256: beb825c8fee0b21801bca59ddddf65e560bd87fdc6823ec733cb8e6be05002c9
  • Pointer size: 131 Bytes
  • Size of remote file: 640 kB
examples/llff_fern/images/010.png ADDED

Git LFS Details

  • SHA256: 390054ae2969ce5ed0e8ed9b9c0501c527028b565e18807e0acd5d62a4627dae
  • Pointer size: 131 Bytes
  • Size of remote file: 637 kB
examples/llff_fern/images/011.png ADDED

Git LFS Details

  • SHA256: a3875f648038011549183d4c6a273be3d75e868e7f4971b474089121acbf8d52
  • Pointer size: 131 Bytes
  • Size of remote file: 618 kB
examples/llff_fern/images/012.png ADDED

Git LFS Details

  • SHA256: e003aa530890edaf01a34701e0a83f21d90d41e36e80b91dc5aba9a055a72063
  • Pointer size: 131 Bytes
  • Size of remote file: 647 kB
examples/llff_fern/images/013.png ADDED

Git LFS Details

  • SHA256: c4c393c28985d237c9152b7e053dbd06257904f6415696a8d710c038d9c45885
  • Pointer size: 131 Bytes
  • Size of remote file: 650 kB